Dataset columns (name, dtype, observed range):

repo                string, 358 distinct values
pull_number         int64, values 6 to 67.9k
instance_id         string, length 12 to 49
issue_numbers       sequence, length 1 to 7
base_commit         string, length 40
patch               string, length 87 to 101M
test_patch          string, length 72 to 22.3M
problem_statement   string, length 3 to 256k
hints_text          string, length 0 to 545k
created_at          string, length 20
PASS_TO_PASS        sequence, length 0 to 0
FAIL_TO_PASS        sequence, length 0 to 0
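Given this schema, a record can be loaded and inspected with the Hugging Face `datasets` library. A minimal sketch, assuming the data is published on the Hub; the dataset identifier below is a placeholder, not the real path.

```python
from datasets import load_dataset  # pip install datasets

# "org/task-instances" is a hypothetical identifier; substitute the real dataset path.
ds = load_dataset("org/task-instances", split="train")

row = ds[0]
print(row["repo"], row["pull_number"], row["instance_id"])  # e.g. chaoss/augur 46 chaoss__augur-46
print(row["base_commit"])    # 40-character SHA the patches apply to
print(row["patch"][:200])    # gold patch stored as a unified diff
print(row["FAIL_TO_PASS"])   # tests expected to start passing once the patch is applied
```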
repo: chaoss/augur
pull_number: 46
instance_id: chaoss__augur-46
issue_numbers: ["41"]
base_commit: c417d3a13aa33099f6ca6af2db18bf18217b144b
diff --git a/ghdata/ghtorrent.py b/ghdata/ghtorrent.py --- a/ghdata/ghtorrent.py +++ b/ghdata/ghtorrent.py @@ -1,8 +1,6 @@ #SPDX-License-Identifier: MIT import pandas as pd import sqlalchemy as s -import sys -import json import re class GHTorrent(object): @@ -16,6 +14,10 @@ def __init__(self, dbstr): """ self.DB_STR = dbstr self.db = s.create_engine(dbstr) + try: + self.userid('howderek') + except Exception as e: + print("Could not connect to database.\nError: " + str(e)) def __single_table_count_by_date(self, table, repo_col='project_id'): """ @@ -327,3 +329,38 @@ def pull_acceptance_rate(self, repoid): """) return pd.read_sql(pullAcceptanceSQL, self.db, params={"repoid": str(repoid)}) + + def classify_contributors(self, repoid): + """ + Classify everyone who has interacted with a repo into + - user + - tester + - rejected_contributor + - contributor + - major_contributor + - maintainer + + :param repoid: The id of the project in the projects table. + :return: DataFrame with the login and role of contributors + """ + contributors = self.contributors(repoid) + sums = contributors.sum() + + def classify(row): + role = 'user' + ratio = row / sums + if (ratio['issue_comments'] > 0.05): + role = 'tester' + if (row['pull_requests'] >= 1 and row['commits'] == 0): + role = 'rejected_contributor' + if (row['pull_requests'] >= 1 and row['commits'] >= 1): + role = 'contributor' + if (ratio['pull_requests'] > 0.10 or ratio['commits'] > 0.01): + role = 'major_contributor' + if (ratio['commits'] > 0.02 or ratio['pull_request_comments'] > 0.15): + role = 'maintainer' + + return pd.Series({'login': row['login'], 'role': role}) + + roles = contributors.apply(classify, axis=1) + return roles diff --git a/ghdata/githubapi.py b/ghdata/githubapi.py --- a/ghdata/githubapi.py +++ b/ghdata/githubapi.py @@ -1,4 +1,8 @@ + +import datetime +from dateutil.parser import parse import pandas as pd +import github class GitHubAPI(object): @@ -6,37 +10,142 @@ class GitHubAPI(object): GitHubAPI is a class for getting metrics from the GitHub API """ def __init__(self, api_key): - """ - Creates a new GitHub instance + """ + Creates a new GitHub instance - :param api_key: GitHub API key - """ - import github - self.GITUB_API_KEY = api_key - self.__api = github.Github(api_key) + :param api_key: GitHub API key + """ + self.GITUB_API_KEY = api_key + self.__api = github.Github(api_key) - def contributions_by_file(self, owner, repo, start=None, end=None): + def contributions_by_file(self, owner, repo, filename=None, start=None, end=None, ascending=False): """ Gets number of addtions and deletions in each file by user - Puts it in dataframe with columns: - file user num of additions num of deletion total changes - Currently ignores changes from local users unattributed to Github users - WORK IN PROGRESS - + :param owner: repo owner username + :param repo: repo name + :param filename: optional; file or directory for function to run on + :param start: optional; start time for analysis + :param end: optional; end time for analysis + :param ascending: Default False; returns dataframe in ascending order """ + if start != None: + start = parse(start) + else: + start = github.GithubObject.NotSet + + if end != None: + end = parse(end) + else: + end = github.GithubObject.NotSet + + commits = self.__api.get_repo((owner + "/" + repo)).get_commits(since=start, until=end) + + if filename != None: + self.__api.get_repo((owner + "/" + repo)).get_contents(filename) + df = [] - for commit in self.__api.get_repo((owner + "/" + 
repo)).get_commits(since=start,until=end): + + for commit in commits: for file in commit.files: + if filename != None: + try: + if file.changes != 0 and file.filename == filename: + df.append({'user': commit.author.login, 'file': file.filename, 'number of additions': file.additions, 'number of deletions': file.deletions, 'total': file.changes}) + except AttributeError: + pass + else: + try: + if file.changes != 0: + df.append({'user': commit.author.login, 'file': file.filename, 'number of additions': file.additions, 'number of deletions': file.deletions, 'total': file.changes}) + except AttributeError: + pass + + df = pd.DataFrame(df) + + df = df.groupby(["file", "user"]).sum() + + df = df.sort_values(ascending=ascending) + + return df + + def contributions_by_percentage(self, owner, repo, filename=None, start=None, end=None, ascending=False): + """ + Calculates percentage of commits in repo by user + + Puts it in dataframe with columns: + user percentage of commits + + Currently ignores changes from local users unattributed to Github user + + :param owner: repo owner username + :param repo: repo name + :param filename: optional; file or directory for function to run on + :param start: optional; start time for analysis + :param end: optional; end time for analysis + :param ascending: Default False; returns dataframe in ascending order + """ + if start != None: + start = parse(start) + else: + start = github.GithubObject.NotSet + + if end != None: + end = parse(end) + else: + end = github.GithubObject.NotSet + + commits = self.__api.get_repo((owner + "/" + repo)).get_commits(since=start, until=end) + + if filename != None: + self.__api.get_repo((owner + "/" + repo)).get_contents(filename) + + df = [] + + if filename != None: + for commit in commits: + for file in commit.files: + if file.filename == filename: + try: + df.append({'user': commit.author.login}) + except AttributeError: + pass + break + else: + for commit in commits: try: - df.append({'user': commit.author.login, 'file': file.filename, 'additions': file.additions, 'deletions': file.deletions, 'total': file.changes}) + df.append({'user': commit.author.login}) except AttributeError: pass df = pd.DataFrame(df) - df.groupby(["file" ,"user"]).sum() + df = df.groupby(['user']).user.count() / df.groupby(['user']).user.count().sum() * 100 + + df = df.sort_values(ascending=ascending) return df + + def bus_factor(self, owner, repo, filename=None, start=None, end=None, threshold=50, best=False): + """ + Calculates bus factor by adding up percentages from highest to lowest until they exceed threshold + + :param owner: repo owner username + :param repo: repo name + :param filename: optional; file or directory for function to run on + :param start: optional; start time for analysis + :param end: optional; end time for analysis + :param threshold: Default 50; + :param best: Default False; If true, sums from lowest to highestn + """ + + df = self.contributions_by_percentage(owner, repo, filename, start, end, best) + + i = 0 + for num in df.cumsum(): + i = i + 1 + if num >= threshold: + bus_factor = pd.Series(i, index=["Bus Factor"]) + return bus_factor diff --git a/ghdata/publicwww.py b/ghdata/publicwww.py --- a/ghdata/publicwww.py +++ b/ghdata/publicwww.py @@ -1,10 +1,13 @@ -import pandas as pd +""" +PublicWWW is a class for making API requests to https://publicwww.com/ a +search engine for the source of websites +""" import sys -if (sys.version_info > (3, 0)): +import pandas as pd +if sys.version_info > (3, 0): import urllib.parse as url 
else: import urllib as url -import requests class PublicWWW(object): @@ -13,13 +16,13 @@ class PublicWWW(object): search engine for the source of websites """ - def __init__(self, public_www_api_key): + def __init__(self, api_key): """ Initalizes a PublicWWW instance - :param public_www_api_key: The API key for PublicWWW. This is required to get the full names of more results + :param api_key: The API key for PublicWWW. This is required to get the full names of more results """ - self.PUBLIC_WWW_API_KEY = public_www_api_key + self.__api_key = api_key def linking_websites(self, owner, repo): """ @@ -32,8 +35,9 @@ def linking_websites(self, owner, repo): """ # Find websites that link to that repo - repo_url="https://github.com/{owner}/{repo}".format(owner=owner, repo=repo) + repo_url = "https://github.com/{owner}/{repo}".format(owner=owner, repo=repo) query = '<a+href%3D"{repourl}"'.format(repourl=url.quote_plus(repo_url)) - r = 'https://publicwww.com/websites/{query}/?export=csv&apikey={apikey}'.format(query=query, apikey=self.PUBLIC_WWW_API_KEY) - result = pd.read_csv(r, delimiter=';', header=None, names=['url', 'rank']) - return result \ No newline at end of file + req = 'https://publicwww.com/websites/{query}/?export=csv&apikey={apikey}' + req.format(query=query, apikey=self.__api_key) + result = pd.read_csv(req, delimiter=';', header=None, names=['url', 'rank']) + return result diff --git a/ghdata/server.py b/ghdata/server.py --- a/ghdata/server.py +++ b/ghdata/server.py @@ -1,33 +1,30 @@ #SPDX-License-Identifier: MIT +import ghdata -from flask import Flask, request, Response, json, send_from_directory -from flask_cors import CORS, cross_origin import os import sys -import datetime if (sys.version_info > (3, 0)): import configparser as configparser else: import ConfigParser as configparser -from dateutil import parser, tz -import ghdata +from flask import Flask, request, Response, send_from_directory +from flask_cors import CORS GHDATA_API_VERSION = 'unstable' - def serialize(func, **args): """ Serailizes a function that returns a dataframe """ data = func(**args) - if (hasattr(data, 'to_json')): + if hasattr(data, 'to_json'): return data.to_json(orient='records', date_format='iso', date_unit='ms') else: return data -def flaskify_ghtorrent(flaskapp, func): +def flaskify_ghtorrent(ghtorrent, func): """ Simplifies API endpoints that just accept owner and repo, serializes them and spits them out @@ -35,404 +32,446 @@ def flaskify_ghtorrent(flaskapp, func): def generated_function(owner, repo): repoid = ghtorrent.repoid(owner=owner, repo=repo) return Response(response=serialize(func, repoid=repoid), - status=200, - mimetype="application/json") + status=200, + mimetype="application/json") generated_function.__name__ = func.__name__ return generated_function -def flaskify(flaskapp, func): +def flaskify(func): """ Simplifies API endpoints that just accept owner and repo, serializes them and spits them out """ def generated_function(owner, repo): return Response(response=serialize(func, owner=owner, repo=repo), - status=200, - mimetype="application/json") + status=200, + mimetype="application/json") generated_function.__name__ = func.__name__ return generated_function +def read_config(parser, section, name, environment_variable, default): + try: + value = os.getenv(environment_variable, parser.get(section, name)) + return value + except: + if not parser.has_section(section): + parser.add_section(section) + parser.set(section, name, default) + with open('ghdata.cfg', 'w') as configfile: + 
parser.write(configfile) + return default + -app = Flask(__name__, static_url_path=os.path.abspath('static/')) -CORS(app) -# Flags and Initialization +def run(): -"""Reads the config file""" -try: + app = Flask(__name__) + CORS(app) # Try to open the config file and parse it parser = configparser.RawConfigParser() parser.read('ghdata.cfg') - host = parser.get('Server', 'host') - port = parser.get('Server', 'port') + try: - dbstr = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(parser.get('Database', 'user'), parser.get('Database', 'pass'), parser.get('Database', 'host'), parser.get('Database', 'port'), parser.get('Database', 'name')) + dbstr = 'mysql+pymysql://{}:{}@{}:{}/{}'.format( + read_config(parser, 'Database', 'user', 'GHDATA_DB_USER', 'root'), + read_config(parser, 'Database', 'pass', 'GHDATA_DB_PASS', 'password'), + read_config(parser, 'Database', 'host', 'GHDATA_DB_HOST', '127.0.0.1'), + read_config(parser, 'Database', 'port', 'GHDATA_DB_PORT', '3306'), + read_config(parser, 'Database', 'name', 'GHDATA_DB_NAME', 'msr14') + ) + print("Connecting with " + dbstr) ghtorrent = ghdata.GHTorrent(dbstr=dbstr) except Exception as e: print("Failed to connect to database (" + str(e) + ")"); - publicwww = ghdata.PublicWWW(public_www_api_key=parser.get('PublicWWW', 'APIKey')) - if (parser.get('Development', 'developer') == '1' or os.getenv('FLASK_DEBUG') == '1'): - DEBUG = True - else: - DEBUG = False - -except Exception as e: - # Uh-oh. Save a new config file. - print('Failed to open config file.') - print('Error: ' + str(e)) - config = configparser.RawConfigParser() - config.add_section('Server') - config.set('Server', 'host', '0.0.0.0') - config.set('Server', 'port', '5000') - config.add_section('Database') - config.set('Database', 'host', '127.0.0.1') - config.set('Database', 'port', '3306') - config.set('Database', 'user', 'root') - config.set('Database', 'pass', 'root') - config.set('Database', 'name', 'ghtorrent') - config.add_section('PublicWWW') - config.set('PublicWWW', 'APIKey', '0') - config.add_section('Development') - config.set('Development', 'developer', '0') - # Writing our configuration file to 'example.cfg' - with open('ghdata.cfg', 'w') as configfile: - config.write(configfile) - print('Default config saved to ghdata.cfg') - sys.exit() - - - -""" -@api {get} / API Status -@apiName Status -@apiGroup Misc -""" [email protected]('/{}/'.format(GHDATA_API_VERSION)) -def api_root(): - """API status""" - # @todo: When we support multiple data sources this should keep track of their status - return """{"status": "healthy", "ghtorrent": "online"}""" - -####################### -# Timeseries # -####################### - -# @todo: Link to LF Metrics - -""" -@api {get} /:owner/:repo/commits Commits by Week -@apiName CommitsByWeek -@apiGroup Timeseries - -@apiParam {String} owner Username of the owner of the GitHub repository -@apiParam {String} repo Name of the GitHub repository - -@apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "commits": 153 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "commits": 192 - } - ] -""" -app.route('/{}/<owner>/<repo>/timeseries/commits'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.commits)) - -""" -@api {get} /:owner/:repo/forks Forks by Week -@apiName ForksByWeek -@apiGroup Timeseries - -@apiParam {String} owner Username of the owner of the GitHub repository -@apiParam {String} repo Name of the GitHub repository - -@apiSuccessExample {json} Success-Response: - [ - { - "date": 
"2015-01-01T00:00:00.000Z", - "forks": 13 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "forks": 12 - } - ] -""" -app.route('/{}/<owner>/<repo>/timeseries/forks'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.forks)) - -""" -@api {get} /:owner/:repo/issues Issues by Week -@apiName IssuesByWeek -@apiGroup Timeseries - -@apiParam {String} owner Username of the owner of the GitHub repository -@apiParam {String} repo Name of the GitHub repository - -@apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "issues":13 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "issues":15 - } - ] -""" -app.route('/{}/<owner>/<repo>/timeseries/issues'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.issues)) - -""" -@api {get} /:owner/:repo/issues/response_time Issue Response Time -@apiName IssueResponseTime -@apiGroup Timeseries - -@apiParam {String} owner Username of the owner of the GitHub repository -@apiParam {String} repo Name of the GitHub repository - -@apiSuccessExample {json} Success-Response: - [ - { - "created_at": "2013-09-16T17:00:54.000Z", - "responded_at": "2013-09-16T17:20:58.000Z" - }, - { - "created_at": "2013-09-16T09:31:34.000Z", - "responded_at": "2013-09-16T09:43:03.000Z" - } - ] -""" -app.route('/{}/<owner>/<repo>/timeseries/issues/response_time'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.issue_response_time)) - -""" -@api {get} /:owner/:repo/pulls Pull Requests by Week -@apiName PullRequestsByWeek -@apiGroup Timeseries - -@apiParam {String} owner Username of the owner of the GitHub repository -@apiParam {String} repo Name of the GitHub repository - -@apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "pull_requests": 1 - "comments": 11 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "pull_requests": 2 - "comments": 31 - } - ] -""" -app.route('/{}/<owner>/<repo>/timeseries/pulls'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.pulls)) - -""" -@api {get} /:owner/:repo/stargazers Stargazers by Week -@apiName StargazersByWeek -@apiGroup Timeseries - -@apiParam {String} owner Username of the owner of the GitHub repository -@apiParam {String} repo Name of the GitHub repository - -@apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "watchers": 133 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "watchers": 54 - } - ] -""" -app.route('/{}/<owner>/<repo>/timeseries/stargazers'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.stargazers)) - -""" -@api {get} /:owner/:repo/pulls/acceptance_rate Pull Request Acceptance Rate by Week -@apiDescription For each week, the rate is calculated as (pull requests merged that week) / (pull requests opened that week) -@apiName Stargazers -@apiGroup Timeseries - -@apiParam {String} owner Username of the owner of the GitHub repository -@apiParam {String} repo Name of the GitHub repository - -@apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "rate": 0.5 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "rate": 0.33 - } - ] -""" -app.route('/{}/<owner>/<repo>/pulls/acceptance_rate'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.pull_acceptance_rate)) - -# Contribution Trends -""" -@api {get} /:owner/:repo/contributors Total Contributions by User -@apiName TotalContributions -@apiGroup Users - -@apiParam {String} owner Username of the owner of the GitHub repository -@apiParam {String} repo Name of the GitHub 
repository - -@apiSuccessExample {json} Success-Response: - [ - { - "login": "foo", - "location": "Springfield", - "commits": 1337.0, - "pull_requests": 60.0, - "issues": null, - "commit_comments": 158.0, - "pull_request_comments": 718.0, - "issue_comments": 1668.0 - }, - { - "login": "bar", - "location": null, - "commits": 3968.0, - "pull_requests": null, - "issues": 12.0, - "commit_comments": 158.0, - "pull_request_comments": 718.0, - "issue_comments": 1568.0 - } - ] -""" -app.route('/{}/<owner>/<repo>/contributors'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.contributors)) - -####################### -# Contribution Trends # -####################### - -""" -@api {get} /:owner/:repo/contributions Contributions by Week -@apiName ContributionsByWeek -@apiGroup Timeseries - -@apiParam {String} owner Username of the owner of the GitHub repository -@apiParam {String} repo Name of the GitHub repository -@apiParam (String) user Limit results to the given user's contributions - -@apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "commits": 37.0, - "pull_requests": null, - "issues": null, - "commit_comments": 7.0, - "pull_request_comments": 8.0, - "issue_comments": 17.0 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "commits": 68.0, - "pull_requests": null, - "issues": 12.0, - "commit_comments": 18.0, - "pull_request_comments": 13.0, - "issue_comments": 28.0 - } - ] -""" [email protected]('/{}/<owner>/<repo>/contributions'.format(GHDATA_API_VERSION)) -def contributions(owner, repo): - repoid = ghtorrent.repoid(owner=owner, repo=repo) - user = request.args.get('user') - if (user): - userid = ghtorrent.userid(username=user) - contribs = ghtorrent.contributions(repoid=repoid, userid=userid) + + host = read_config(parser, 'Server', 'host', 'GHDATA_HOST', '0.0.0.0') + port = read_config(parser, 'Server', 'port', 'GHDATA_PORT', '5000') + + publicwww = ghdata.PublicWWW(api_key=read_config(parser, 'PublicWWW', 'APIKey', 'GHDATA_PUBLIC_WWW_API_KEY', 'None')) + github = ghdata.GitHubAPI(api_key=read_config(parser, 'GitHub', 'APIKey', 'GHDATA_GITHUB_API_KEY', 'None')) + + if (read_config(parser, 'Development', 'developer', 'GHDATA_DEBUG', '0') == '1'): + debugmode = True else: - contribs = ghtorrent.contributions(repoid=repoid) - return Response(response=contribs, - status=200, - mimetype="application/json") - -# Diversity - -""" -@api {get} /:owner/:repo/commits/locations Commits and Location by User -@apiName Stargazers -@apiGroup Diversity - -@apiParam {String} owner Username of the owner of the GitHub repository -@apiParam {String} repo Name of the GitHub repository - -@apiSuccessExample {json} Success-Response: - [ - { - "login": "bonnie", - "location": "Rowena, TX", - "commits": 12 - }, - { - "login":"clyde", - "location":"Ellis County, TX", - "commits": 12 - } - ] -""" -app.route('/{}/<owner>/<repo>/commits/locations'.format(GHDATA_API_VERSION))(flaskify_ghtorrent(app, ghtorrent.committer_locations)) - -# Popularity -""" -@api {get} /:owner/:repo/linking_websites Linking Websites -@apiDescription Returns an array of websites and their rank according to http://publicwww.com/ -@apiName LinkingWebsites -@apiGroup Popularity - -@apiParam {String} owner Username of the owner of the GitHub repository -@apiParam {String} repo Name of the GitHub repository - -@apiSuccessExample {json} Success-Response: - [ - { - "url": "missouri.edu", - "rank": "1" - }, - { - "url": "unomaha.edu", - "rank": "2" - } - ] -""" 
-app.route('/{}/<owner>/<repo>/linking_websites'.format(GHDATA_API_VERSION))(flaskify(app, publicwww.linking_websites)) - - -if (DEBUG): - print(" * Serving static routes") - # Serve the front-end files in debug mode to make it easier for developers to work on the interface - # @todo: Figure out why this isn't working. - @app.route('/') - def index(): - root_dir = os.path.dirname(os.getcwd()) - print(root_dir + '/ghdata/static') - return send_from_directory(root_dir + '/ghdata/ghdata/static', 'index.html') - - @app.route('/scripts/<path>') - def send_scripts(path): - root_dir = os.path.dirname(os.getcwd()) - return send_from_directory(root_dir + '/ghdata/ghdata/static/scripts', path) - - @app.route('/styles/<path>') - def send_styles(path): - root_dir = os.path.dirname(os.getcwd()) - return send_from_directory(root_dir+ '/ghdata/ghdata/static/styles', path) - - app.debug = True + debugmode = False -def run(): - app.run(host=host, port=int(port), debug=DEBUG) + + + """ + @api {get} / API Status + @apiName Status + @apiGroup Misc + """ + @app.route('/{}/'.format(GHDATA_API_VERSION)) + def api_root(): + """API status""" + # @todo: When we support multiple data sources this should keep track of their status + # @todo: Add GHTorrent test to determine status + ghtorrent_status = "good" + # @todo: Add GitHub API status + # @todo: Add PublicWWW API status + return """{"status": "healthy", "ghtorrent": "{}"}""".format(ghtorrent_status) + + ####################### + # Timeseries # + ####################### + + # @todo: Link to LF Metrics + + """ + @api {get} /:owner/:repo/commits Commits by Week + @apiName CommitsByWeek + @apiGroup Timeseries + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "commits": 153 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "commits": 192 + } + ] + """ + app.route('/{}/<owner>/<repo>/timeseries/commits'.format(GHDATA_API_VERSION))( + flaskify_ghtorrent(ghtorrent, ghtorrent.commits)) + + """ + @api {get} /:owner/:repo/forks Forks by Week + @apiName ForksByWeek + @apiGroup Timeseries + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "forks": 13 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "forks": 12 + } + ] + """ + app.route('/{}/<owner>/<repo>/timeseries/forks'.format(GHDATA_API_VERSION))( + flaskify_ghtorrent(ghtorrent, ghtorrent.forks)) + + """ + @api {get} /:owner/:repo/issues Issues by Week + @apiName IssuesByWeek + @apiGroup Timeseries + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "issues":13 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "issues":15 + } + ] + """ + app.route('/{}/<owner>/<repo>/timeseries/issues'.format(GHDATA_API_VERSION))( + flaskify_ghtorrent(ghtorrent, ghtorrent.issues)) + + """ + @api {get} /:owner/:repo/issues/response_time Issue Response Time + @apiName IssueResponseTime + @apiGroup Timeseries + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "created_at": 
"2013-09-16T17:00:54.000Z", + "responded_at": "2013-09-16T17:20:58.000Z" + }, + { + "created_at": "2013-09-16T09:31:34.000Z", + "responded_at": "2013-09-16T09:43:03.000Z" + } + ] + """ + app.route('/{}/<owner>/<repo>/timeseries/issues/response_time'.format(GHDATA_API_VERSION))( + flaskify_ghtorrent(ghtorrent, ghtorrent.issue_response_time)) + + """ + @api {get} /:owner/:repo/pulls Pull Requests by Week + @apiName PullRequestsByWeek + @apiGroup Timeseries + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "pull_requests": 1 + "comments": 11 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "pull_requests": 2 + "comments": 31 + } + ] + """ + app.route('/{}/<owner>/<repo>/timeseries/pulls'.format(GHDATA_API_VERSION))( + flaskify_ghtorrent(ghtorrent, ghtorrent.pulls)) + + """ + @api {get} /:owner/:repo/stargazers Stargazers by Week + @apiName StargazersByWeek + @apiGroup Timeseries + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "watchers": 133 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "watchers": 54 + } + ] + """ + app.route('/{}/<owner>/<repo>/timeseries/stargazers'.format(GHDATA_API_VERSION))( + flaskify_ghtorrent(ghtorrent, ghtorrent.stargazers)) + + """ + @api {get} /:owner/:repo/pulls/acceptance_rate Pull Request Acceptance Rate by Week + @apiDescription For each week, the rate is calculated as (pull requests merged that week) / (pull requests opened that week) + @apiName Stargazers + @apiGroup Timeseries + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "rate": 0.5 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "rate": 0.33 + } + ] + """ + app.route('/{}/<owner>/<repo>/pulls/acceptance_rate'.format(GHDATA_API_VERSION))( + flaskify_ghtorrent(ghtorrent, ghtorrent.pull_acceptance_rate)) + + # Contribution Trends + """ + @api {get} /:owner/:repo/contributors Total Contributions by User + @apiName TotalContributions + @apiGroup Users + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "login": "foo", + "location": "Springfield", + "commits": 1337.0, + "pull_requests": 60.0, + "issues": null, + "commit_comments": 158.0, + "pull_request_comments": 718.0, + "issue_comments": 1668.0 + }, + { + "login": "bar", + "location": null, + "commits": 3968.0, + "pull_requests": null, + "issues": 12.0, + "commit_comments": 158.0, + "pull_request_comments": 718.0, + "issue_comments": 1568.0 + } + ] + """ + app.route('/{}/<owner>/<repo>/contributors'.format(GHDATA_API_VERSION))( + flaskify_ghtorrent(ghtorrent, ghtorrent.contributors)) + + ####################### + # Contribution Trends # + ####################### + + """ + @api {get} /:owner/:repo/contributions Contributions by Week + @apiName ContributionsByWeek + @apiGroup Timeseries + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiParam (String) user Limit results to the given user's contributions + 
+ @apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "commits": 37.0, + "pull_requests": null, + "issues": null, + "commit_comments": 7.0, + "pull_request_comments": 8.0, + "issue_comments": 17.0 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "commits": 68.0, + "pull_requests": null, + "issues": 12.0, + "commit_comments": 18.0, + "pull_request_comments": 13.0, + "issue_comments": 28.0 + } + ] + """ + @app.route('/{}/<owner>/<repo>/contributions'.format(GHDATA_API_VERSION)) + def contributions(owner, repo): + repoid = ghtorrent.repoid(owner=owner, repo=repo) + user = request.args.get('user') + if (user): + userid = ghtorrent.userid(username=user) + contribs = ghtorrent.contributions(repoid=repoid, userid=userid) + else: + contribs = ghtorrent.contributions(repoid=repoid) + return Response(response=contribs, + status=200, + mimetype="application/json") + + # Diversity + + """ + @api {get} /:owner/:repo/commits/locations Commits and Location by User + @apiName Stargazers + @apiGroup Diversity + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "login": "bonnie", + "location": "Rowena, TX", + "commits": 12 + }, + { + "login":"clyde", + "location":"Ellis County, TX", + "commits": 12 + } + ] + """ + app.route('/{}/<owner>/<repo>/commits/locations'.format(GHDATA_API_VERSION))( + flaskify_ghtorrent(ghtorrent, ghtorrent.committer_locations)) + + # Popularity + """ + @api {get} /:owner/:repo/linking_websites Linking Websites + @apiDescription Returns an array of websites and their rank according to http://publicwww.com/ + @apiName LinkingWebsites + @apiGroup Popularity + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "url": "missouri.edu", + "rank": "1" + }, + { + "url": "unomaha.edu", + "rank": "2" + } + ] + """ + app.route('/{}/<owner>/<repo>/linking_websites'.format(GHDATA_API_VERSION))(flaskify(publicwww.linking_websites)) + + ####################### + # GitHub API # + ####################### + + """ + @api {get} /:owner/:repo/bus_factor Bus Factor + @apiDescription Returns an integer that is the number of develpers that have a summed percentage of contributions higher than the threshold + @apiName GitHub + @apiGroup Users + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiParam {String} filename: optional; file or directory for function to run on + @apiParam {String} start: optional; start time for analysis + @apiParam {String} end: optional; end time for analysis + @apiParam {String} threshold: Default 50; + @apiParam {String} best: Default False; If true, sums from lowest to highest + + @apiSuccessExample {json} Success-Response: + [ + { + "repo": "ghdata", + "bus_factor": "2" + } + ] + """ + @app.route('/{}/<owner>/<repo>/bus_factor'.format(GHDATA_API_VERSION)) + def bus_factor(owner,repo): + kwargs = request.args.to_dict() + return Response(response=github.bus_factor(owner, repo, **kwargs).to_json(), status=200, mimetype="application/json") + + + + if (debugmode): + print(" * Serving static routes") + # Serve the front-end files in debug mode to make it easier for developers to work on the interface + # @todo: Figure out why this isn't working. 
+ @app.route('/') + def index(): + root_dir = os.path.dirname(os.getcwd()) + print(root_dir + '/ghdata/static') + return send_from_directory(root_dir + '/ghdata/ghdata/static', 'index.html') + + @app.route('/scripts/<path>') + def send_scripts(path): + root_dir = os.path.dirname(os.getcwd()) + return send_from_directory(root_dir + '/ghdata/ghdata/static/scripts', path) + + @app.route('/styles/<path>') + def send_styles(path): + root_dir = os.path.dirname(os.getcwd()) + return send_from_directory(root_dir+ '/ghdata/ghdata/static/styles', path) + + app.debug = True + + app.run(host=host, port=int(port), debug=debugmode) if __name__ == '__main__': - run() \ No newline at end of file + run()
diff --git a/test/test_publicwww.py b/test/test_publicwww.py --- a/test/test_publicwww.py +++ b/test/test_publicwww.py @@ -7,7 +7,7 @@ def publicwww(): import ghdata key = os.getenv("PUBLIC_WWW_TEST_API_KEY") assert key is not None and len(key) >= 1 - return ghdata.PublicWWW(public_www_api_key=key) + return ghdata.PublicWWW(key) def test_linking_websites(publicwww): assert publicwww.linking_websites(owner='yihui', repo='knitr').isin(["sohu.com"]).any \ No newline at end of file
problem_statement:
Uninstall
Using the current install command to update the version of ghdata conflicts with previously installed versions at the OS level and causes errors whenever you try to start ghdata afterwards. If you add an uninstall script or post the command for it in the README, it would make it a lot easier to stay current. Thanks, Spencer Robinson
created_at: 2017-05-11T14:41:44Z
PASS_TO_PASS: []
FAIL_TO_PASS: []
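The base_commit, patch, test_patch, and FAIL_TO_PASS fields of a record like the one above suggest the usual replay flow: check out the base commit in a clone of the repository, apply the gold patch and the test patch, then run the listed tests. A minimal sketch under those assumptions; the helper name and the use of `git apply` are illustrative, not part of the dataset tooling.

```python
import subprocess

def apply_record(repo_dir, record):
    """Check out the record's base commit and apply its gold and test patches."""
    subprocess.run(["git", "checkout", record["base_commit"]], cwd=repo_dir, check=True)
    for field in ("patch", "test_patch"):
        # "-" tells git apply to read the diff from stdin.
        subprocess.run(["git", "apply", "-"], cwd=repo_dir, check=True,
                       input=record[field].encode())
    # After this, the tests listed in record["FAIL_TO_PASS"] are expected to pass.
```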
repo: chaoss/augur
pull_number: 67
instance_id: chaoss__augur-67
issue_numbers: ["56"]
base_commit: ccdcba9bae81d4b4256b928e9c4879aca811578b
diff --git a/docs/python/conf.py b/docs/python/source/conf.py similarity index 89% rename from docs/python/conf.py rename to docs/python/source/conf.py --- a/docs/python/conf.py +++ b/docs/python/source/conf.py @@ -1,7 +1,8 @@ +#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # GHData documentation build configuration file, created by -# sphinx-quickstart on Mon Feb 20 10:26:03 2017. +# sphinx-quickstart on Tue Oct 24 12:27:08 2017. # # This file is execfile()d with the current directory set to its # containing dir. @@ -16,9 +17,9 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) +import os +import sys +sys.path.insert(0, os.path.abspath('../../')) # -- General configuration ------------------------------------------------ @@ -35,7 +36,8 @@ 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', - 'sphinx.ext.imgmath', + 'sphinx.ext.mathjax', + 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages'] @@ -52,18 +54,18 @@ master_doc = 'index' # General information about the project. -project = u'GHData' -copyright = u'2017, OSSHealth Team' -author = u'OSSHealth Team' +project = 'GHData' +copyright = '2017, GHData Contributors' +author = 'GHData Contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u'' +version = '' # The full version, including alpha/beta/rc tags. -release = u'' +release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -75,7 +77,7 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' @@ -84,6 +86,8 @@ todo_include_todos = True + + # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for @@ -133,8 +137,8 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'GHData.tex', u'GHData Documentation', - u'OSSHealth Team', 'manual'), + (master_doc, 'GHData.tex', 'GHData Documentation', + 'GHData Contributors', 'manual'), ] @@ -143,7 +147,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ - (master_doc, 'ghdata', u'GHData Documentation', + (master_doc, 'ghdata', 'GHData Documentation', [author], 1) ] @@ -154,7 +158,7 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'GHData', u'GHData Documentation', + (master_doc, 'GHData', 'GHData Documentation', author, 'GHData', 'One line description of project.', 'Miscellaneous'), ] diff --git a/ghdata/__init__.py b/ghdata/__init__.py --- a/ghdata/__init__.py +++ b/ghdata/__init__.py @@ -1,3 +1,7 @@ from .ghtorrent import GHTorrent from .publicwww import PublicWWW -from .githubapi import GitHubAPI \ No newline at end of file +from .githubapi import GitHubAPI +from .librariesio import LibrariesIO +from .util import makeRelative +from .downloads import Downloads +from .localcsv import LocalCSV \ No newline at end of file diff --git a/deploy.py b/ghdata/deploy.py similarity index 100% rename from deploy.py rename to ghdata/deploy.py diff --git a/ghdata/downloads.py b/ghdata/downloads.py new file mode 100644 --- /dev/null +++ b/ghdata/downloads.py @@ -0,0 +1,68 @@ +import json +import pandas as pd +import requests +import datetime +import base64 + +class Downloads(object): + """Class for retrieveing download information using APIs and web scrapers""" + def __init__(self, githubapi): + self.__githubapi = githubapi._GitHubAPI__api + + + def downloads(self, owner, repo): + """ + Detects package file and calls correct function for download statistics + + :param owner: repo owner username + :param repo: repo name + """ + root_dir = self.__githubapi.get_repo((owner + "/" + repo)).get_dir_contents("/") + + for file in root_dir: + if file.name == "Gemfile": + return self.ruby_downloads(repo) + if file.name == "package.json": + contents = base64.b64decode(file.content) + contents = contents.decode('utf-8') + return self.npm_downloads(repo, contents) + + def ruby_downloads(self, repo): + """ + Returns daily downloads for ruby gems from bestgems.org API + + :param repo: repo name + """ + r = requests.get("http://bestgems.org/api/v1/gems/%s/daily_downloads.json" % (repo)) + raw = r.text + df = pd.DataFrame(json.loads(json.loads(json.dumps(raw)))) + + columnsTitles=["date","daily_downloads"] + df = df.reindex(columns= columnsTitles) + df.rename(columns= {"daily_downloads" : "downloads"}, inplace=True) + + return df + + def npm_downloads(self, repo, contents): + """ + Returns daily downloads for ruby gems from bestgems.org API + + :param repo: repo name + :param contents: contents of package.json + """ + contents = json.loads(json.loads(json.dumps(contents))) + name = contents["name"] + dates = [] + r = requests.get("https://api.npmjs.org/downloads/range/0:%s/%s" % (datetime.datetime.today().strftime('%Y-%m-%d'), name)) + raw = r.text + raw = json.loads(json.loads(json.dumps(raw))) + df = pd.DataFrame(raw["downloads"]) + df.rename(columns= {"day" : "date"}, inplace=True) + + for i, row in df.iterrows(): + if row["downloads"] != 0: + break + else: + df.drop(i, inplace=True) + + return df \ No newline at end of file diff --git a/ghdata/ghtorrent.py b/ghdata/ghtorrent.py --- a/ghdata/ghtorrent.py +++ b/ghdata/ghtorrent.py @@ -1,6 +1,7 @@ #SPDX-License-Identifier: MIT import pandas as pd import sqlalchemy as s +import numpy as np import re class GHTorrent(object): @@ -19,22 +20,55 @@ def __init__(self, dbstr): except Exception as e: print("Could not connect to database.\nError: " + str(e)) - def __single_table_count_by_date(self, table, repo_col='project_id'): + 
def __single_table_count_by_date(self, table, repo_col='project_id', user_col='author_id', group_by="week"): """ Generates query string to count occurances of rows per date for a given table. External input must never be sent to this function, it is for internal use only. :param table: The table in GHTorrent to generate the string for :param repo_col: The column in that table with the project ids + :param user_col: The column in that table with the user ids + :param group_by: Default week; Options raw, day, week, month, year; Selects period of time to be grouped by :return: Query string """ - return """ - SELECT date(created_at) AS "date", COUNT(*) AS "{0}" - FROM {0} - WHERE {1} = :repoid - GROUP BY WEEK(created_at)""".format(table, repo_col) - - def repoid(self, owner, repo): + if group_by == "raw": + return """ + SELECT date(created_at) AS "date", {2} AS "user_id" + FROM {0} + WHERE {1} = :repoid + """.format(table, repo_col, user_col) + + if group_by == "day": + return """ + SELECT date(created_at) AS "date", COUNT(*) AS "{0}" + FROM {0} + WHERE {1} = :repoid + GROUP BY DATE(created_at)""".format(table, repo_col) + + if group_by == "week": + return """ + SELECT date(created_at) AS "date", COUNT(*) AS "{0}" + FROM {0} + WHERE {1} = :repoid + GROUP BY YEARWEEK(created_at)""".format(table, repo_col) + + if group_by == "month": + return """ + SELECT date(created_at) AS "date", COUNT(*) AS "{0}" + FROM {0} + WHERE {1} = :repoid + GROUP BY MONTH(created_at), YEAR(created_at)""".format(table, repo_col) + + if group_by == "year": + return """ + SELECT date(created_at) AS "date", COUNT(*) AS "{0}" + FROM {0} + WHERE {1} = :repoid + GROUP BY YEAR(created_at)""".format(table, repo_col) + + + + def repoid(self, owner_or_repoid, repo=None): """ Returns a repository's ID as it appears in the GHTorrent projects table github.com/[owner]/[project] @@ -43,11 +77,14 @@ def repoid(self, owner, repo): :param repo: The name of the repository :return: The repository's ID as it appears in the GHTorrent projects table """ - reposql = s.sql.text('SELECT projects.id FROM projects INNER JOIN users ON projects.owner_id = users.id WHERE projects.name = :repo AND users.login = :owner') repoid = 0 - result = self.db.execute(reposql, repo=repo, owner=owner) - for row in result: - repoid = row[0] + if repo is None: + repoid = owner_or_repoid + else: + reposql = s.sql.text('SELECT projects.id FROM projects INNER JOIN users ON projects.owner_id = users.id WHERE projects.name = :repo AND users.login = :repoowner') + result = self.db.execute(reposql, repo=repo, repoowner=owner_or_repoid) + for row in result: + repoid = row[0] return repoid def userid(self, username): @@ -66,53 +103,65 @@ def userid(self, username): # Basic timeseries queries - def stargazers(self, repoid, start=None, end=None): + def stargazers(self, owner, repo=None, group_by="week"): """ Timeseries of when people starred a repo - :param repoid: The id of the project in the projects table. Use repoid() to get this. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
:return: DataFrame with stargazers/day """ - stargazersSQL = s.sql.text(self.__single_table_count_by_date('watchers', 'repo_id')) - return pd.read_sql(stargazersSQL, self.db, params={"repoid": str(repoid)}) + repoid = self.repoid(owner, repo) + stargazersSQL = s.sql.text(self.__single_table_count_by_date('watchers', 'repo_id', 'user_id', group_by=group_by)) + df = pd.read_sql(stargazersSQL, self.db, params={"repoid": str(repoid)}) + df.drop(df.index[:1], inplace=True) + return df - def commits(self, repoid): + def commits(self, owner, repo=None, group_by="week"): """ Timeseries of all the commits on a repo - :param repoid: The id of the project in the projects table. Use repoid() to get this. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with commits/day """ - commitsSQL = s.sql.text(self.__single_table_count_by_date('commits')) + repoid = self.repoid(owner, repo) + commitsSQL = s.sql.text(self.__single_table_count_by_date('commits', group_by=group_by)) return pd.read_sql(commitsSQL, self.db, params={"repoid": str(repoid)}) - def forks(self, repoid): + def forks(self, owner, repo=None, group_by="week"): """ Timeseries of when a repo's forks were created - :param repoid: The id of the project in the projects table. Use repoid() to get this. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with forks/day """ - forksSQL = s.sql.text(self.__single_table_count_by_date('projects', 'forked_from')) + repoid = self.repoid(owner, repo) + forksSQL = s.sql.text(self.__single_table_count_by_date('projects', 'forked_from', 'owner_id', group_by=group_by)) return pd.read_sql(forksSQL, self.db, params={"repoid": str(repoid)}).drop(0) - def issues(self, repoid): + def issues(self, owner, repo=None, group_by="week"): """ Timeseries of when people starred a repo - :param repoid: The id of the project in the projects table. Use repoid() to get this. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with issues/day """ - issuesSQL = s.sql.text(self.__single_table_count_by_date('issues', 'repo_id')) + repoid = self.repoid(owner, repo) + issuesSQL = s.sql.text(self.__single_table_count_by_date('issues', 'repo_id', 'reporter_id', group_by=group_by)) return pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)}) - def issues_with_close(self, repoid): + def issues_with_close(self, owner, repo=None): """ How long on average each week it takes to close an issue - :param repoid: The id of the project in the projects table. Use repoid() to get this. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
:return: DataFrame with issues/day """ + repoid = self.repoid(owner, repo) issuesSQL = s.sql.text(""" SELECT issues.id as "id", issues.created_at as "date", @@ -127,13 +176,15 @@ def issues_with_close(self, repoid): WHERE issues.repo_id = :repoid""") return pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)}) - def pulls(self, repoid): + def pulls(self, owner, repo=None): """ Timeseries of pull requests creation, also gives their associated activity - :param repoid: The id of the project in the projects table. Use repoid() to get this. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with pull requests by day """ + repoid = self.repoid(owner, repo) pullsSQL = s.sql.text(""" SELECT date(pull_request_history.created_at) AS "date", (COUNT(pull_requests.id)) AS "pull_requests", @@ -148,13 +199,15 @@ def pulls(self, repoid): """) return pd.read_sql(pullsSQL, self.db, params={"repoid": str(repoid)}) - def contributors(self, repoid): + def contributors(self, owner, repo=None): """ All the contributors to a project and the counts of their contributions - :param repoid: The id of the project in the projects table. Use repoid() to get this. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with users id, users login, and their contributions by type """ + repoid = self.repoid(owner, repo) contributorsSQL = s.sql.text(""" SELECT * FROM @@ -204,14 +257,16 @@ def contributors(self, repoid): return pd.read_sql(contributorsSQL, self.db, index_col=['user_id'], params={"repoid": str(repoid)}) - def contributions(self, repoid, userid=None): + def contributions(self, owner, repo=None, userid=None): """ Timeseries of all the contributions to a project, optionally limited to a specific user - :param repoid: The id of the project in the projects table. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table + :param repo: The name of the repo. Unneeded if repository id was passed as owner. :param userid: The id of user if you want to limit the contributions to a specific user. :return: DataFrame with all of the contributions seperated by day. 
""" + repoid = self.repoid(owner, repo) rawContributionsSQL = """ SELECT DATE(coms.created_at) as "date", coms.count as "commits", @@ -239,6 +294,7 @@ def contributions(self, repoid, userid=None): LEFT JOIN (SELECT issue_comments.created_at AS created_at, COUNT(*) AS count FROM issue_comments JOIN issues ON issue_comments.issue_id = issues.id WHERE issues.repo_id = :repoid[[ AND issue_comments.user_id = :userid]] GROUP BY DATE(issue_comments.created_at)) AS isscoms ON DATE(isscoms.created_at) = DATE(coms.created_at) + GROUP BY YEARWEEK(coms.created_at) ORDER BY DATE(coms.created_at) """ @@ -252,15 +308,17 @@ def contributions(self, repoid, userid=None): parameterized = s.sql.text(rawContributionsSQL) return pd.read_sql(parameterized, self.db, params={"repoid": str(repoid)}) - def committer_locations(self, repoid): + def committer_locations(self, owner, repo=None): """ Return committers and their locations @todo: Group by country code instead of users, needs the new schema - :param repoid: The id of the project in the projects table. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. + :param repo: The name of the repo. :return: DataFrame with users and locations sorted by commtis """ + repoid = self.repoid(owner, repo) rawContributionsSQL = s.sql.text(""" SELECT users.login, users.location, COUNT(*) AS "commits" FROM commits @@ -269,21 +327,22 @@ def committer_locations(self, repoid): JOIN users ON users.id = commits.author_id WHERE project_commits.project_id = :repoid - AND LENGTH(users.location) > 1 GROUP BY users.id ORDER BY commits DESC """) return pd.read_sql(rawContributionsSQL, self.db, params={"repoid": str(repoid)}) - def issue_response_time(self, repoid): + def issue_response_time(self, owner, repo=None): """ How long it takes for issues to be responded to by people who have commits associate with the project - :param repoid: The id of the project in the projects table. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with the issues' id the date it was opened, and the date it was first responded to """ + repoid = self.repoid(owner, repo) issuesSQL = s.sql.text(""" SELECT issues.created_at AS "created_at", MIN(issue_comments.created_at) AS "responded_at" @@ -299,18 +358,24 @@ def issue_response_time(self, repoid): AND issues.repo_id = :repoid GROUP BY issues.id """) - return pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)}) + df = pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)}) + df['created_at'] = pd.to_datetime(df['created_at']) + df['responded_at'] = pd.to_datetime(df['responded_at']) + df['hours_between'] = np.floor((df['responded_at'] - df['created_at']) / np.timedelta64(1, 'h')) + df = df['hours_between'].value_counts().sort_index().reset_index().rename(columns={'index': 'hours_between', 'hours_between': 'count'}) + df = df[df['hours_between'] < 48] + return df - def pull_acceptance_rate(self, repoid): + def pull_acceptance_rate(self, owner, repo=None): """ Timeseries of pull request acceptance rate (Number of pull requests merged on a date over Number of pull requests opened on a date) - :param repoid: The id of the project in the projects table. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. 
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with the pull acceptance rate and the dates """ - + repoid = self.repoid(owner, repo) pullAcceptanceSQL = s.sql.text(""" - SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate" FROM (SELECT COUNT(DISTINCT pull_request_id) AS num_approved, DATE(pull_request_history.created_at) AS accepted_on @@ -330,7 +395,7 @@ def pull_acceptance_rate(self, repoid): return pd.read_sql(pullAcceptanceSQL, self.db, params={"repoid": str(repoid)}) - def classify_contributors(self, repoid): + def classify_contributors(self, owner, repo=None): """ Classify everyone who has interacted with a repo into - user @@ -340,10 +405,12 @@ def classify_contributors(self, repoid): - major_contributor - maintainer - :param repoid: The id of the project in the projects table. + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with the login and role of contributors """ - contributors = self.contributors(repoid) + repoid = self.repoid(owner, repo) + contributors = self.contributors(repoid, repo=None) sums = contributors.sum() def classify(row): @@ -364,3 +431,61 @@ def classify(row): roles = contributors.apply(classify, axis=1) return roles + + def community_age(self, owner, repo=None): + """ + Information helpful to determining a community's age + + For now, returns the date of the first of each type of action (fork, pull request, etc.) + """ + + repoid = self.repoid(owner, repo) + communityAgeSQL = s.sql.text(""" + SELECT DATE(proj.created_at) AS "project", + DATE(commits.created_at) AS "commit", + DATE(frk.created_at) AS "fork", + DATE(iss.created_at) AS "issue", + DATE(pr.created_at) AS "pull_request" + + FROM commits + + LEFT JOIN (SELECT forked_from_id AS "repo_id", created_at AS "created_at" FROM forks WHERE forks.forked_from_id = :repoid ORDER BY created_at DESC LIMIT 1) AS frk + ON frk.repo_id = commits.project_id + + LEFT JOIN (SELECT repo_id AS "repo_id", created_at AS "created_at" FROM issues WHERE issues.repo_id = :repoid ORDER BY created_at DESC LIMIT 1) AS iss + ON iss.repo_id = commits.project_id + + LEFT JOIN (SELECT pull_request_history.created_at AS "created_at", pull_requests.base_repo_id AS "repo_id" FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.pull_request_id WHERE pull_requests.base_repo_id = :repoid AND pull_request_history.action = 'merged' ORDER BY pull_request_history.created_at DESC LIMIT 1) AS pr + ON pr.repo_id = commits.project_id + + LEFT JOIN (SELECT projects.id AS "repo_id", created_at AS "created_at" FROM projects WHERE projects.id = :repoid) AS proj + ON proj.repo_id = commits.project_id + + WHERE commits.project_id = :repoid + ORDER BY commits.created_at DESC + LIMIT 1 + """) + + return pd.read_sql(communityAgeSQL, self.db, params={"repoid": str(repoid)}) + + def unique_committers(self, owner, repo=None): + repoid = self.repoid(owner, repo) + uniqueCommittersSQL = s.sql.text(""" + SELECT unique_committers.created_at AS "date", MAX(@number_of_committers:=@number_of_committers+1) total_unique_committers + FROM ( + SELECT author_id, MIN(DATE(created_at)) created_at + FROM commits + WHERE project_id = :repoid + GROUP BY author_id + ORDER BY created_at ASC) AS unique_committers, + (SELECT @number_of_committers:= 0) AS 
number_of_committers + GROUP BY DATE(unique_committers.created_at) + """) + return pd.read_sql(uniqueCommittersSQL, self.db, params={"repoid": str(repoid)}) + + def ghtorrent_range(self): + ghtorrentRangeSQL = s.sql.text(""" + SELECT MIN(date(created_at)) AS "min_date", MAX(date(created_at)) AS "max_date" + FROM commits + """) + return pd.read_sql(ghtorrentRangeSQL, self.db) diff --git a/ghdata/githubapi.py b/ghdata/githubapi.py --- a/ghdata/githubapi.py +++ b/ghdata/githubapi.py @@ -1,9 +1,10 @@ - -import datetime +from .localcsv import LocalCSV +import json +import re from dateutil.parser import parse import pandas as pd import github - +import requests class GitHubAPI(object): """ @@ -15,78 +16,21 @@ def __init__(self, api_key): :param api_key: GitHub API key """ - self.GITUB_API_KEY = api_key + self.GITHUB_API_KEY = api_key self.__api = github.Github(api_key) - def contributions_by_file(self, owner, repo, filename=None, start=None, end=None, ascending=False): + def bus_factor(self, owner, repo, filename=None, start=None, end=None, threshold=50): """ - Gets number of addtions and deletions in each file by user - - Currently ignores changes from local users unattributed to Github users + Calculates bus factor by adding up percentages from highest to lowest until they exceed threshold :param owner: repo owner username :param repo: repo name :param filename: optional; file or directory for function to run on :param start: optional; start time for analysis :param end: optional; end time for analysis - :param ascending: Default False; returns dataframe in ascending order - """ - if start != None: - start = parse(start) - else: - start = github.GithubObject.NotSet - - if end != None: - end = parse(end) - else: - end = github.GithubObject.NotSet - - commits = self.__api.get_repo((owner + "/" + repo)).get_commits(since=start, until=end) - - if filename != None: - self.__api.get_repo((owner + "/" + repo)).get_contents(filename) - - df = [] - - for commit in commits: - for file in commit.files: - if filename != None: - try: - if file.changes != 0 and file.filename == filename: - df.append({'user': commit.author.login, 'file': file.filename, 'number of additions': file.additions, 'number of deletions': file.deletions, 'total': file.changes}) - except AttributeError: - pass - else: - try: - if file.changes != 0: - df.append({'user': commit.author.login, 'file': file.filename, 'number of additions': file.additions, 'number of deletions': file.deletions, 'total': file.changes}) - except AttributeError: - pass - - df = pd.DataFrame(df) - - df = df.groupby(["file", "user"]).sum() - - df = df.sort_values(ascending=ascending) - - return df - - def contributions_by_percentage(self, owner, repo, filename=None, start=None, end=None, ascending=False): + :param threshold: Default 50; """ - Calculates percentage of commits in repo by user - - Puts it in dataframe with columns: - user percentage of commits - Currently ignores changes from local users unattributed to Github user - - :param owner: repo owner username - :param repo: repo name - :param filename: optional; file or directory for function to run on - :param start: optional; start time for analysis - :param end: optional; end time for analysis - :param ascending: Default False; returns dataframe in ascending order - """ if start != None: start = parse(start) else: @@ -109,43 +53,159 @@ def contributions_by_percentage(self, owner, repo, filename=None, start=None, en for file in commit.files: if file.filename == filename: try: - df.append({'user': 
commit.author.login}) + df.append({'userid': commit.author.id}) except AttributeError: pass break else: for commit in commits: try: - df.append({'user': commit.author.login}) + df.append({'userid': commit.author.id}) except AttributeError: pass df = pd.DataFrame(df) - df = df.groupby(['user']).user.count() / df.groupby(['user']).user.count().sum() * 100 + df = df.groupby(['userid']).userid.count() / df.groupby(['userid']).userid.count().sum() * 100 + + i = 0 + for num in df.cumsum(): + i = i + 1 + if num >= threshold: + worst = i + break + + i = 0 + for num in df.sort_values(ascending=True).cumsum(): + i = i + 1 + if num >= threshold: + best = i + break - df = df.sort_values(ascending=ascending) + bus_factor = [{'worst': worst, 'best' : best}] - return df + return pd.DataFrame(bus_factor) - def bus_factor(self, owner, repo, filename=None, start=None, end=None, threshold=50, best=False): + def tags(self, owner, repo, raw=False): """ - Calculates bus factor by adding up percentages from highest to lowest until they exceed threshold + Returns dates and names of tags :param owner: repo owner username :param repo: repo name - :param filename: optional; file or directory for function to run on - :param start: optional; start time for analysis - :param end: optional; end time for analysis - :param threshold: Default 50; - :param best: Default False; If true, sums from lowest to highestn + :param raw: Default False; Returns list of dicts """ - df = self.contributions_by_percentage(owner, repo, filename, start, end, best) + cursor = "null" + tags_list = [] + url = "https://api.github.com/graphql" + + while True: + query = {"query" : + """ + query { + repository(owner: "%s", name: "%s") { + tags: refs(refPrefix: "refs/tags/", first: 100, after: "%s") { + edges { + cursor + tag: node { + name + target { + ... on Tag { + tagger { + date + } + } + } + } + } + } + } + } + """ % (owner, repo, cursor) + } + r = requests.post(url, auth=requests.auth.HTTPBasicAuth('user', self.GITHUB_API_KEY), json=query) + raw = r.text + data = json.loads(json.loads(json.dumps(raw))) + tags = data['data']['repository']['tags']['edges'] + for i in tags: + try: + tags_list.append({'date' : i['tag']['target']['tagger']['date'], 'release' : i['tag']['name']}) + except KeyError: + pass + if data['data']['repository']['tags']['edges'] == []: + break + else: + cursor = data['data']['repository']['tags']['edges'][-1]['cursor'] + return pd.DataFrame(tags_list) + + def major_tags(self, owner, repo): + """ + Returns dates and names of major version (according to semver) tags. May return blank if no major versions + :param owner: repo owner username + :param repo: repo name + """ + cursor = "null" + tags_list = [] + url = "https://api.github.com/graphql" + + while True: + query = {"query" : + """ + query { + repository(owner: "%s", name: "%s") { + tags: refs(refPrefix: "refs/tags/", first: 100, after: "%s") { + edges { + cursor + tag: node { + name + target { + ... 
on Tag { + tagger { + date + } + } + } + } + } + } + } + } + """ % (owner, repo, cursor) + } + r = requests.post(url, auth=requests.auth.HTTPBasicAuth('user', self.GITHUB_API_KEY), json=query) + raw = r.text + data = json.loads(json.loads(json.dumps(raw))) + tags = data['data']['repository']['tags']['edges'] + for i in tags: + try: + tags_list.append({'date' : i['tag']['target']['tagger']['date'], 'release' : i['tag']['name']}) + except KeyError: + pass + if data['data']['repository']['tags']['edges'] == []: + break + else: + cursor = data['data']['repository']['tags']['edges'][-1]['cursor'] + + major_versions = [] + pattern = re.compile("[0-9]+\.[0]+\.[0]+$") + for i in tags_list: + try: + if re.search(pattern, i["release"]) != None: + major_versions.append(i) + except AttributeError: + pass + + return pd.DataFrame(major_versions) + + + def contributors_gender(self, owner, repo=None): + contributors = self.__api.get_repo((owner + "/" + repo)).get_contributors() + names = pd.DataFrame(columns=['name']) i = 0 - for num in df.cumsum(): - i = i + 1 - if num >= threshold: - bus_factor = pd.Series(i, index=["Bus Factor"]) - return bus_factor + for contributor in contributors: + if contributor.name is not None: + names.loc[i] = [contributor.name.split()[0]] + i += 1 + genderized = names.merge(LocalCSV.name_gender, how='inner', on=['name']) + return genderized \ No newline at end of file diff --git a/ghdata/librariesio.py b/ghdata/librariesio.py new file mode 100644 --- /dev/null +++ b/ghdata/librariesio.py @@ -0,0 +1,103 @@ +import requests +import pandas as pd +import numpy as np +from bs4 import BeautifulSoup + +class LibrariesIO(object): + """Handles interaction with https://libraries.io/api to get dependency data""" + def __init__(self, api_key, githubapi): + self.API_KEY = api_key + self.__githubapi = githubapi._GitHubAPI__api + + + def dependencies(self, owner, repo): + """ + Finds the packages that a project depends on + + :param owner: GitHub username of the owner of the repo + :param repo: Repository name + :return: Dict that contains the results (https://libraries.io/api#repository-dependencies) + """ + url = "https://libraries.io/api/github/{owner}/{repo}/dependencies".format(owner=owner, repo=repo) + r = requests.get(url, params={"api_key": self.API_KEY}) + return r.json() + + def dependents(self, owner, repo): + """ + Finds the packages depend on this repository + + :param owner: GitHub username of the owner of the repo + :param repo: Repository name + :return: Dict that contains the results (https://libraries.io/api#project-dependents) + """ + projectsUrl = "https://libraries.io/api/github/{owner}/{repo}/projects".format(owner=owner, repo=repo) + projectsRequest = requests.get(projectsUrl, params={"api_key": self.API_KEY}) + json = projectsRequest.json() + + if projectsRequest.status_code == 400: + print('You need to set the LibrariesIO API key in ghdata.cfg or the environment variable GHDATA_LIBRARIESIO_API_KEY') + + if projectsRequest.status_code != 200: + return projectsRequest.json() + else: + project = projectsRequest.json()[0]['name'] + platform = projectsRequest.json()[0]['platform'] + dependentsUrl = "https://libraries.io/api/{platform}/{repo}/dependents".format(platform=platform, repo=repo) + dependentsRequest = requests.get(dependentsUrl, params={"api_key": self.API_KEY}) + return dependentsRequest + + def dependency_stats(self, owner, repo): + """ + Finds the number of dependencies, dependant projects, and dependent repos by scrapping it off of the libraries.io website + 
+ :param owner: GitHub username of the owner of the repo + :param repo: Repository name + :return: Dict that contains the results + """ + root_dir = self.__githubapi.get_repo((owner + "/" + repo)).get_dir_contents("/") + + platform = None + + for file in root_dir: + if file.name == "Gemfile": + platform = 'rubygems' + if file.name == "package.json": + platform = 'npm' + if file.name == 'setup.py': + platform = 'pypi' + + if platform == None: + return {'Stats' : 'null'} + + url = "https://libraries.io/{platform}/{repo}/".format(platform=platform, repo=repo) + + resp = requests.get(url) + + if resp.status_code == 404: + return {'Stats' : 'null'} + + soup = BeautifulSoup(resp.text, "html.parser") + + infotable = soup.body.div.next_sibling.next_sibling.div.div.next_sibling.next_sibling.dl.next_sibling.next_sibling.next_sibling.next_sibling + + data =[] + for child in infotable.children: + if child.string == '\n': + pass + if child.string == None: + if child.a != None: + data.append(child.a.string) + else: + data.append(child.string) + + data_new = [] + for item in data: + data_new.append(item.strip('\n')) + data_new = list(filter(None, data_new)) + + data_new = dict(zip(*[iter(data_new)]*2)) + + final_data = {'dependencies' : data_new['Dependencies'], 'dependent_projects' : data_new['Dependent projects'], 'dependent_repositories' : data_new['Dependent repositories']} + + return final_data + diff --git a/ghdata/localcsv.py b/ghdata/localcsv.py new file mode 100644 --- /dev/null +++ b/ghdata/localcsv.py @@ -0,0 +1,10 @@ +#SPDX-License-Identifier: MIT +import pandas as pd +from .util import get_data_path + +class LocalCSV(object): + + def __init__(self): + return + + name_gender = pd.read_csv(get_data_path('name_gender.csv'), index_col=0) \ No newline at end of file diff --git a/ghdata/server.py b/ghdata/server.py old mode 100755 new mode 100644 --- a/ghdata/server.py +++ b/ghdata/server.py @@ -1,477 +1,723 @@ #SPDX-License-Identifier: MIT -import ghdata - import os import sys +import ipdb +import traceback if (sys.version_info > (3, 0)): import configparser as configparser else: import ConfigParser as configparser -from flask import Flask, request, Response, send_from_directory +sys.path.append('..') + +import ghdata +from flask import Flask, request, Response from flask_cors import CORS +import json -GHDATA_API_VERSION = 'unstable' +GHDATA_API_VERSION = 'api/unstable' +# Location to load configuration from +GHDATA_CONFIG_FILE = open(os.getenv('GHDATA_CONFIG_FILE', 'ghdata.cfg'), 'r+') +# Options to export the loaded configuration as environment variables for Docker +GHDATA_ENV_EXPORT = os.getenv('GHDATA_ENV_EXPORT', '0') == '1' +if GHDATA_ENV_EXPORT: + GHDATA_ENV_EXPORT_FILE = open(os.getenv('GHDATA_ENV_EXPORT_FILE', 'lastrun.cfg.sh'), 'w+') + + +def serialize(data, orient='records'): + + if (orient is None): + orient = 'records' + + result = '' -def serialize(func, **args): - """ - Serailizes a function that returns a dataframe - """ - data = func(**args) if hasattr(data, 'to_json'): - return data.to_json(orient='records', date_format='iso', date_unit='ms') + result = data.to_json(orient=orient, date_format='iso', date_unit='ms') else: - return data + try: + result = json.dumps(data) + except: + result = data + return result -def flaskify_ghtorrent(ghtorrent, func): +def flaskify(func): """ Simplifies API endpoints that just accept owner and repo, serializes them and spits them out """ - def generated_function(owner, repo): - repoid = ghtorrent.repoid(owner=owner, repo=repo) - return 
Response(response=serialize(func, repoid=repoid), + def generated_function(*args, **kwargs): + kwargs.update(request.args.to_dict()) + df = func(*args, **kwargs) + return Response(response=serialize(df, orient=request.args.get('orient')), status=200, mimetype="application/json") generated_function.__name__ = func.__name__ return generated_function -def flaskify(func): +def addMetric(app, function, endpoint): + """Simplifies adding routes that only accept owner/repo""" + app.route('/{}/<owner>/<repo>/{}'.format(GHDATA_API_VERSION, endpoint))(flaskify(function)) + +def addTimeseries(app, function, endpoint): """ - Simplifies API endpoints that just accept owner and repo, - serializes them and spits them out + Simplifies adding routes that accept owner/repo and return timeseries + + :param app: Flask app + :param function: Function from a datasource to add + :param endpoint: GET endpoint to generate """ - def generated_function(owner, repo): - return Response(response=serialize(func, owner=owner, repo=repo), - status=200, - mimetype="application/json") - generated_function.__name__ = func.__name__ - return generated_function + addMetric(app, function, 'timeseries/{}'.format(endpoint)) + app.route('/{}/<owner>/<repo>/timeseries/{}/relative_to/<ownerRelativeTo>/<repoRelativeTo>'.format(GHDATA_API_VERSION, endpoint))(flaskify(ghdata.util.makeRelative(function))) + -def read_config(parser, section, name, environment_variable, default): +app = Flask(__name__) +CORS(app)# Try to open the config file and parse it +parser = configparser.RawConfigParser() +parser.readfp(GHDATA_CONFIG_FILE) + +if GHDATA_ENV_EXPORT: + GHDATA_ENV_EXPORT_FILE.write('#!/bin/bash\n') + +def read_config(section, name, environment_variable, default): + value = default try: value = os.getenv(environment_variable, parser.get(section, name)) - return value - except: + except Exception as e: if not parser.has_section(section): parser.add_section(section) + print('[' + section + '] -> ' + name + ' is missing. 
Adding to config...') parser.set(section, name, default) - with open('ghdata.cfg', 'w') as configfile: - parser.write(configfile) - return default - + parser.write(GHDATA_CONFIG_FILE) + value = default + if GHDATA_ENV_EXPORT: + GHDATA_ENV_EXPORT_FILE.write('export ' + environment_variable + '="' + value + '"\n') + return value + +host = read_config('Server', 'host', 'GHDATA_HOST', '0.0.0.0') +port = read_config('Server', 'port', 'GHDATA_PORT', '5000') + +publicwww = ghdata.PublicWWW(api_key=read_config('PublicWWW', 'APIKey', 'GHDATA_PUBLIC_WWW_API_KEY', 'None')) +github = ghdata.GitHubAPI(api_key=read_config('GitHub', 'APIKey', 'GHDATA_GITHUB_API_KEY', 'None')) +librariesio = ghdata.LibrariesIO(api_key=read_config('LibrariesIO', 'APIKey', 'GHDATA_LIBRARIESIO_API_KEY', 'None'), githubapi=github) +downloads = ghdata.Downloads(github) +localcsv = ghdata.LocalCSV() + +if (read_config('Development', 'developer', 'GHDATA_DEBUG', '0') == '1'): + debugmode = True +else: + debugmode = False + +dbstr = 'mysql+pymysql://{}:{}@{}:{}/{}'.format( + read_config('Database', 'user', 'GHDATA_DB_USER', 'root'), + read_config('Database', 'pass', 'GHDATA_DB_PASS', 'password'), + read_config('Database', 'host', 'GHDATA_DB_HOST', '127.0.0.1'), + read_config('Database', 'port', 'GHDATA_DB_PORT', '3306'), + read_config('Database', 'name', 'GHDATA_DB_NAME', 'msr14') +) +ghtorrent = ghdata.GHTorrent(dbstr=dbstr) + +""" +@api {get} / API Status +@apiName Status +@apiGroup Misc +""" [email protected]('/{}/'.format(GHDATA_API_VERSION)) +def api_root(): + """API status""" + # @todo: When we support multiple data sources this should keep track of their status + # @todo: Add GHTorrent test to determine status + ghtorrent_status = "good" + # @todo: Add GitHub API status + # @todo: Add PublicWWW API status + return """{"status": "healthy", "ghtorrent": "{}"}""".format(ghtorrent_status) + +####################### +# Timeseries # +####################### + +# @todo: Link to LF Metrics + +""" +@api {get} /:owner/:repo/commits/group_by=:group_by Commits +@apiName Commits +@apiGroup Timeseries + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository +@apiParam {String} group_by (Default to Week) Allows for reseults to be grouped by day, week, month, or year + +@apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "commits": 153 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "commits": 192 + } + ] +""" +addTimeseries(app, ghtorrent.commits, 'commits') + +""" +@api {get} /:owner/:repo/forks/group_by=:group_by Forks +@apiName Forks +@apiGroup Timeseries +@apiParam {String} group_by (Default to Week) Allows for reseults to be grouped by day, week, month, or year + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "forks": 13 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "forks": 12 + } + ] +""" +addTimeseries(app, ghtorrent.forks, 'forks') + +""" +@api {get} /:owner/:repo/issues/group_by=:group_by Issues +@apiName Issues +@apiGroup Timeseries +@apiParam {String} group_by (Default to Week) Allows for reseults to be grouped by day, week, month, or year + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "date": 
"2015-01-01T00:00:00.000Z", + "issues":13 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "issues":15 + } + ] +""" +addTimeseries(app, ghtorrent.issues, 'issues') + +""" +@api {get} /:owner/:repo/issues/response_time Issue Response Time +@apiName IssueResponseTime +@apiGroup Timeseries + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "created_at": "2013-09-16T17:00:54.000Z", + "responded_at": "2013-09-16T17:20:58.000Z" + }, + { + "created_at": "2013-09-16T09:31:34.000Z", + "responded_at": "2013-09-16T09:43:03.000Z" + } + ] +""" +addMetric(app, ghtorrent.issue_response_time, 'issues/response_time') + +""" +@api {get} /:owner/:repo/pulls Pull Requests by Week +@apiName PullRequestsByWeek +@apiGroup Timeseries + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "pull_requests": 1 + "comments": 11 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "pull_requests": 2 + "comments": 31 + } + ] +""" +addTimeseries(app, ghtorrent.pulls, 'pulls') + +""" +@api {get} /:owner/:repo/stargazers/group_by=:group_by Stargazers +@apiName Stargazers +@apiGroup Timeseries + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository +@apiParam {String} group_by (Default to Week) Allows for reseults to be grouped by day, week, month, or year + +@apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "watchers": 133 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "watchers": 54 + } + ] +""" +addTimeseries(app, ghtorrent.stargazers, 'stargazers') + +""" +@api {get} /:owner/:repo/pulls/acceptance_rate Pull Request Acceptance Rate by Week +@apiDescription For each week, the rate is calculated as (pull requests merged that week) / (pull requests opened that week) +@apiName PullRequestAcceptanceRate +@apiGroup Timeseries + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "rate": 0.5 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "rate": 0.33 + } + ] +""" +addTimeseries(app, ghtorrent.pull_acceptance_rate, 'pulls/acceptance_rate') + +""" +@api {get} /:owner/:repo/timeseries/tags Tags release timeseries +@apiDescription Timeseries of tags +@apiName Tags +@apiGroup Timeseries + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "release": 0.5 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "release": 0.5.1 + } + ] +""" +addTimeseries(app, github.tags, 'tags') + +""" +@api {get} /:owner/:repo/timeseries/tags/major Tags for major releases timeseries +@apiDescription Timeseries of Major release tags +@apiName Major Release Tags +@apiGroup Timeseries + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "release": 1.0.0 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "release": 2.0.0 + } + ] +""" 
+addTimeseries(app, github.major_tags, 'tags/major') + +""" +@api {get} /:owner/:repo/timeseries/downloads Number of downloads +@apiDescription Timeseries of downloads from package manager +@apiName Downloads +@apiGroup Timeseries + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "downlads": 235 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "dowloads": 327 + } + ] +""" +addTimeseries(app, downloads.downloads, 'downloads') + + + +# Contribution Trends +""" +@api {get} /:owner/:repo/contributors Total Contributions by User +@apiName TotalContributions +@apiGroup Users + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "login": "foo", + "location": "Springfield", + "commits": 1337.0, + "pull_requests": 60.0, + "issues": null, + "commit_comments": 158.0, + "pull_request_comments": 718.0, + "issue_comments": 1668.0 + }, + { + "login": "bar", + "location": null, + "commits": 3968.0, + "pull_requests": null, + "issues": 12.0, + "commit_comments": 158.0, + "pull_request_comments": 718.0, + "issue_comments": 1568.0 + } + ] +""" +addMetric(app, ghtorrent.contributors, 'contributors') + +####################### +# Contribution Trends # +####################### + +""" +@api {get} /:owner/:repo/contributions Contributions by Week +@apiName ContributionsByWeek +@apiGroup Timeseries + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository +@apiParam (String) user Limit results to the given user's contributions + +@apiSuccessExample {json} Success-Response: + [ + { + "date": "2015-01-01T00:00:00.000Z", + "commits": 37.0, + "pull_requests": null, + "issues": null, + "commit_comments": 7.0, + "pull_request_comments": 8.0, + "issue_comments": 17.0 + }, + { + "date": "2015-01-08T00:00:00.000Z", + "commits": 68.0, + "pull_requests": null, + "issues": 12.0, + "commit_comments": 18.0, + "pull_request_comments": 13.0, + "issue_comments": 28.0 + } + ] +""" [email protected]('/{}/<owner>/<repo>/contributions'.format(GHDATA_API_VERSION)) +def contributions(owner, repo): + repoid = ghtorrent.repoid(owner=owner, repo=repo) + user = request.args.get('user') + if (user): + userid = ghtorrent.userid(username=user) + contribs = ghtorrent.contributions(repoid=repoid, userid=userid) + else: + contribs = ghtorrent.contributions(repoid=repoid) + serialized_contributors = serialize(contribs, orient=request.args.get('orient')) + return Response(response=serialized_contributors, + status=200, + mimetype="application/json") + +""" +@api {get} /:owner/:repo/committer_locations Commits and Location by User +@apiName CommiterLocations +@apiGroup Diversity + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "login": "bonnie", + "location": "Rowena, TX", + "commits": 12 + }, + { + "login":"clyde", + "location":"Ellis County, TX", + "commits": 12 + } + ] +""" +addMetric(app, ghtorrent.committer_locations, 'committer_locations') + + + +""" +@api {get} /:owner/:repo/community_age Timeline of events to determine the age of a community +@apiName CommunityAge +@apiGroup Timeseries + +@apiParam {String} owner Username of the 
owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "login": "bonnie", + "location": "Rowena, TX", + "commits": 12 + }, + { + "login":"clyde", + "location":"Ellis County, TX", + "commits": 12 + } + ] +""" +addMetric(app, ghtorrent.community_age, 'community_age') + +""" +@api {get} /:owner/:repo/dependencies List of dependencies from libraries.io +@apiName Dependencies +@apiGroup Ecosystem + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { "full_name": "rails/rails" + "description": "Ruby on Rails", + "fork": false, "created_at": "2008-04-11T02:19:47.000Z", + "updated_at": "2017-09-20T20:16:47.181Z", + "pushed_at": "2017-09-20T19:39:08.000Z", + "homepage": "http://rubyonrails.org", + "size": 155199, "stargazers_count": 36993, + "language": "Ruby", "has_issues": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 15130, + "mirror_url": null, + "open_issues_count": 1157, + "default_branch": "master", + "subscribers_count": 2452, + "uuid": "8514", "source_name": null, + "license": "MIT", "private": false, + "contributions_count": 2616, + "has_readme": "README.md", + "has_changelog": null, + "has_contributing": "CONTRIBUTING.md", + "has_license": "MIT-LICENSE", + "has_coc": "CODE_OF_CONDUCT.md", + "has_threat_model": null, + "has_audit": null, + "status": null, + "last_synced_at": "2017-09-20T20:16:47.153Z", + "rank": 28, "host_type": "GitHub", + "host_domain": null, + "name": null, + "scm": "git", + "fork_policy": null, + "github_id": "8514", + "pull_requests_enabled": null, + "logo_url": null, + "github_contributions_count": 2616, + "keywords": ["activejob", "activerecord", "html", "mvc", "rails", "ruby"], + "dependencies": [ + { "project_name": "websocket-driver", + "name": "websocket-driver", + "platform": "rubygems", + "requirements": "~> 0.6.1", + "latest_stable": "0.7.0", + "latest": "0.7.0", + "deprecated": false, "outdated": true, + "filepath": "actioncable/actioncable.gemspec", " + kind": "runtime" + } + ] +""" +addMetric(app, librariesio.dependencies, 'dependencies') + +""" +@api {get} /:owner/:repo/dependents List of dependants from libraries.io +@apiName Dependents +@apiGroup Ecosystem + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "login": "bonnie", + "location": "Rowena, TX", + "commits": 12 + }, + { + "login":"clyde", + "location":"Ellis County, TX", + "commits": 12 + } + ] +""" +addMetric(app, librariesio.dependents, 'dependents') + +""" +@api {get} /:owner/:repo/dependency_stats List of libraries.io stats +@apiName DependencyStats +@apiGroup Ecosystem + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "dependencies": "10", + "dependent_projects": "10.6K", + "dependent_repositories": "392K" + } + ] +""" +addMetric(app, librariesio.dependency_stats, 'dependency_stats') + + +""" +@api {get} /:owner/:repo/unique_committers Count of new committers weekly +@apiName UniqueCommiters +@apiGroup Timeseries + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + 
[ + { "date":"2009-02-16T00:00:00.000Z", + "total_unique_committers":1.0 + }, + { "date":"2009-07-12T00:00:00.000Z", + "total_unique_committers":2.0 + }, + ] +""" +addTimeseries(app, ghtorrent.unique_committers, 'unique_committers') + +# Popularity +""" +@api {get} /:owner/:repo/linking_websites Linking Websites +@apiDescription Returns an array of websites and their rank according to http://publicwww.com/ +@apiName LinkingWebsites +@apiGroup Popularity + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "url": "missouri.edu", + "rank": "1" + }, + { + "url": "unomaha.edu", + "rank": "2" + } + ] +""" +addMetric(app, publicwww.linking_websites, 'linking_websites') + +""" +@api {get} /ghtorrent_range Range of dates covered by GHTorrent +@apiName GhtorrentRange +@apiGroup Misc +""" [email protected]('/{}/ghtorrent_range'.format(GHDATA_API_VERSION)) + +def ghtorrent_range(): + ghtorrent_range = serialize(ghtorrent.ghtorrent_range()) + return Response(response=ghtorrent_range, + status=200, + mimetype="application/json") + +####################### +# GitHub API # +####################### + +""" +@api {get} /:owner/:repo/bus_factor Bus Factor +@apiDescription Returns an integer that is the number of develpers that have a summed percentage of contributions higher than the threshold +@apiName GitHub +@apiGroup Users + +@apiParam {String} owner Username of the owner of the GitHub repository +@apiParam {String} repo Name of the GitHub repository + +@apiSuccessExample {json} Success-Response: + [ + { + "min_date": "2009-02-16T00:00:00.000Z", + "max_date": "2017-02-16T00:00:00.000Z" + } + ] +""" +addMetric(app, github.bus_factor, 'bus_factor') + + + + +if (debugmode): + app.debug = True + +if read_config('Development', 'interactive', 'GHDATA_INTERACTIVE', '0') == '1': + ipdb.set_trace() def run(): + app.run(host=host, port=int(port), debug=debugmode) - app = Flask(__name__) - CORS(app) - # Try to open the config file and parse it - parser = configparser.RawConfigParser() - parser.read('ghdata.cfg') +# Close files +GHDATA_CONFIG_FILE.close() +if GHDATA_ENV_EXPORT: + GHDATA_ENV_EXPORT_FILE.close() +if __name__ == "__main__": try: - dbstr = 'mysql+pymysql://{}:{}@{}:{}/{}'.format( - read_config(parser, 'Database', 'user', 'GHDATA_DB_USER', 'root'), - read_config(parser, 'Database', 'pass', 'GHDATA_DB_PASS', 'password'), - read_config(parser, 'Database', 'host', 'GHDATA_DB_HOST', '127.0.0.1'), - read_config(parser, 'Database', 'port', 'GHDATA_DB_PORT', '3306'), - read_config(parser, 'Database', 'name', 'GHDATA_DB_NAME', 'msr14') - ) - print("Connecting with " + dbstr) - ghtorrent = ghdata.GHTorrent(dbstr=dbstr) + run() except Exception as e: - print("Failed to connect to database (" + str(e) + ")"); - - host = read_config(parser, 'Server', 'host', 'GHDATA_HOST', '0.0.0.0') - port = read_config(parser, 'Server', 'port', 'GHDATA_PORT', '5000') - - publicwww = ghdata.PublicWWW(api_key=read_config(parser, 'PublicWWW', 'APIKey', 'GHDATA_PUBLIC_WWW_API_KEY', 'None')) - github = ghdata.GitHubAPI(api_key=read_config(parser, 'GitHub', 'APIKey', 'GHDATA_GITHUB_API_KEY', 'None')) - - if (read_config(parser, 'Development', 'developer', 'GHDATA_DEBUG', '0') == '1'): - debugmode = True - else: - debugmode = False - - - - """ - @api {get} / API Status - @apiName Status - @apiGroup Misc - """ - @app.route('/{}/'.format(GHDATA_API_VERSION)) - def api_root(): - """API status""" - # @todo: 
When we support multiple data sources this should keep track of their status - # @todo: Add GHTorrent test to determine status - ghtorrent_status = "good" - # @todo: Add GitHub API status - # @todo: Add PublicWWW API status - return """{"status": "healthy", "ghtorrent": "{}"}""".format(ghtorrent_status) - - ####################### - # Timeseries # - ####################### - - # @todo: Link to LF Metrics - - """ - @api {get} /:owner/:repo/commits Commits by Week - @apiName CommitsByWeek - @apiGroup Timeseries - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "commits": 153 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "commits": 192 - } - ] - """ - app.route('/{}/<owner>/<repo>/timeseries/commits'.format(GHDATA_API_VERSION))( - flaskify_ghtorrent(ghtorrent, ghtorrent.commits)) - - """ - @api {get} /:owner/:repo/forks Forks by Week - @apiName ForksByWeek - @apiGroup Timeseries - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "forks": 13 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "forks": 12 - } - ] - """ - app.route('/{}/<owner>/<repo>/timeseries/forks'.format(GHDATA_API_VERSION))( - flaskify_ghtorrent(ghtorrent, ghtorrent.forks)) - - """ - @api {get} /:owner/:repo/issues Issues by Week - @apiName IssuesByWeek - @apiGroup Timeseries - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "issues":13 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "issues":15 - } - ] - """ - app.route('/{}/<owner>/<repo>/timeseries/issues'.format(GHDATA_API_VERSION))( - flaskify_ghtorrent(ghtorrent, ghtorrent.issues)) - - """ - @api {get} /:owner/:repo/issues/response_time Issue Response Time - @apiName IssueResponseTime - @apiGroup Timeseries - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "created_at": "2013-09-16T17:00:54.000Z", - "responded_at": "2013-09-16T17:20:58.000Z" - }, - { - "created_at": "2013-09-16T09:31:34.000Z", - "responded_at": "2013-09-16T09:43:03.000Z" - } - ] - """ - app.route('/{}/<owner>/<repo>/timeseries/issues/response_time'.format(GHDATA_API_VERSION))( - flaskify_ghtorrent(ghtorrent, ghtorrent.issue_response_time)) - - """ - @api {get} /:owner/:repo/pulls Pull Requests by Week - @apiName PullRequestsByWeek - @apiGroup Timeseries - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "pull_requests": 1 - "comments": 11 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "pull_requests": 2 - "comments": 31 - } - ] - """ - app.route('/{}/<owner>/<repo>/timeseries/pulls'.format(GHDATA_API_VERSION))( - flaskify_ghtorrent(ghtorrent, ghtorrent.pulls)) - - """ - @api {get} /:owner/:repo/stargazers Stargazers by Week - @apiName StargazersByWeek - @apiGroup Timeseries - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam 
{String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "watchers": 133 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "watchers": 54 - } - ] - """ - app.route('/{}/<owner>/<repo>/timeseries/stargazers'.format(GHDATA_API_VERSION))( - flaskify_ghtorrent(ghtorrent, ghtorrent.stargazers)) - - """ - @api {get} /:owner/:repo/pulls/acceptance_rate Pull Request Acceptance Rate by Week - @apiDescription For each week, the rate is calculated as (pull requests merged that week) / (pull requests opened that week) - @apiName Stargazers - @apiGroup Timeseries - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "rate": 0.5 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "rate": 0.33 - } - ] - """ - app.route('/{}/<owner>/<repo>/pulls/acceptance_rate'.format(GHDATA_API_VERSION))( - flaskify_ghtorrent(ghtorrent, ghtorrent.pull_acceptance_rate)) - - # Contribution Trends - """ - @api {get} /:owner/:repo/contributors Total Contributions by User - @apiName TotalContributions - @apiGroup Users - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "login": "foo", - "location": "Springfield", - "commits": 1337.0, - "pull_requests": 60.0, - "issues": null, - "commit_comments": 158.0, - "pull_request_comments": 718.0, - "issue_comments": 1668.0 - }, - { - "login": "bar", - "location": null, - "commits": 3968.0, - "pull_requests": null, - "issues": 12.0, - "commit_comments": 158.0, - "pull_request_comments": 718.0, - "issue_comments": 1568.0 - } - ] - """ - app.route('/{}/<owner>/<repo>/contributors'.format(GHDATA_API_VERSION))( - flaskify_ghtorrent(ghtorrent, ghtorrent.contributors)) - - ####################### - # Contribution Trends # - ####################### - - """ - @api {get} /:owner/:repo/contributions Contributions by Week - @apiName ContributionsByWeek - @apiGroup Timeseries - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - @apiParam (String) user Limit results to the given user's contributions - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2015-01-01T00:00:00.000Z", - "commits": 37.0, - "pull_requests": null, - "issues": null, - "commit_comments": 7.0, - "pull_request_comments": 8.0, - "issue_comments": 17.0 - }, - { - "date": "2015-01-08T00:00:00.000Z", - "commits": 68.0, - "pull_requests": null, - "issues": 12.0, - "commit_comments": 18.0, - "pull_request_comments": 13.0, - "issue_comments": 28.0 - } - ] - """ - @app.route('/{}/<owner>/<repo>/contributions'.format(GHDATA_API_VERSION)) - def contributions(owner, repo): - repoid = ghtorrent.repoid(owner=owner, repo=repo) - user = request.args.get('user') - if (user): - userid = ghtorrent.userid(username=user) - contribs = ghtorrent.contributions(repoid=repoid, userid=userid) - else: - contribs = ghtorrent.contributions(repoid=repoid) - return Response(response=contribs, - status=200, - mimetype="application/json") - - # Diversity - - """ - @api {get} /:owner/:repo/commits/locations Commits and Location by User - @apiName Stargazers - @apiGroup Diversity - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name 
of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "login": "bonnie", - "location": "Rowena, TX", - "commits": 12 - }, - { - "login":"clyde", - "location":"Ellis County, TX", - "commits": 12 - } - ] - """ - app.route('/{}/<owner>/<repo>/commits/locations'.format(GHDATA_API_VERSION))( - flaskify_ghtorrent(ghtorrent, ghtorrent.committer_locations)) - - # Popularity - """ - @api {get} /:owner/:repo/linking_websites Linking Websites - @apiDescription Returns an array of websites and their rank according to http://publicwww.com/ - @apiName LinkingWebsites - @apiGroup Popularity - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "url": "missouri.edu", - "rank": "1" - }, - { - "url": "unomaha.edu", - "rank": "2" - } - ] - """ - app.route('/{}/<owner>/<repo>/linking_websites'.format(GHDATA_API_VERSION))(flaskify(publicwww.linking_websites)) + print(e) + type, value, tb = sys.exc_info() + traceback.print_exc() + if (debugmode): + ipdb.post_mortem(tb) + exit(1) - ####################### - # GitHub API # - ####################### - """ - @api {get} /:owner/:repo/bus_factor Bus Factor - @apiDescription Returns an integer that is the number of develpers that have a summed percentage of contributions higher than the threshold - @apiName GitHub - @apiGroup Users - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - @apiParam {String} filename: optional; file or directory for function to run on - @apiParam {String} start: optional; start time for analysis - @apiParam {String} end: optional; end time for analysis - @apiParam {String} threshold: Default 50; - @apiParam {String} best: Default False; If true, sums from lowest to highest - - @apiSuccessExample {json} Success-Response: - [ - { - "repo": "ghdata", - "bus_factor": "2" - } - ] - """ - @app.route('/{}/<owner>/<repo>/bus_factor'.format(GHDATA_API_VERSION)) - def bus_factor(owner,repo): - kwargs = request.args.to_dict() - return Response(response=github.bus_factor(owner, repo, **kwargs).to_json(), status=200, mimetype="application/json") - - - - if (debugmode): - print(" * Serving static routes") - # Serve the front-end files in debug mode to make it easier for developers to work on the interface - # @todo: Figure out why this isn't working. 
- @app.route('/') - def index(): - root_dir = os.path.dirname(os.getcwd()) - print(root_dir + '/ghdata/static') - return send_from_directory(root_dir + '/ghdata/ghdata/static', 'index.html') - - @app.route('/scripts/<path>') - def send_scripts(path): - root_dir = os.path.dirname(os.getcwd()) - return send_from_directory(root_dir + '/ghdata/ghdata/static/scripts', path) - - @app.route('/styles/<path>') - def send_styles(path): - root_dir = os.path.dirname(os.getcwd()) - return send_from_directory(root_dir+ '/ghdata/ghdata/static/styles', path) - - app.debug = True - - app.run(host=host, port=int(port), debug=debugmode) -if __name__ == '__main__': - run() + \ No newline at end of file diff --git a/ghdata/util.py b/ghdata/util.py new file mode 100644 --- /dev/null +++ b/ghdata/util.py @@ -0,0 +1,27 @@ +#SPDX-License-Identifier: MIT +import pandas as pd +import os + +def makeRelative(function): + """ + Decorator that makes a timeseries relative to another timeseries + """ + def generated_function(owner, repo, ownerRelativeTo, repoRelativeTo): + baseData = function(ownerRelativeTo, repoRelativeTo) + comparableData = function(owner, repo) + columns = list(baseData.columns) + columns.remove('date') + relativeData = ( + pd + .merge(baseData, comparableData, on='date', how='left') + .dropna() + ) + for col in columns: + relativeData[col + '_ratio'] = relativeData[col + '_y'] / relativeData[col + '_x'] + return relativeData + generated_function.__name__ = function.__name__ + '_relative' + return generated_function + +_ROOT = os.path.abspath(os.path.dirname(__file__)) +def get_data_path(path): + return os.path.join(_ROOT, 'data', path) \ No newline at end of file diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ Install ghdata package with pip. 
''' -from setuptools import setup, find_packages +from setuptools import setup from codecs import open from os import path @@ -16,7 +16,8 @@ setup( name='ghdata', - version='0.2.2', + version='0.4.0', + include_package_data = True, description='Library/Server for data related to the health and sustainability of GitHub projects', long_description=long_description, url='https://github.com/OSSHealth/ghdata', @@ -39,7 +40,7 @@ 'Programming Language :: Python :: 3.5', ], keywords='ghtorrent github api data science', - install_requires=['flask', 'flask-cors', 'PyMySQL', 'requests', 'python-dateutil', 'sqlalchemy', 'pandas', 'pytest', 'PyGithub', 'pyevent'], + install_requires=['ipdb', 'setuptools-git', 'beautifulsoup4', 'flask', 'flask-cors', 'PyMySQL', 'requests', 'python-dateutil', 'sqlalchemy', 'pandas', 'pytest', 'PyGithub', 'pyevent', 'gunicorn'], extras_require={ 'dev': ['check-manifest'], 'test': ['coverage'], diff --git a/busFactor/pythonBlameAuthorEmail.py b/unintegrated-python/busFactor/pythonBlameAuthorEmail.py similarity index 100% rename from busFactor/pythonBlameAuthorEmail.py rename to unintegrated-python/busFactor/pythonBlameAuthorEmail.py diff --git a/busFactor/pythonBlameLinesInRepo.py b/unintegrated-python/busFactor/pythonBlameLinesInRepo.py similarity index 100% rename from busFactor/pythonBlameLinesInRepo.py rename to unintegrated-python/busFactor/pythonBlameLinesInRepo.py diff --git a/organizationHistory/pythonBlameHistoryTree.py b/unintegrated-python/organizationHistory/pythonBlameHistoryTree.py similarity index 100% rename from organizationHistory/pythonBlameHistoryTree.py rename to unintegrated-python/organizationHistory/pythonBlameHistoryTree.py diff --git a/views.py b/unintegrated-python/views.py similarity index 100% rename from views.py rename to unintegrated-python/views.py
diff --git a/test/test_ghtorrent.py b/test/test_ghtorrent.py --- a/test/test_ghtorrent.py +++ b/test/test_ghtorrent.py @@ -1,6 +1,5 @@ import os import pytest -import pandas @pytest.fixture def ghtorrent(): @@ -10,10 +9,10 @@ def ghtorrent(): return ghdata.GHTorrent(dbstr) def test_repoid(ghtorrent): - assert ghtorrent.repoid('rails', 'rails') == 78852 + assert ghtorrent.repoid('rails', 'rails') >= 1000 def test_userid(ghtorrent): - assert ghtorrent.userid('howderek') == 417486 + assert ghtorrent.userid('howderek') >= 1000 """ Pandas testing format @@ -47,7 +46,7 @@ def test_committer_locations(ghtorrent): assert ghtorrent.committer_locations(ghtorrent.repoid('mavam', 'stat-cookbook')).isin(["Berkeley, CA"]).any def test_issue_response_time(ghtorrent): - assert ghtorrent.issue_response_time(ghtorrent.repoid('hadley', 'devtools')).isin(["2013-09-16 17:00:54"]).any + assert ghtorrent.issue_response_time(ghtorrent.repoid('hadley', 'devtools')).isin([1]).any def test_pull_acceptance_rate(ghtorrent): assert ghtorrent.pull_acceptance_rate(ghtorrent.repoid('akka', 'akka')).isin([0.5]).any diff --git a/test/test_github.py b/test/test_github.py --- a/test/test_github.py +++ b/test/test_github.py @@ -3,6 +3,20 @@ import pandas @pytest.fixture -def publicwww(): - import ghdata - return ghdata.GitHub(os.getenv("GITHUB_API_KEY")) \ No newline at end of file +def github(): + import ghdata + return ghdata.GitHubAPI(os.getenv("GITHUB_API_KEY")) + +""" +Pandas testing format + +assert ghtorrent.<function>(ghtorrent.repoid('owner', 'repo')).isin(['<data that should be in dataframe>']).any + +The tests check if a value is anywhere in the dataframe +""" + +def test_bus_factor(github): + assert github.bus_factor("OSSHealth", "ghdata",start="1-1-17", end="5-12-17").isin(["9"]).any + +# def test_tags(github): +# assert github.tags("OSSHealth", "ghdata").isin(["v0.2"]).any
Comparisons should be updated with new dates

The date edit seems to only affect the base repository. I would expect it to change the rendering for comparisons as well.
2018-01-17T16:56:47Z
[]
[]
chaoss/augur
161
chaoss__augur-161
[ "156" ]
0d64909e8f9efedfcbeec29ed21ee05bce8c61e0
diff --git a/augur/__init__.py b/augur/__init__.py --- a/augur/__init__.py +++ b/augur/__init__.py @@ -1,3 +1,5 @@ +#SPDX-License-Identifier: MIT + # Metadata from .metadata import __version__ @@ -6,12 +8,6 @@ # Classes from .application import Application -# from .downloads import Downloads -# from .ghtorrent import GHTorrent -# from .ghtorrentplus import GHTorrentPlus -# from .git import Git -# from .githubapi import GitHubAPI -# from .librariesio import LibrariesIO -# from .localcsv import LocalCSV -# from .publicwww import PublicWWW -# from .server import Server + +# Plugins +from .augurplugin import AugurPlugin diff --git a/augur/application.py b/augur/application.py --- a/augur/application.py +++ b/augur/application.py @@ -1,9 +1,16 @@ +#SPDX-License-Identifier: MIT +""" +Handles global context, I/O, and configuration +""" + import os import time import multiprocessing as mp import logging import configparser as configparser import json +import importlib +import pkgutil import coloredlogs from beaker.cache import CacheManager from beaker.util import parse_cache_config_options @@ -25,6 +32,11 @@ def updater_process(name, delay): except: raise +def load_plugins(): + if not hasattr(load_plugins, 'already_loaded'): + import augur.plugins + load_plugins.already_loaded = True + class Application(object): """Initalizes all classes form Augur using a config file or environment variables""" @@ -42,6 +54,7 @@ def __init__(self, config_file='augur.config.json', no_config_file=0, descriptio self.__config_bad = False self.__config_file_path = os.path.abspath(os.getenv('AUGUR_CONFIG_FILE', config_file)) self.__config_location = os.path.dirname(self.__config_file_path) + self.__runtime_location = 'runtime/' self.__export_env = os.getenv('AUGUR_ENV_EXPORT', '0') == '1' if os.getenv('AUGUR_ENV_ONLY', '0') != '1' and no_config_file == 0: try: @@ -63,7 +76,6 @@ def __init__(self, config_file='augur.config.json', no_config_file=0, descriptio # Load the config file try: config_text = self.__config_file.read() - config_text = config_text.replace('$(AUGUR)', self.__config_location) self.__config = json.loads(config_text) except json.decoder.JSONDecodeError as e: if not self.__config_bad: @@ -80,11 +92,14 @@ def __init__(self, config_file='augur.config.json', no_config_file=0, descriptio self.__processes = [] # Create cache - cache_config = self.read_config('Cache', 'config', None, { + cache_config = { 'cache.type': 'file', - 'cache.data_dir': 'runtime/cache/', - 'cache.lock_dir': 'runtime/cache/' - }) + 'cache.data_dir': self.path('$(RUNTIME)/cache/'), + 'cache.lock_dir': self.path('$(RUNTIME)/cache/') + } + cache_config.update(self.read_config('Cache', 'config', None, cache_config)) + cache_config['cache.data_dir'] = self.path(cache_config['cache.data_dir']) + cache_config['cache.lock_dir'] = self.path(cache_config['cache.lock_dir']) if not os.path.exists(cache_config['cache.data_dir']): os.makedirs(cache_config['cache.data_dir']) if not os.path.exists(cache_config['cache.lock_dir']): @@ -97,10 +112,37 @@ def __init__(self, config_file='augur.config.json', no_config_file=0, descriptio self.__ghtorrentplus = None self.__githubapi = None self.__git = None + self.__facade = None self.__librariesio = None self.__downloads = None - self.__publicwww = None self.__localCSV = None + self.__metrics_status = None + + # Load plugins + import augur.plugins + + @classmethod + def register_plugin(cls, plugin): + if not hasattr(plugin, 'name'): + raise NameError("{} didn't have a name") + cls.plugins[plugin.name] = 
plugin + + def replace_config_variables(self, string, reverse=False): + variable_map = { + 'AUGUR': self.__config_location, + 'RUNTIME': self.__runtime_location + } + for variable, source in variable_map.items(): + if not reverse: + string = string.replace('$({})'.format(variable), source) + else: + string = string.replace(source, '$({})'.format(variable)) + return string + + def path(self, path): + path = self.replace_config_variables(path) + path = os.path.abspath(os.path.expanduser(path)) + return path def __updater(self, updates=None): if updates is None: @@ -117,10 +159,11 @@ def init_all(self): self.ghtorrentplus() self.githubapi() self.git() + self.facade() self.librariesio() self.downloads() - self.publicwww() self.localcsv() + self.metrics_status() def read_config(self, section, name, environment_variable=None, default=None): value = None @@ -146,6 +189,11 @@ def read_config(self, section, name, environment_variable=None, default=None): logger.debug('{}:{} = {}'.format(section, name, value)) return value + def read_config_path(self, section, name, environment_variable=None, default=None): + path = self.read_config(section, name, environment_variable, default) + path = self.path(path) + return path + def set_config(self, section, name, value): if not section in self.__config: self.__config[section] = {} @@ -207,6 +255,20 @@ def ghtorrent(self): ) return self.__ghtorrent + def facade(self): + from augur.facade import Facade + if self.__facade is None: + logger.debug('Initializing Facade') + self.__facade = Facade( + user=self.read_config('Facade', 'user', 'AUGUR_FACADE_DB_USER', 'root'), + password=self.read_config('Facade', 'pass', 'AUGUR_FACADE_DB_PASS', ''), + host=self.read_config('Facade', 'host', 'AUGUR_FACADE_DB_HOST', '127.0.0.1'), + port=self.read_config('Facade', 'port', 'AUGUR_FACADE_DB_PORT', '3306'), + dbname=self.read_config('Facade', 'name', 'AUGUR_FACADE_DB_NAME', 'facade'), + projects=self.read_config('Facade', 'projects', None, []) + ) + return self.__facade + def ghtorrentplus(self): from augur.ghtorrentplus import GHTorrentPlus if self.__ghtorrentplus is None: @@ -223,7 +285,7 @@ def ghtorrentplus(self): def git(self, update=False): from augur.git import Git storage = self.path_relative_to_config( - self.read_config('Git', 'storage', 'AUGUR_GIT_STORAGE', 'runtime/git_repos/') + self.read_config_path('Git', 'storage', 'AUGUR_GIT_STORAGE', '$(RUNTIME)/git_repos/') ) repolist = self.read_config('Git', 'repositories', None, []) if self.__git is None: @@ -269,13 +331,6 @@ def downloads(self): self.__downloads = Downloads(self.githubapi()) return self.__downloads - def publicwww(self): - from augur.publicwww import PublicWWW - if self.__publicwww is None: - logger.debug('Initializing PublicWWW') - self.__publicwww = PublicWWW(api_key=self.read_config('PublicWWW', 'apikey', 'AUGUR_PUBLIC_WWW_API_KEY', 'None')) - return self.__publicwww - def localcsv(self): from augur.localcsv import LocalCSV if self.__localCSV is None: @@ -283,4 +338,12 @@ def localcsv(self): self.__localCSV = LocalCSV() return self.__localCSV + def metrics_status(self): + from augur.metrics_status import MetricsStatus + if self.__metrics_status is None: + logger.debug('Initializing MetricsStatus') + self.__metrics_status = MetricsStatus(self.githubapi()) + return self.__metrics_status + +Application.plugins = {} diff --git a/augur/augurplugin.py b/augur/augurplugin.py new file mode 100644 --- /dev/null +++ b/augur/augurplugin.py @@ -0,0 +1,17 @@ +#SPDX-License-Identifier: MIT +""" +Provides a class 
that can be used to extend Augur +""" + +class AugurPlugin(object): + """Defines a base class for Augur plugins to implement""" + def __init__(self, config): + self.config = config + + @classmethod + def register(cls, application): + application.register_plugin(cls) + + def create_routes(self, flask_app): + routes = __import__('routes') + routes.create(flask_app) \ No newline at end of file diff --git a/augur/downloads.py b/augur/downloads.py --- a/augur/downloads.py +++ b/augur/downloads.py @@ -1,9 +1,16 @@ +#SPDX-License-Identifier: MIT +""" +Data source that gathers download stats from package managers +""" + import json import pandas as pd import requests import datetime import base64 from augur import logger +from augur.util import annotate + # end imports # (don't remove the above line, it's for a script) @@ -11,14 +18,44 @@ class Downloads(object): """Class for retrieveing download information using APIs and web scrapers""" def __init__(self, githubapi): self.__githubapi = githubapi.api - + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + + ##################################### + ### RISK ### + ##################################### + + + ##################################### + ### VALUE ### + ##################################### + + + ##################################### + ### ACTIVITY ### + ##################################### + + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + @annotate(tag='downloads') def downloads(self, owner, repo): """ - Detects package file and calls correct function for download statistics + Timeseries that returns package file and calls the correct function to the create download statistics :param owner: repo owner username :param repo: repo name + :return: DataFrame with all downloads for that day """ root_dir = self.__githubapi.get_repo((owner + "/" + repo)).get_dir_contents("/") @@ -32,9 +69,10 @@ def downloads(self, owner, repo): def ruby_downloads(self, repo): """ - Returns daily downloads for ruby gems from bestgems.org API + Timeseries of daily downloads for ruby gems from bestgems.org API :param repo: repo name + :return: DataFrame with count of ruby downloads """ r = requests.get("http://bestgems.org/api/v1/gems/%s/daily_downloads.json" % (repo)) raw = r.text @@ -52,6 +90,7 @@ def npm_downloads(self, repo, contents): :param repo: repo name :param contents: contents of package.json + :return: DataFrame with count of npm downloads """ contents = json.loads(json.loads(json.dumps(contents))) name = contents["name"] diff --git a/augur/facade.py b/augur/facade.py new file mode 100644 --- /dev/null +++ b/augur/facade.py @@ -0,0 +1,92 @@ +#SPDX-License-Identifier: MIT +""" +Data source that uses Facade's tables +""" + +import pandas as pd +import sqlalchemy as s +import numpy as np +import re +from augur import logger +from augur.util import annotate +# end imports +# (don't remove the above line, it's for a script) + +class Facade(object): + """Queries Facade""" + + def __init__(self, user, password, host, port, dbname, projects=None): + """ + Connect to the database + + :param dbstr: The [database string](http://docs.sqlalchemy.org/en/latest/core/engines.html) to connect to the GHTorrent database + """ + self.DB_STR = 'mysql+pymysql://{}:{}@{}:{}/{}'.format( + user, password, host, port, dbname + ) + 
logger.debug('Facade: Connecting to {}:{}/{} as {}'.format(host, port, dbname, user)) + self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool) + self.projects = projects + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + + ##################################### + ### RISK ### + ##################################### + + + ##################################### + ### VALUE ### + ##################################### + + + ##################################### + ### ACTIVITY ### + ##################################### + + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + @annotate(tag='downloaded-repos') + def downloaded_repos(self): + repoSQL = s.sql.text(""" + SELECT git AS url, status, projects.name as project_name + FROM repos + JOIN projects + ON repos.projects_id = projects.id + """) + results = pd.read_sql(repoSQL, self.db) + results['url'] = results['url'].apply(lambda datum: datum.split('//')[1]) + if self.projects: + results = results[results.project_name.isin(self.projects)] + return results + + @annotate(tag='lines-changed-minus-whitespace') + def lines_changed_minus_whitespace(self, repo_url, from_commit=None, df=None, rebuild_cache=False): + pass + + @annotate(tag='lines-changed-by-author') + def lines_changed_by_author(self, repo_url): + """ + Makes sure the storageFolder contains updated versions of all the repos + """ + repoSQL = s.sql.text(""" + SELECT author_email, author_date, author_affiliation as affiliation, SUM(added) as additions, SUM(removed) as deletions, SUM(whitespace) as whitespace + FROM analysis_data + WHERE repos_id = (SELECT id FROM repos WHERE git LIKE :repourl LIMIT 1) + GROUP BY repos_id, author_date, author_affiliation, author_email + ORDER BY author_date ASC; + """) + results = pd.read_sql(repoSQL, self.db, params={"repourl": '%{}%'.format(repo_url)}) + return results + + diff --git a/augur/ghtorrent.py b/augur/ghtorrent.py --- a/augur/ghtorrent.py +++ b/augur/ghtorrent.py @@ -1,3 +1,8 @@ +#SPDX-License-Identifier: MIT +""" +Data source that uses the GHTorrent relational database of GitHub activity. +""" + import pandas as pd import sqlalchemy as s import numpy as np @@ -45,7 +50,7 @@ def __single_table_count_by_date(self, table, repo_col='project_id', user_col='a if group_by == "day": return """ SELECT date(created_at) AS "date", COUNT(*) AS "{0}" - FROM {0} + FROM {0} FROM {0} WHERE {1} = :repoid GROUP BY DATE(created_at) @@ -90,7 +95,7 @@ def __sub_table_count_by_date(self, parent_table, sub_table, parent_id, sub_id, :return: Query string """ return """ - SELECT date({1}.created_at) AS "date", COUNT(*) AS counter + SELECT date({1}.created_at) AS "date", COUNT(*) AS {1} FROM {1}, {0} WHERE {1}.{3} = {0}.{2} AND {0}.{4} = :repoid @@ -138,12 +143,16 @@ def userid(self, username): ### GROWTH, MATURITY, AND DECLINE ### ##################################### - @annotate(metric_name='closed-issues') + @annotate(tag='closed-issues') def closed_issues(self, owner, repo=None): """ Subgroup: Issue Resolution - Endpoint: issues/closed - chaoss-metric: closed-issues + + Timeseries of the count of the number of issues closed per week + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. 
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner. + :return: DataFrame with newly closed issues/week """ repoid = self.repoid(owner, repo) issuesClosedSQL = s.sql.text(""" @@ -156,56 +165,31 @@ def closed_issues(self, owner, repo=None): """) return pd.read_sql(issuesClosedSQL, self.db, params={"repoid": str(repoid)}) - def closed_issue_resolution_duration(self, owner, repo=None): - """ - Subgroup: Issue Resolution - Endpoint: issues_with_close - chaoss-metric: closed-issue-resolution-duration - - How long on average each week it takes to close an issue - - :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. - :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with issues/day - """ - repoid = self.repoid(owner, repo) - issuesWithCloseSQL = s.sql.text(""" - SELECT issues.id as "id", - issues.created_at as "date", - DATEDIFF(closed.created_at, issues.created_at) AS "days_to_close" - FROM issues - - JOIN - (SELECT * FROM issue_events - WHERE issue_events.action = "closed") closed - ON issues.id = closed.issue_id - - WHERE issues.repo_id = :repoid""") - return pd.read_sql(issuesWithCloseSQL, self.db, params={"repoid": str(repoid)}) - + @annotate(tag='code-commits') def code_commits(self, owner, repo=None, group_by="week"): """ Subgroup: Code Development - Endpoint: commits - chaoss-metric: code-commits - Timeseries of all the commits on a repo + Timeseries of the count of commits :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with commits/day + :return: DataFrame with new commits/week """ repoid = self.repoid(owner, repo) commitsSQL = s.sql.text(self.__single_table_count_by_date('commits', group_by=group_by)) return pd.read_sql(commitsSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='code-review-iteration') def code_review_iteration(self, owner, repo=None): """ - Number of iterations (being closed and reopened) that a merge request (code review) goes through until it is finally merged + Subgroup: Code Development + + Timeseries of the count of iterations (being closed and reopened) that a merge request (code review) goes through until it is finally merged - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being a merge request's date of creation + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
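Both call conventions described by these docstrings, as a small usage sketch (assuming gh is an already-configured GHTorrent instance; the numeric id is illustrative):

weekly_commits = gh.code_commits('rails', 'rails')       # owner and repo name; repoid() resolves the id
weekly_commits = gh.code_commits(1334)                   # or pass the projects-table id directly as owner
daily_commits  = gh.code_commits(1334, group_by='day')   # group_by switches the SQL template used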
+ :return: DataFrame with iterations/issue for each issue that week """ repoid = self.repoid(owner, repo) @@ -216,7 +200,7 @@ def code_review_iteration(self, owner, repo=None): issues.issue_id AS "issue_id", pull_request_history.pull_request_id AS "pull_request_id", pull_request_history.action AS "action", - COUNT(CASE WHEN action = "closed" THEN 1 ELSE NULL END) AS "count" + COUNT(CASE WHEN action = "closed" THEN 1 ELSE NULL END) AS "iterations" FROM issues, pull_request_history WHERE find_in_set(pull_request_history.action, "closed,merged")>0 AND pull_request_history.pull_request_id IN( @@ -226,57 +210,46 @@ def code_review_iteration(self, owner, repo=None): AND pull_request_history.pull_request_id = issues.issue_id AND issues.pull_request = 1 AND issues.repo_id = :repoid - GROUP BY (issues.created_at) #YEARWEEK to get (iterations (all PRs in repo) / week) instead of (iterations / PR)? + GROUP BY YEARWEEK(issues.created_at) #YEARWEEK to get (iterations (all PRs in repo) / week) instead of (iterations / PR)? """) df = pd.read_sql(codeReviewIterationSQL, self.db, params={"repoid": str(repoid)}) - return pd.DataFrame({'date': df['created_at'], 'iterations': df['count']}) + return pd.DataFrame({'date': df['created_at'], 'iterations': df['iterations']}) + @annotate(tag='contribution-acceptance') def contribution_acceptance(self, owner, repo=None): """ - Rolling ratio between merged pull requests : unmerged pull requests + Subgroup: Community Development - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being a week - """ - repoid = self.repoid(owner, repo) - codeReviewIterationSQL = s.sql.text(""" - SELECT by_PR.created_at as date, - count(CASE WHEN by_PR.action = 'merged' then 1 else null end) / count(CASE WHEN by_PR.action = 'closed' then 1 else null end) as 'ratio' - FROM - (SELECT - DATE(issues.created_at) AS "created_at", - issues.issue_id AS "issue_id", - pull_request_history.pull_request_id AS "pull_request_id", - pull_request_history.action AS "action" - FROM issues, pull_request_history - WHERE find_in_set(pull_request_history.action, "closed,merged")>0 - AND pull_request_history.pull_request_id = issues.issue_id - AND issues.pull_request = 1 - AND issues.repo_id = :repoid - GROUP BY (issues.created_at)) by_PR - GROUP BY YEARWEEK(by_PR.created_at) - """) - - df = pd.read_sql(codeReviewIterationSQL, self.db, params={"repoid": str(repoid)}) + Timeseries of the rolling ratio between merged pull requests over unmerged pull requests + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame with ratio/week + """ + source_df = self.community_engagement(owner, repo) + df = pd.DataFrame() + df['date'] = source_df['date'] + df['acceptance_rate'] = source_df['pull_requests_merged_rate_this_week'] return df - def contributing_github_organizations(self, owner, repo=None): + @annotate(tag='contributing-github-organizations') + def contributing_github_organizations(self, owner, repo=None): #needs clarification about return value """ - All the contributing organizations to a project and the counts of each organization's contributions + Subgroup: Community Development + + Returns of all the contributing organizations to a project and the counts of each organization's contributions - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being an outside contributing organization + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. + :return: DataFrame with each organization's information """ repoid = self.repoid(owner, repo) contributingOrgSQL = s.sql.text(""" SELECT id AS contributing_org, SUM(commits) AS commits, SUM(issues) AS issues, SUM(commit_comments) AS commit_comments, SUM(issue_comments) AS issue_comments, SUM(pull_requests) AS pull_requests, SUM(pull_request_comments) AS pull_request_comments, - SUM(contribution_fields.commits + contribution_fields.issues + contribution_fields.commit_comments + contribution_fields.issue_comments + contribution_fields.pull_requests + contribution_fields.pull_request_comments) AS total, COUNT(DISTINCT contribution_fields.user) AS count + SUM(contribution_fields.commits + contribution_fields.issues + contribution_fields.commit_comments + contribution_fields.issue_comments + contribution_fields.pull_requests + contribution_fields.pull_request_comments) AS total, COUNT(DISTINCT contribution_fields.user) AS distinct_users FROM ( (SELECT organization_members.org_id AS id, commits.author_id AS user, COUNT(*) AS commits, 0 AS issues, 0 AS commit_comments, 0 AS issue_comments, 0 AS pull_requests, 0 AS pull_request_comments FROM organization_members, projects, commits @@ -318,22 +291,21 @@ def contributing_github_organizations(self, owner, repo=None): AND pull_request_comments.user_id = organization_members.user_id GROUP BY pull_request_comments.user_id) ) contribution_fields group by id - having count > 1 + having distinct_users > 1 ORDER BY total DESC """) return pd.read_sql(contributingOrgSQL, self.db, params={"repoid": str(repoid)}) - def first_response_to_issue_duration(self, owner, repo): + @annotate(tag='first-response-to-issue-duration') + def first_response_to_issue_duration(self, owner, repo): #needs clarification about return value """ Subgroup: Issue Resolution - Endpoint: issues/response_time - chaoss-metric: first-response-to-issue-duration - Time to comment by issue + Timeseries of the time to first comment by issue - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being am issue + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
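The new contribution_acceptance() above no longer runs its own SQL; it reuses community_engagement(). An equivalent standalone sketch of that delegation (gh is an assumed, configured GHTorrent instance):

import pandas as pd

source_df = gh.community_engagement('rails', 'rails')
acceptance = pd.DataFrame()
acceptance['date'] = source_df['date']
acceptance['acceptance_rate'] = source_df['pull_requests_merged_rate_this_week']
# e.g. 3 merges against 4 newly opened pull requests in a week gives a rate of 0.75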
+ :return: DataFrame of issues with their response information """ repoid = self.repoid(owner, repo) issueCommentsSQL = s.sql.text(""" @@ -359,25 +331,30 @@ def first_response_to_issue_duration(self, owner, repo): rs = pd.read_sql(issueCommentsSQL, self.db, params={"repoid": str(repoid)}) return rs - def forks(self, owner, repo=None, group_by="week"): + @annotate(tag='forks') + def forks(self, owner, repo=None, group_by="week"): """ Subgroup: Code Development - chaoss-metric: forks + Timeseries of when a repo's forks were created + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with forks/day + :return: DataFrame with new forks/week """ repoid = self.repoid(owner, repo) forksSQL = s.sql.text(self.__single_table_count_by_date('projects', 'forked_from', 'owner_id', group_by=group_by)) return pd.read_sql(forksSQL, self.db, params={"repoid": str(repoid)}).drop(0) - def maintainer_response_to_merge_request_duration(self, owner, repo=None): + @annotate(tag='maintainer-response-to-merge-request-duration') + def maintainer_response_to_merge_request_duration(self, owner, repo=None): #needs clarification on return value """ - Duration of time between a merge request being created and a maintainer commenting on that request + Subgroup: Code Development + + Timeseries of duration of time between a merge request being created and a maintainer commenting on that request - :param owner: The name of the project owner - :param repo: The name of the repo + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with each row being a week """ repoid = self.repoid(owner, repo) @@ -402,13 +379,16 @@ def maintainer_response_to_merge_request_duration(self, owner, repo=None): df = pd.read_sql(maintainerResponseToMRSQL, self.db, params={"repoid": str(repoid)}) return df.iloc[:, 0:2] - def new_contributing_github_organizations(self, owner, repo=None): + @annotate(tag='new-contributing-github-organizations') + def new_contributing_github_organizations(self, owner, repo=None): #needs clarification about return value """ - Number of new contributing organizations on a certain date + Subgroup: Community Growth - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being a week + Timeseries of information about new contributing organizations on a certain date + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame with each organization's information """ repoid = self.repoid(owner, repo) @@ -416,7 +396,7 @@ def new_contributing_github_organizations(self, owner, repo=None): SELECT fields.date AS "date", fields.id AS "contributing_org", - count(DISTINCT fields.user) AS count + count(DISTINCT fields.user) AS distinct_users FROM ( (SELECT organization_members.org_id AS id, commits.created_at AS date, commits.author_id AS user FROM organization_members, projects, commits WHERE projects.id = :repoid @@ -457,7 +437,7 @@ def new_contributing_github_organizations(self, owner, repo=None): AND pull_request_comments.user_id = organization_members.user_id GROUP BY pull_request_comments.user_id)) fields Group BY contributing_org - HAVING count > 1 + HAVING distinct_users > 1 ORDER BY YEARWEEK(date) """) df = pd.read_sql(contributingOrgSQL, self.db, params={"repoid": str(repoid)}) @@ -468,48 +448,46 @@ def new_contributing_github_organizations(self, owner, repo=None): numOrgs = np.append(numOrgs, count) return pd.DataFrame({'date': df["date"], 'organizations': numOrgs}) + @annotate(tag='open-issues') def open_issues(self, owner, repo=None, group_by="week"): """ Subgroup: Individual Diversity - Endpoint: issues - chaoss-metric: open-issues - Timeseries of issues opened per day + Timeseries of the count of newly issues opened per week :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with issues/day + :return: DataFrame with opened issues/week """ repoid = self.repoid(owner, repo) issuesSQL = s.sql.text(self.__single_table_count_by_date('issues', 'repo_id', 'reporter_id', group_by=group_by)) return pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='pull-request-comments') def pull_request_comments(self, owner, repo=None): """ Subgroup: Code Development - chaoss-metric: pull-request-comments - Timeseries of pull request comments + Timeseries of the count of new pull request comments :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with new by week + :return: DataFrame with new pull request comments/week """ repoid = self.repoid(owner, repo) pullRequestCommentsSQL = s.sql.text(self.__sub_table_count_by_date("pull_requests", "pull_request_comments", "pullreq_id", "pull_request_id", "base_repo_id")) return pd.read_sql(pullRequestCommentsSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='pull-requests-open') def pull_requests_open(self, owner, repo=None): """ Subgroup: Code Development - Endpoint: pulls - chaoss-metric: pull-requests-open - Timeseries of pull requests creation, also gives their associated activity + Timeseries of pull requests creation and their associated activity :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
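Substituting the pull_request_comments() arguments above into the __sub_table_count_by_date template yields roughly this query (the grouping clause sits outside the hunk shown earlier, so it is omitted here):

rendered_sql = """
    SELECT date(pull_request_comments.created_at) AS "date", COUNT(*) AS pull_request_comments
    FROM pull_request_comments, pull_requests
    WHERE pull_request_comments.pull_request_id = pull_requests.pullreq_id
    AND pull_requests.base_repo_id = :repoid
"""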
- :return: DataFrame with pull requests by day + :return: DataFrame with pull request information/week """ repoid = self.repoid(owner, repo) pullsSQL = s.sql.text(""" @@ -538,40 +516,72 @@ def pull_requests_open(self, owner, repo=None): ### ACTIVITY ### ##################################### - def watchers(self, owner, repo=None, group_by="week"): + @annotate(tag='issue-comments') + def issue_comments(self, owner, repo=None): """ - Timeseries of when people starred a repo + Timeseries of the count of new issue comments :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with stargazers/day + :return: DataFrame with new issue comments/week """ repoid = self.repoid(owner, repo) - stargazersSQL = s.sql.text(self.__single_table_count_by_date('watchers', 'repo_id', 'user_id', group_by=group_by)) - df = pd.read_sql(stargazersSQL, self.db, params={"repoid": str(repoid)}) - df.drop(df.index[:1], inplace=True) - return df + issueCommentsSQL = s.sql.text(self.__sub_table_count_by_date("issues", "issue_comments", "issue_id", "issue_id", "repo_id")) + return pd.read_sql(issueCommentsSQL, self.db, params={"repoid": str(repoid)}) - def issue_comments(self, owner, repo=None): + @annotate(tag='pull-requests-made-closed') + def pull_requests_made_closed(self, owner, repo=None): """ - Timeseries of issue comments + Timeseries of the ratio of pull requests made/closed :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with new by week + :return: DataFrame with the ratio of pull requests made/closed """ repoid = self.repoid(owner, repo) - issueCommentsSQL = s.sql.text(self.__sub_table_count_by_date("issues", "issue_comments", "issue_id", "issue_id", "repo_id")) - return pd.read_sql(issueCommentsSQL, self.db, params={"repoid": str(repoid)}) + pullRequestsMadeClosedSQL = s.sql.text(""" + SELECT DATE(closed_on) AS "date", CAST(num_opened AS DECIMAL)/CAST(num_closed AS DECIMAL) AS "rate" + FROM + (SELECT COUNT(DISTINCT pull_request_id) AS num_opened, DATE(pull_request_history.created_at) AS opened_on + FROM pull_request_history + JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id + WHERE action = 'opened' AND pull_requests.base_repo_id = :repoid + GROUP BY opened_on) opened + JOIN + (SELECT count(distinct pull_request_id) AS num_closed, DATE(pull_request_history.created_at) AS closed_on + FROM pull_request_history + JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id + WHERE action = 'closed' + AND pull_requests.base_repo_id = :repoid + GROUP BY closed_on) closed + ON closed.closed_on = opened.opened_on + """) + return pd.read_sql(pullRequestsMadeClosedSQL, self.db, params={"repoid": str(repoid)}) + + @annotate(tag='watchers') + def watchers(self, owner, repo=None, group_by="week"): + """ + Returns of the count of people who starred the repo on that date + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame with new stargazers + """ + repoid = self.repoid(owner, repo) + stargazersSQL = s.sql.text(self.__single_table_count_by_date('watchers', 'repo_id', 'user_id', group_by=group_by)) + df = pd.read_sql(stargazersSQL, self.db, params={"repoid": str(repoid)}) + df.drop(df.index[:1], inplace=True) + return df ##################################### ### EXPERIMENTAL ### ##################################### # COMMIT RELATED + @annotate(tag='commits100') def commits100(self, owner, repo=None, group_by="week"): """ - Timeseries of all the commits on a repo + Timeseries of the count of commits, limited to the first 100 overall :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. @@ -583,11 +593,10 @@ def commits100(self, owner, repo=None, group_by="week"): tem = temp['commits'] > 100 return temp[tem].reset_index(drop=True) + @annotate(tag='commit-comments') def commit_comments(self, owner, repo=None, group_by="week"): """ - augur-metric: commit-comments - - Timeseries of commit comments + Timeseries of the count of new commit comments :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. @@ -597,16 +606,17 @@ def commit_comments(self, owner, repo=None, group_by="week"): commitCommentsSQL = s.sql.text(self.__sub_table_count_by_date("commits", "commit_comments", "id", "commit_id", "project_id")) return pd.read_sql(commitCommentsSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='committer-locations') def committer_locations(self, owner, repo=None): """ - Return committers and their locations + Returns committers and their locations - @todo: Group by country code instead of users, needs the new schema :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. - :return: DataFrame with users and locations sorted by commtis + :return: DataFrame with users and locations sorted by descending count of commits """ + #TODO: Group by country code instead of users, needs the new schema repoid = self.repoid(owner, repo) rawContributionsSQL = s.sql.text(""" SELECT users.login, users.location, COUNT(*) AS "commits" @@ -621,19 +631,18 @@ def committer_locations(self, owner, repo=None): """) return pd.read_sql(rawContributionsSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='total-committers') def total_committers(self, owner, repo=None): """ - augur-metric: total-committers - - Number of total committers as of each week + Timeseries of total committers as of each week - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being a week + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
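A toy illustration of the filter commits100() applies above: only rows whose weekly commit count exceeds 100 survive (values are made up):

import pandas as pd

temp = pd.DataFrame({'date': ['2017-01-01', '2017-01-08', '2017-01-15'], 'commits': [42, 180, 7]})
over_100 = temp[temp['commits'] > 100].reset_index(drop=True)   # keeps only the 180-commit week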
+ :return: DataFrame with total committers/week """ repoid = self.repoid(owner, repo) totalCommittersSQL = s.sql.text(""" - SELECT total_committers.created_at AS "date", COUNT(total_committers.author_id) total_total_committers + SELECT total_committers.created_at AS "date", COUNT(total_committers.author_id) total_committers FROM ( SELECT author_id, MIN(DATE(created_at)) created_at FROM commits @@ -643,13 +652,18 @@ def total_committers(self, owner, repo=None): GROUP BY YEARWEEK(total_committers.created_at) """) df = pd.read_sql(totalCommittersSQL, self.db, params={"repoid": str(repoid)}) - df['total_total_committers'] = df['total_total_committers'].cumsum() + df['total_committers'] = df['total_committers'].cumsum() return df # ISSUE RELATED + @annotate(tag='issue-activity') def issue_activity(self, owner, repo=None): """ - augur-metric: issue-activity + Timeseries of issue related activity: issues opened, closed, reopened, and currently open + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. + :return: DataFrame with total committers/week """ repoid = self.repoid(owner, repo) issueActivity = s.sql.text(""" @@ -659,6 +673,7 @@ def issue_activity(self, owner, repo=None): WHERE issues.repo_id = :repoid GROUP BY YEARWEEK(issues.created_at) """) + #TODO: clean this up df = pd.read_sql(issueActivity, self.db, params={"repoid": str(repoid)}) df = df.assign(issues_open = 0) globalIssuesOpened = 0 @@ -685,15 +700,14 @@ def issue_activity(self, owner, repo=None): return df4 # PULL REQUEST RELATED + @annotate(tag='pull-request-acceptance-rate') def pull_request_acceptance_rate(self, owner, repo=None): """ - augur-metric: pull-request-acceptance-rate - - Timeseries of pull request acceptance rate (Number of pull requests merged on a date over Number of pull requests opened on a date) + Timeseries of pull request acceptance rate (expressed as the ratio of pull requests merged on a date to the count of pull requests opened on a date) :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with the pull acceptance rate and the dates + :return: DataFrame with ratio/day """ repoid = self.repoid(owner, repo) pullAcceptanceSQL = s.sql.text(""" @@ -716,10 +730,13 @@ def pull_request_acceptance_rate(self, owner, repo=None): return pd.read_sql(pullAcceptanceSQL, self.db, params={"repoid": str(repoid)}) # COMMUNITY / CONRIBUTIONS + @annotate(tag='community-age') def community_age(self, owner, repo=None): """ Information helpful to determining a community's age + (Currently broken) + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with the first event of each type (commits, fork, ...) 
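The running-total pattern used by total_committers() and issue_activity() above, shown on a tiny made-up frame:

import pandas as pd

df = pd.DataFrame({'date': ['2017-01-01', '2017-01-08', '2017-01-15'], 'total_committers': [3, 2, 4]})
df['total_committers'] = df['total_committers'].cumsum()   # 3, 5, 9 -- committers seen so far, per week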
@@ -753,19 +770,38 @@ def community_age(self, owner, repo=None): return pd.read_sql(communityAgeSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='community-engagement') def community_engagement(self, owner, repo): """ - Lots of information about issues and pull requests + Timeseries with lots of information about issues and pull requests + + DataFrame returns these columns: + date + issues_opened + issues_closed + pull_requests_opened + pull_requests_merged + pull_requests_closed + issues_opened_total + issues_closed_total + issues_closed_rate_this_window + issues_closed_rate_total + issues_delta + issues_open + pull_requests_opened_total + pull_requests_closed_total + pull_requests_closed_rate_this_window + pull_requests_closed_rate_total + pull_requests_delta + pull_requests - TODO: More documentation - - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being a week + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. + :return: DataFrame with the associated information about a repo's activity on that specific date """ repoid = self.repoid(owner, repo) issuesFullSQL = s.sql.text(""" - SELECT DATE(date) as "date", + SELECT STR_TO_DATE(CONCAT(YEARWEEK(DATE,0),' Sunday'), '%X%V %W') as "date", SUM(issues_opened) AS "issues_opened", SUM(issues_closed) AS "issues_closed", SUM(pull_requests_opened) AS "pull_requests_opened", @@ -774,8 +810,8 @@ def community_engagement(self, owner, repo): FROM ( - SELECT issue_events.created_at as "date", - issue_events.action = "closed" AND issues.pull_request = 0 AS issues_closed, + SELECT STR_TO_DATE(CONCAT(YEARWEEK(issue_events.created_at,0),' Sunday'), '%X%V %W') as "date", + issue_events.action = "closed" AND issues.pull_request = 0 AS issues_closed, 0 AS pull_requests_closed, 0 AS pull_requests_merged, issue_events.action = "reopened" AND issues.pull_request = 0 AS issues_opened, @@ -786,12 +822,13 @@ def community_engagement(self, owner, repo): LEFT JOIN pull_request_history ON pull_request_history.pull_request_id = issues.pull_request_id WHERE issues.repo_id = :repoid + AND issue_events.action IN ('closed', 'reopened') UNION ALL - SELECT pull_request_history.created_at as "date", + SELECT STR_TO_DATE(CONCAT(YEARWEEK(pull_request_history.created_at,0),' Sunday'), '%X%V %W') as "date", 0 AS issues_closed, - pull_request_history.action = "closed" AND issues.pull_request = 1 AS pull_requests_closed, + pull_request_history.action = "closed" AND issues.pull_request = 1 AS pull_requests_closed, pull_request_history.action = "merged" AND issues.pull_request = 1 AS pull_requests_merged, 0 AS issues_opened, pull_request_history.action = "reopened" AND issues.pull_request = 1 AS pull_requests_opened @@ -799,10 +836,11 @@ def community_engagement(self, owner, repo): LEFT JOIN pull_request_history ON pull_request_history.pull_request_id = issues.pull_request_id WHERE issues.repo_id = :repoid + AND pull_request_history.action IN ('closed', 'merged', 'reopened') UNION ALL - SELECT issues.created_at as "date", + SELECT STR_TO_DATE(CONCAT(YEARWEEK(issues.created_at ,0),' Sunday'), '%X%V %W') as "date", 0 AS issues_closed, 0 AS pull_requests_closed, 0 AS pull_requests_merged, @@ -814,37 +852,41 @@ def community_engagement(self, owner, repo): ) summary - GROUP BY YEARWEEK(date) + GROUP BY YEARWEEK(date, 1) + 
+ """) counts = pd.read_sql(issuesFullSQL, self.db, params={"repoid": str(repoid)}) - # counts.drop(0, inplace=True) + counts.drop(0, inplace=True) counts['issues_opened_total'] = counts.issues_opened.cumsum() counts['issues_closed_total'] = counts.issues_closed.cumsum() - counts['issues_closed_rate_this_window'] = counts.issues_closed / counts.issues_opened + counts['issues_closed_rate_this_week'] = counts.issues_closed / counts.issues_opened counts['issues_closed_rate_total'] = counts.issues_closed_total / counts.issues_opened_total counts['issues_delta'] = counts.issues_opened - counts.issues_closed counts['issues_open'] = counts['issues_delta'].cumsum() counts['pull_requests_opened_total'] = counts.pull_requests_opened.cumsum() counts['pull_requests_closed_total'] = counts.pull_requests_closed.cumsum() - counts['pull_requests_closed_rate_this_window'] = counts.pull_requests_closed / counts.pull_requests_opened + counts['pull_requests_merged_total'] = counts.pull_requests_merged.cumsum() + counts['pull_requests_closed_rate_this_week'] = counts.pull_requests_closed / counts.pull_requests_opened + counts['pull_requests_merged_rate_this_week'] = counts.pull_requests_merged / counts.pull_requests_opened counts['pull_requests_closed_rate_total'] = counts.pull_requests_closed_total / counts.pull_requests_opened_total + counts['pull_requests_merged_rate_total'] = counts.pull_requests_merged_total / counts.pull_requests_opened_total counts['pull_requests_delta'] = counts.pull_requests_opened - counts.pull_requests_closed counts['pull_requests_open'] = counts['pull_requests_delta'].cumsum() return counts + @annotate(tag='contributors') def contributors(self, owner, repo=None): """ - augur-metric: contributors - All the contributors to a project and the counts of their contributions :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
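The merged/opened rate columns added above are what the new contribution_acceptance() metric reads. Their arithmetic on a tiny made-up frame:

import pandas as pd

counts = pd.DataFrame({'pull_requests_opened': [4, 2], 'pull_requests_merged': [1, 2]})
counts['pull_requests_opened_total'] = counts.pull_requests_opened.cumsum()                                        # 4, 6
counts['pull_requests_merged_total'] = counts.pull_requests_merged.cumsum()                                        # 1, 3
counts['pull_requests_merged_rate_this_week'] = counts.pull_requests_merged / counts.pull_requests_opened          # 0.25, 1.0
counts['pull_requests_merged_rate_total'] = counts.pull_requests_merged_total / counts.pull_requests_opened_total  # 0.25, 0.5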
- :return: DataFrame with users id, users login, and their contributions by type + :return: DataFrame with user's id and contributions by type, separated by user """ repoid = self.repoid(owner, repo) contributorsSQL = s.sql.text(""" - SELECT id AS user, SUM(commits) AS commits, SUM(issues) AS issues, + SELECT users.login as name, a.id AS user, SUM(commits) AS commits, SUM(issues) AS issues, SUM(commit_comments) AS commit_comments, SUM(issue_comments) AS issue_comments, SUM(pull_requests) AS pull_requests, SUM(pull_request_comments) AS pull_request_comments, SUM(a.commits + a.issues + a.commit_comments + a.issue_comments + a.pull_requests + a.pull_request_comments) AS total @@ -861,22 +903,32 @@ def contributors(self, owner, repo=None): (SELECT actor_id AS id, 0, 0, 0, 0, COUNT(*) AS pull_requests, 0 FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.id WHERE pull_request_history.action = 'opened' AND pull_requests.`base_repo_id` = :repoid GROUP BY actor_id) UNION ALL (SELECT user_id AS id, 0, 0, 0, 0, 0, COUNT(*) AS pull_request_comments FROM pull_request_comments JOIN pull_requests ON pull_requests.base_commit_id = pull_request_comments.commit_id WHERE pull_requests.base_repo_id = :repoid GROUP BY user_id) - ) a - WHERE id IS NOT NULL - GROUP BY id + ) a JOIN users ON users.id = a.id + WHERE a.id IS NOT NULL + GROUP BY a.id ORDER BY total DESC; """) return pd.read_sql(contributorsSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='contributions') def contributions(self, owner, repo=None, userid=None): """ - augur metric: contributions Timeseries of all the contributions to a project, optionally limited to a specific user + DataFrame has these columns: + date + commits + pull_requests + issues + commit_comments + pull_request_comments + issue_comments + tota + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table :param repo: The name of the repo. Unneeded if repository id was passed as owner. :param userid: The id of user if you want to limit the contributions to a specific user. - :return: DataFrame with all of the contributions seperated by day. + :return: DataFrame with all of the contributions separated by day """ repoid = self.repoid(owner, repo) rawContributionsSQL = """ @@ -932,7 +984,7 @@ def classify_contributors(self, owner, repo=None): :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with the login and role of contributors + :return: DataFrame with the id and role of contributors """ repoid = self.repoid(owner, repo) contributors = self.contributors(repoid, repo=None) @@ -957,7 +1009,15 @@ def classify(row): roles = contributors.apply(classify, axis=1) return roles + @annotate(tag='project-age') def project_age(self, owner, repo=None): + """ + Date of the project's creation + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame with the date of the project's creation + """ repoid = self.repoid(owner, repo) projectAgeSQL = s.sql.text(""" SELECT date(created_at) AS "date", COUNT(*) AS "{0}" @@ -970,9 +1030,14 @@ def project_age(self, owner, repo=None): # DEPENDENCY RELATED # OTHER - def fakes(self, owner, repo=None): + @annotate(tag='fakes') + def fakes(self, owner, repo=None): #should this be for users who contribute to the given repo? """ - augur-metric: fakes + Timeseries of new fake users per week + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. + :return: DataFrame with new fake users/week """ repoid = self.repoid(owner, repo) contributorsSQL = s.sql.text(""" @@ -982,10 +1047,3 @@ def fakes(self, owner, repo=None): GROUP BY YEARWEEK(date) """) return pd.read_sql(contributorsSQL, self.db, params={"repoid": str(repoid)}) - - def ghtorrent_range(self): - ghtorrentRangeSQL = s.sql.text(""" - SELECT MIN(date(created_at)) AS "min_date", MAX(date(created_at)) AS "max_date" - FROM commits - """) - return pd.read_sql(ghtorrentRangeSQL, self.db) diff --git a/augur/ghtorrentplus.py b/augur/ghtorrentplus.py --- a/augur/ghtorrentplus.py +++ b/augur/ghtorrentplus.py @@ -1,8 +1,14 @@ +#SPDX-License-Identifier: MIT +""" +Data source that extends GHTorrent with summary tables +""" + import pandas as pd import sqlalchemy as s import numpy as np import re from augur import logger +from augur.util import annotate # end imports # (don't remove the above line, it's for a script) @@ -40,11 +46,21 @@ def update(self): ##################################### ### GROWTH, MATURITY, AND DECLINE ### ##################################### - + + @annotate(tag='closed-issue-resolution-duration') def closed_issue_resolution_duration(self, owner, repo=None): """ - Endpoint: issue_close_time - augur-metric: closed-issue-resolution-duration + Returns a DataFrame with these columns: + id + repo_id + closed + pull_request + minutes_to_close + z-score + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame with the above columns """ repoid = self.ghtorrent.repoid(owner, repo) issuesClosedSQL = s.sql.text(""" diff --git a/augur/git.py b/augur/git.py --- a/augur/git.py +++ b/augur/git.py @@ -1,6 +1,6 @@ #SPDX-License-Identifier: MIT """ -Analyzes Git repos directly using dulwich +Analyzes Git repos directly using git """ import os @@ -11,7 +11,7 @@ import pandas as pd import git from lockfile import LockFile, AlreadyLocked -from augur.util import logger, get_cache +from augur.util import logger, get_cache, annotate # end imports # (don't remove the above line, it's for a script @@ -101,6 +101,12 @@ def __init__(self, list_of_repositories, storage_folder, csv, cache=None): self.is_updater = False def get_repo(self, repo_url): + """ + Create a repo object from the given url + + :param repo_url: URL of the repository + :return: a Repo obeject + """ if repo_url in self._git_repos: return self._git_repos[repo_url] else: @@ -129,9 +135,41 @@ def update(self): self.is_updater = False + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + + ##################################### + ### RISK ### + ##################################### + + ##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + @annotate(tag='downloaded-repos') def downloaded_repos(self): + """ + Get all downloaded repositories and the date they were last updated + :return: a JSON object with the URL and date of last update for all downloaded repos + """ downloaded = [] for repo_url in self._repo_urls: repo = self.get_repo(repo_url) @@ -148,7 +186,7 @@ def downloaded_repos(self): return downloaded - + @annotate(tag='lines-changed-minus-whitespace') def lines_changed_minus_whitespace(self, repo_url, from_commit=None, df=None, rebuild_cache=False): """ Makes sure the storageFolder contains updated versions of all the repos @@ -214,7 +252,8 @@ def heavy_lifting(): results = new_results return results - def changes_by_author(self, repo_url, freq='M', rebuild_cache=False): + @annotate(tag='lines-changed-by-author') + def lines_changed_by_author(self, repo_url, freq='M', rebuild_cache=False): """ Makes sure the storageFolder contains updated versions of all the repos """ @@ -229,4 +268,4 @@ def heavy_lifting(): if rebuild_cache: self.__cache.remove_value(key='cba-{}-{}'.format(freq, repo_url)) results = self.__cache.get(key='cba-{}-{}'.format(freq, repo_url), createfunc=heavy_lifting) - return results \ No newline at end of file + return result5 diff --git a/augur/githubapi.py b/augur/githubapi.py --- a/augur/githubapi.py +++ b/augur/githubapi.py @@ -1,3 +1,8 @@ +#SPDX-License-Identifier: MIT +""" +Data source that uses the GitHub API +""" + from augur.localcsv import LocalCSV import json import re @@ -8,6 +13,7 @@ import datetime import requests from augur import logger +from augur.util import annotate # end imports # (don't remove the above line, it's for a script) @@ -33,14 +39,14 @@ def __init__(self, api_key): ### GROWTH, MATURITY, AND DECLINE ### ##################################### + @annotate(tag='lines-of-code-changed') def lines_of_code_changed(self, owner, repo=None): """ - 
chaoss-metric: lines-of-code-changed - Additions and deletions each week + Timeseries of the count of lines added, deleted, and the net change each week :param owner: The name of the project owner :param repo: The name of the repo - :return: DataFrame with each row being am issue + :return: DataFrame with the associated lines changed information/week """ # get the data we need from the GitHub API # see <project_root>/augur/githubapi.py for examples using the GraphQL API @@ -76,81 +82,91 @@ def lines_of_code_changed(self, owner, repo=None): ### EXPERIMENTAL ### ##################################### - def bus_factor(self, owner, repo, filename=None, start=None, end=None, threshold=50): + @annotate(tag='bus-factor') + def bus_factor(self, owner, repo, threshold=50): """ - augur-metric: bus-factor - Calculates bus factor by adding up percentages from highest to lowest until they exceed threshold :param owner: repo owner username :param repo: repo name - :param filename: optional; file or directory for function to run on - :param start: optional; start time for analysis - :param end: optional; end time for analysis :param threshold: Default 50; """ + cursor = "" + url = "https://api.github.com/graphql" + commit_count = [] + hasNextPage = True + threshold = threshold / 100 + while hasNextPage: + query = {"query" : + """ + query{ + repository(name: "%s", owner: "%s") { + ref(qualifiedName: "master") { + target { + ... on Commit { + id + history(first: 100%s) { + pageInfo { + hasNextPage + } + edges { + cursor + node { + author { + email + } + } + } + } + } + } + } + } + } + """ % (repo, owner, cursor) + } + r = requests.post(url, auth=requests.auth.HTTPBasicAuth('user', self.GITHUB_API_KEY), json=query) + raw = r.text + data = json.loads(json.loads(json.dumps(raw))) + hasNextPage = data['data']['repository']['ref']['target']['history']['pageInfo']['hasNextPage'] + commits = data['data']['repository']['ref']['target']['history']['edges'] + for i in commits: + commit_count.append({'email' : i['node']['author']['email']}) + cursor = ", after: \"%s\"" % (commits[-1]['cursor']) - if start != None: - start = parse(start) - else: - start = github.GithubObject.NotSet - - if end != None: - end = parse(end) - else: - end = github.GithubObject.NotSet - - commits = self.api.get_repo((owner + "/" + repo)).get_commits(since=start, until=end) - - if filename != None: - self.api.get_repo((owner + "/" + repo)).get_contents(filename) - - df = [] - - if filename != None: - for commit in commits: - for file in commit.files: - if file.filename == filename: - try: - df.append({'userid': commit.author.id}) - except AttributeError: - pass - break - else: - for commit in commits: - try: - df.append({'userid': commit.author.id}) - except AttributeError: - pass - df = pd.DataFrame(df) + df = pd.DataFrame(commit_count) + + total = df.email.count() - df = df.groupby(['userid']).userid.count() / df.groupby(['userid']).userid.count().sum() * 100 + df = df.groupby(['email']).email.count() / df.groupby(['email']).email.count().sum() * 100 i = 0 - for num in df.cumsum(): + for num in df.sort_values(ascending=False).cumsum(): i = i + 1 if num >= threshold: - worst = i break + worst = i - i = 0 + j = 0 for num in df.sort_values(ascending=True).cumsum(): - i = i + 1 + j = j + 1 if num >= threshold: - best = i break + best = j bus_factor = [{'worst': worst, 'best' : best}] return pd.DataFrame(bus_factor) + @annotate(tag='major-tags') def major_tags(self, owner, repo): """ - Returns dates and names of major version (according to 
semver) tags. May return blank if no major versions + Timeseries of the dates and names of major version (according to semver) tags. May return blank if no major versions :param owner: repo owner username :param repo: repo name + :return: DataFrame with major versions and their release date """ cursor = "null" tags_list = [] @@ -205,13 +221,15 @@ def major_tags(self, owner, repo): return pd.DataFrame(major_versions) + @annotate(tag='tags') def tags(self, owner, repo, raw=False): """ - Returns dates and names of tags + Timeseries of the dates and names of tags :param owner: repo owner username :param repo: repo name :param raw: Default False; Returns list of dicts + :return: DataFrame with all tags and their release date """ cursor = "null" diff --git a/augur/librariesio.py b/augur/librariesio.py --- a/augur/librariesio.py +++ b/augur/librariesio.py @@ -1,8 +1,14 @@ +""" +Data source that uses the LibrariesIO dependency data +""" + import requests import pandas as pd import numpy as np from bs4 import BeautifulSoup from augur import logger +from augur.util import annotate + # end imports # (don't remove the above line, it's for a script) @@ -41,10 +47,9 @@ def __init__(self, api_key, githubapi): ### EXPERIMENTAL ### ##################################### - + @annotate(tag='dependencies') def dependencies(self, owner, repo): """ - Finds the packages that a project depends on :param owner: GitHub username of the owner of the repo @@ -55,6 +60,7 @@ def dependencies(self, owner, repo): r = requests.get(url, params={"api_key": self.API_KEY}) return r.json() + @annotate(tag='dependency-stats') def dependency_stats(self, owner, repo): """ Finds the number of dependencies, dependant projects, and dependent repos by scrapping it off of the libraries.io website @@ -110,9 +116,9 @@ def dependency_stats(self, owner, repo): return final_data + @annotate(tag='dependents') def dependents(self, owner, repo): """ - Finds the packages depend on this repository :param owner: GitHub username of the owner of the repo diff --git a/augur/localcsv.py b/augur/localcsv.py --- a/augur/localcsv.py +++ b/augur/localcsv.py @@ -1,4 +1,7 @@ #SPDX-License-Identifier: MIT +""" +Loads small included datasets +""" import pandas as pd import tldextract from urllib.parse import urlparse diff --git a/augur/metadata.py b/augur/metadata.py --- a/augur/metadata.py +++ b/augur/metadata.py @@ -1 +1 @@ -__version__ = '0.6.1' \ No newline at end of file +__version__ = '0.7.0' \ No newline at end of file diff --git a/augur/metrics_status.py b/augur/metrics_status.py new file mode 100644 --- /dev/null +++ b/augur/metrics_status.py @@ -0,0 +1,286 @@ +#SPDX-License-Identifier: MIT +""" +Analyzes Augur source and CHAOSS repos to determine metric implementation status +""" + +import re +import json +import glob +import requests +from augur.util import metric_metadata + +class FrontendStatusExtractor(object): + + def __init__(self): + self.api_text = open("frontend/app/AugurAPI.js", 'r').read() + self.attributes = re.findall(r'(?:(GitEndpoint|Endpoint|Timeseries)\(repo, )\'(.*)\', \'(.*)\'', self.api_text) + self.timeseries_attributes = [attribute for attribute in self.attributes if attribute[0] == "Timeseries"] + self.endpoint_attributes = [attribute for attribute in self.attributes if attribute[0] == "Endpoint"] + self.git_endpoint_attributes = [attribute for attribute in self.attributes if attribute[0] == "GitEndpoint"] + + def determine_frontend_status(self, endpoint, metric_type): + attribute = None + + if metric_type is "timeseries": + 
attribute = next((attribute[1] for attribute in self.timeseries_attributes if attribute[2] in endpoint), None) + + elif metric_type is "metric": + attribute = next((attribute[1] for attribute in self.endpoint_attributes if attribute[2] in endpoint), None) + + elif metric_type is "git": + attribute = next((attribute[1] for attribute in self.git_endpoint_attributes if attribute[2] in endpoint), None) + + if attribute is not None: + status = 'implemented' + else: + status = 'unimplemented' + + return status + +class Metric(object): + + def __init__(self): + self.ID = 'none' + self.tag = 'none' + self.name = 'none' + self.group = 'none' + self.backend_status = 'unimplemented' + self.frontend_status = 'unimplemented' + self.endpoint = 'none' + self.source = 'none' + self.metric_type = 'none' + self.url = '/' + self.is_defined = 'false' + +class GroupedMetric(Metric): + + def __init__(self, raw_name, group): + Metric.__init__(self) + self.name = re.sub('/', '-', re.sub(r'-$|\*', '', re.sub('-', ' ', raw_name).title())) + self.tag = re.sub(' ', '-', self.name).lower() + self.ID = re.sub(r'-$|\*', '', self.source + '-' + self.tag) + self.group = group + +class ImplementedMetric(Metric): + + def __init__(self, metadata, frontend_status_extractor): + Metric.__init__(self) + + self.ID = metadata['ID'] + self.tag = metadata['tag'] + self.name = metadata['metric_name'] + self.backend_status = 'implemented' + self.source = metadata['source'] + self.group = "experimental" + + if 'metric_type' in metadata: + self.metric_type = metadata['metric_type'] + else: + self.metric_type = 'metric' + + if 'endpoint' in metadata: + self.endpoint = metadata['endpoint'] + self.frontend_status = frontend_status_extractor.determine_frontend_status(self.endpoint, self.metric_type) + +class MetricsStatus(object): + + diversity_inclusion_urls = [ + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_communication.md", "has_links": True }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_contribution.md", "has_links": True }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_events.md", "has_links": False }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_governance.md", "has_links": False }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_leadership.md", "has_links": False }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_project_places.md", "has_links": True }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_recognition.md", "has_links": False } + ] + + growth_maturity_decline_urls = [ + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/2_Growth-Maturity-Decline.md", "has_links": True }, + ] + + risk_urls = [ + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/3_Risk.md", "has_links": False }, + ] + + value_urls = [ + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/4_Value.md", "has_links": False }, + ] + + activity_urls = [ + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/activity-metrics-list.md", "has_links": False }, + ] + + activity_repo = "augurlabs/metrics" + + def __init__(self, githubapi): + self.__githubapi = githubapi.api + + self.groups = { + "diversity-inclusion": "Diversity and Inclusion", + 
"growth-maturity-decline": "Growth, Maturity, and Decline", + "risk": "Risk", + "value": "Value", + "activity": "Activity", + "experimental": "Experimental", + "all": "All" + }, + + self.sources = [] + self.metric_types = [] + self.tags = {} + + self.implemented_metrics = [] + + self.raw_metrics_status = [] + self.metadata = [] + + def create_metrics_status(self): + + self.build_implemented_metrics() + + self.diversity_inclusion_metrics = self.create_grouped_metrics(self.diversity_inclusion_urls, "diversity-inclusion") + self.growth_maturity_decline_metrics = self.create_grouped_metrics(self.growth_maturity_decline_urls, "growth-maturity-decline") + self.risk_metrics = self.create_grouped_metrics(self.risk_urls, "risk") + self.value_metrics = self.create_grouped_metrics(self.value_urls, "value") + + self.metrics_by_group = [self.diversity_inclusion_metrics, self.growth_maturity_decline_metrics, self.risk_metrics, self.value_metrics] + + self.activity_metrics = self.create_activity_metrics() + self.metrics_by_group.append(self.activity_metrics) + + self.create_experimental_metrics() + self.metrics_by_group.append(self.experimental_metrics) + + self.copy_implemented_metrics() + + self.find_defined_metrics() + + self.get_raw_metrics_status() + + self.get_metadata() + + def build_implemented_metrics(self): + frontend_status_extractor = FrontendStatusExtractor() + for metric in metric_metadata: + if "ID" in metric.keys(): + self.implemented_metrics.append(ImplementedMetric(metric, frontend_status_extractor)) + + def extract_grouped_metric_names(self, remote): + metric_file = requests.get(remote["raw_content_url"]).text + + regEx = r'^(?!Name)(.*[^-])(?:\ \|)' + if remote["has_links"] == True: + regEx = r'\[(.*?)\]\((?:.*?\.md)\)' + + return re.findall(regEx, metric_file, re.M) + + def create_grouped_metrics(self, remotes_list, group): + remote_names = [] + + for remote in remotes_list: + for name in self.extract_grouped_metric_names(remote): + remote_names.append(name) + + remote_metrics = [] + + for name in remote_names: + remote_metrics.append(GroupedMetric(name, group)) + + return remote_metrics + + def create_activity_metrics(self): + activity_metrics_raw_text = requests.get(self.activity_urls[0]["raw_content_url"]).text + + raw_activity_names = re.findall(r'\|(?:\[|)(.*)\|(?:\]|)(?:\S| )', activity_metrics_raw_text) + + activity_names = [re.sub(r'(?:\]\(.*\))', '', name) for name in raw_activity_names if '---' not in name and 'Name' not in name] + + activity_metrics = [] + + for raw_name in activity_names: + metric = GroupedMetric(raw_name, "activity") + + is_grouped_metric = True + for group in self.metrics_by_group: + if metric.tag not in [metric.tag for metric in group]: + is_grouped_metric = False + + if is_grouped_metric == False: + activity_metrics.append(metric) + + return activity_metrics + + def create_experimental_metrics(self): + tags = [] + for group in self.metrics_by_group: + for metric in group: + tags.append(metric.tag) + + self.experimental_metrics = [metric for metric in self.implemented_metrics if metric.tag not in tags] + + def copy_implemented_metrics(self): + # takes implemented metrics and copies their data to the appropriate metric object + # I'm sorry + implemented_metric_tags = [metric.tag for metric in self.implemented_metrics] + for group in self.metrics_by_group: + if group is not self.experimental_metrics: #experimental metrics don't need to be copied, since they don't have a definition + for grouped_metric in group: + if grouped_metric.tag in 
implemented_metric_tags: + metric = next(metric for metric in self.implemented_metrics if metric.tag == grouped_metric.tag) + for key in metric.__dict__.keys(): + if key != 'group': #don't copy the group over, since the metrics are already grouped + grouped_metric.__dict__[key] = metric.__dict__[key] + + def find_defined_metrics(self): + activity_files = self.__githubapi.get_repo(self.activity_repo).get_dir_contents("activity-metrics") + defined_tags = [re.sub(".md", '', file.name) for file in activity_files] + + for group in self.metrics_by_group: + for metric in group: + if metric.tag in defined_tags: + metric.is_defined = 'true' + metric.url = "https://github.com/{}/blob/wg-gmd/activity-metrics/{}.md".format(MetricsStatus.activity_repo, metric.tag) + + def get_raw_metrics_status(self): + for group in self.metrics_by_group: + for metric in group: + self.raw_metrics_status.append(metric.__dict__) + + def get_metadata(self): + self.get_metric_sources() + self.get_metric_types() + self.get_metric_tags() + + self.metadata = { + "remotes": { + "diversity_inclusion_urls": self.diversity_inclusion_urls, + "growth_maturity_decline_urls": self.growth_maturity_decline_urls, + "risk_urls": self.risk_urls, + "value_urls": self.value_urls, + "activity_repo_urls": self.activity_urls + }, + "groups": self.groups, + "sources": self.sources, + "metric_types": self.metric_types, + "tags": self.tags + } + + def get_metric_sources(self): + for source in [metric['source'] for metric in self.raw_metrics_status]: + source = source.lower() + if source not in self.sources and source != "none": + self.sources.append(source) + self.sources.append("all") + + def get_metric_types(self): + for metric_type in [metric['metric_type'] for metric in self.raw_metrics_status]: + metric_type = metric_type.lower() + if metric_type not in self.metric_types and metric_type != "none": + self.metric_types.append(metric_type) + self.metric_types.append("all") + + def get_metric_tags(self): + for tag in [(metric['tag'], metric['group']) for metric in self.raw_metrics_status]: + # tag[0] = tag[0].lower() + if tag[0] not in [tag[0] for tag in self.tags] and tag[0] != "none": + self.tags[tag[0]] = tag[1] diff --git a/augur/plugins/__init__.py b/augur/plugins/__init__.py new file mode 100644 --- /dev/null +++ b/augur/plugins/__init__.py @@ -0,0 +1,12 @@ +import pkgutil +import importlib + +__all__ = [] +loaded = [] + +__path__ = pkgutil.extend_path(__path__, __name__) +for importer, modname, ispkg in pkgutil.walk_packages(path=__path__, prefix=__name__+'.'): + if ispkg: + module = importlib.import_module(modname) + __all__.append(modname) + loaded.append(module) \ No newline at end of file diff --git a/augur/plugins/example_plugin/__init__.py b/augur/plugins/example_plugin/__init__.py new file mode 100644 --- /dev/null +++ b/augur/plugins/example_plugin/__init__.py @@ -0,0 +1,2 @@ +from .example_plugin import ExamplePlugin +__all__ = ['ExamplePlugin'] \ No newline at end of file diff --git a/augur/plugins/example_plugin/example_plugin.py b/augur/plugins/example_plugin/example_plugin.py new file mode 100644 --- /dev/null +++ b/augur/plugins/example_plugin/example_plugin.py @@ -0,0 +1,24 @@ +#SPDX-License-Identifier: MIT +from augur import AugurPlugin, Application, logger +# (don't remove the above line, it's for a script) + +class ExamplePlugin(AugurPlugin): + """ + This plugin serves as an example as to how to load plugins into Augur + """ + def __init__(self, app): + self.augur_app = app + logger.info('example-plugin loaded') + return 
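A small sketch of how the plugin auto-discovery added in augur/plugins/__init__.py above can be exercised, assuming the augur package is importable; the exact names depend on which plugin packages are present:

import augur.plugins

print(augur.plugins.__all__)    # e.g. ['augur.plugins.example_plugin']
print(augur.plugins.loaded)     # the module objects walk_packages() imported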
+ + def example_metric(self, owner, repo): + return 'Hello, {}/{}'.format(owner, repo) + + def add_routes(self, flask_app): + """ + Responsible for adding this plugin's data sources to the API + """ + flask_app.addMetric(self.example_metric, 'example_metric') + +ExamplePlugin.name = 'example-plugin' +Application.register_plugin(ExamplePlugin) \ No newline at end of file diff --git a/augur/publicwww.py b/augur/publicwww.py deleted file mode 100644 --- a/augur/publicwww.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -PublicWWW is a class for making API requests to https://publicwww.com/ a -search engine for the source of websites -""" -import sys -import pandas as pd -if sys.version_info > (3, 0): - import urllib.parse as url -else: - import urllib as url -# end imports -# (don't remove the above line, it's for a script) - -class PublicWWW(object): - """ - PublicWWW is a class for making API requests to https://publicwww.com/ a - search engine for the source of websites - """ - - def __init__(self, api_key): - """ - Initalizes a PublicWWW instance - - :param api_key: The API key for PublicWWW. This is required to get the full names of more results - """ - self.__api_key = api_key - - ##################################### - ### DIVERSITY AND INCLUSION ### - ##################################### - - - ##################################### - ### GROWTH, MATURITY, AND DECLINE ### - ##################################### - - - ##################################### - ### RISK ### - ##################################### - - - ##################################### - ### VALUE ### - ##################################### - - - ##################################### - ### ACTIVITY ### - ##################################### - - - ##################################### - ### EXPERIMENTAL ### - ##################################### - - def linking_websites(self, owner, repo): - """ - Finds the repo's popularity on the internet - - :param owner: The username of a project's owner - :param repo: The name of the repository - :return: DataFrame with the issues' id the date it was - opened, and the date it was first responded to - """ - - # Find websites that link to that repo - repo_url = "https://github.com/{owner}/{repo}".format(owner=owner, repo=repo) - query = '<a+href%3D"{repourl}"'.format(repourl=url.quote_plus(repo_url)) - req = 'https://publicwww.com/websites/{query}/?export=csv&apikey={apikey}' - req.format(query=query, apikey=self.__api_key) - result = pd.read_csv(req, delimiter=';', header=None, names=['url', 'rank']) - return result \ No newline at end of file diff --git a/augur/routes/__git_routes.py b/augur/routes/__git_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/__git_routes.py @@ -0,0 +1,86 @@ +from flask import Response + +def create_routes(server): + + git = server.augur_app.git() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + ##################################### + ### RISK ### + ##################################### + + ##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ##s/closed# + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + # @server.app.route('/{}/git/repos'.format(server.api_version)) 
+ # def git_downloaded_repos(): #TODO: make this name automatic - wrapper? + # drs = server.transform(git.downloaded_repos) + # return Response(response=drs, + # status=200, + # mimetype="application/json") + # server.updateMetricMetadata(function=git.downloaded_repos, endpoint='/{}/git/repos'.format(server.api_version), metric_type='git') + + """ + @api {get} /git/lines_changed_minus_whitespace/:git_repo_url Lines Changed Minus Whitespace + @apiName LinesChangedMinusWhitespace + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + @apiParam {String} git_repo_url URL of the GitHub repository + @apiSuccessExample {json} Success-Response: + [ + { + "additions":2, + "author_date":"2018-05-14 10:09:57 -0500", + "author_email":"[email protected]", + "author_name":"Sean P. Goggins", + "commit_date":"2018-05-16 10:12:22 -0500", + "committer_email":"[email protected]", + "committer_name":"Derek Howard", + "deletions":0, + "hash":"77e603a", + "message":"merge dev", + "parents":"b8ec0ed" + } + ] + """ + server.addGitMetric(git.lines_changed_minus_whitespace, 'lines_changed') + + """ + @api {get} /git/lines_changed_by_author/:git_repo_url Lines Changed by Author + @apiName LinesChangedByAuthor + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + @apiParam {String} git_repo_url URL of the GitHub repository + @apiSuccessExample {json} Success-Response: + [ + { + "additions":2, + "author_date":"2018-05-14 10:09:57 -0500", + "author_email":"[email protected]", + "author_name":"Sean P. Goggins", + "commit_date":"2018-05-16 10:12:22 -0500", + "committer_email":"[email protected]", + "committer_name":"Derek Howard", + "deletions":0,"hash":"77e603a", + "message":"merge dev", + "parents":"b8ec0ed" + } + ] + """ + server.addGitMetric(git.lines_changed_by_author, 'changes_by_author') diff --git a/augur/routes/__init__.py b/augur/routes/__init__.py new file mode 100644 --- /dev/null +++ b/augur/routes/__init__.py @@ -0,0 +1,24 @@ +import importlib +import os +import glob +from augur.util import getFileID + +def getRouteFiles(): + route_files = [] + + for filename in glob.iglob("**/routes/*"): + if not getFileID(filename).startswith('__'): + route_files.append(getFileID(filename)) + + return route_files + +route_files = getRouteFiles() + +def create_all_datasource_routes(server): + for route_file in route_files: + module = importlib.import_module('.' 
+ route_file, 'augur.routes') + module.create_routes(server) + +def create_status_routes(server): + module = importlib.import_module('.__metric_status_routes', 'augur.routes') + module.create_routes(server) \ No newline at end of file diff --git a/augur/routes/__metric_status_routes.py b/augur/routes/__metric_status_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/__metric_status_routes.py @@ -0,0 +1,182 @@ +from flask import Response, request +from augur.metrics_status import Metric +import json + +def filterBy(status, key, value): + if value == "all" or value == '' or value is None: + return status + elif value is not None: + return [metric for metric in status if metric[key].lower() == value.lower()] + +def create_routes(server): + + metrics_status = server.augur_app.metrics_status() + metrics_status.create_metrics_status() + metrics_status_URL = "metrics/status" + + """ + @api {get} metrics/status Metrics Status + @apiName metrics-status + @apiGroup Metrics-Status + @apiDescription Information about the Augur implementation status of CHAOSS metrics. + + @apiSuccessExample {json} Success-Response: + [ + { + + "ID": "ghtorrent-fakes", + "tag": "fakes", + "name": "Fakes", + "group": "experimental", + "backend_status": "implemented", + "frontend_status": "implemented", + "endpoint": "/api/unstable/<owner>/<repo>/timeseries/fakes", + "source": "ghtorrent", + "metric_type": "timeseries", + "url": "/", + "is_defined": "false" + }, + { + "ID": "ghtorrentplus-closed-issue-resolution-duration", + "tag": "closed-issue-resolution-duration", + "name": "Closed Issue Resolution Duration", + "group": "experimental", + "backend_status": "implemented", + "frontend_status": "unimplemented", + "endpoint": "/api/unstable/<owner>/<repo>/issues/time_to_close", + "source": "ghtorrentplus", + "metric_type": "metric", + "url": "activity-metrics/closed-issue-resolution-duration.md", + "is_defined": "true" + }, + { + "ID": "githubapi-lines-of-code-changed", + "tag": "lines-of-code-changed", + "name": "Lines Of Code Changed", + "group": "experimental", + "backend_status": "implemented", + "frontend_status": "implemented", + "endpoint": "/api/unstable/<owner>/<repo>/timeseries/lines_changed", + "source": "githubapi", + "metric_type": "timeseries", + "url": "activity-metrics/lines-of-code-changed.md", + "is_defined": "true" + } + ] + """ + @server.app.route("/{}/{}".format(server.api_version, metrics_status_URL)) + def metrics_status_view(): + return Response(response=json.dumps(metrics_status.raw_metrics_status), + status=200, + mimetype="application/json") + + """ + @api {get} metrics/status/metadata Metrics Status Metadata + @apiName metrics-status-metadata + @apiGroup Metrics-Status + @apiDescription Metadata about the Augur implemntation status of CHAOSS metrics. + + @apiSuccessExample {json} Success-Response: + [ + { + "groups": [ + { + "diversity-inclusion": "Diversity and Inclusion", + "growth-maturity-decline": "Growth, Maturity, and Decline", + "risk": "Risk", + "value": "Value", + "activity": "Activity", + "experimental": "Experimental" + } + ], + "sources": [ + "ghtorrent", + "ghtorrentplus", + "githubapi", + "downloads", + "facade", + "publicwww", + "librariesio", + "git" + ], + "metric_types": [ + "timeseries", + "metric", + "git" + ], + "tags": { + "listening": "diversity-inclusion", + "speaking": "diversity-inclusion", + ... 
+ } + } + ] + """ + @server.app.route("/{}/{}/metadata".format(server.api_version, metrics_status_URL)) + def metrics_status_metadata_view(): + return Response(response=json.dumps(metrics_status.metadata), + status=200, + mimetype="application/json") + + """ + @api {get} metrics/status/filter?ID=:ID&tag=:tag&group=:group&backend_status=:backend_status&frontend_status=:frontend_status&source=:source&metric_type=:metric_type&is_defined=:is_defined Filtered Metrics Status + @apiName filter-metrics-status + @apiGroup Metrics-Status + @apiDescription Metrics Status that allows for filtering of the results via the query string. Filters can be combined. + + @apiParam {string} [ID] Returns the status of the metric that matches this ID + @apiParam {string} [tag] Returns all the statuses of all metrics that have this tag + @apiParam {string} [group] Returns all the metrics in this metric grouping + @apiParam {string="unimplemented", "undefined", "implemented"} [backend_status] + @apiParam {string="unimplemented", "implemented"} [frontend_status] + @apiParam {string} [source] Returns the statuses of all metrics from this data source + @apiParam {string} [metric_type] Returns the statuses of the metrics of this metric type + @apiParam {string="true", "false"} [is_defined] Returns the statuses of metrics that are or aren't defined + + @apiParamExample {string} Sample Query String: + metrics/status/filter?group=growth-maturity-decline&metric_type=metric + + + @apiSuccessExample {json} Success-Response: + [ + { + "ID": "ghtorrentplus-closed-issue-resolution-duration", + "tag": "closed-issue-resolution-duration", + "name": "Closed Issue Resolution Duration", + "group": "growth-maturity-decline", + "backend_status": "implemented", + "frontend_status": "unimplemented", + "endpoint": "/api/unstable/<owner>/<repo>/issues/time_to_close", + "source": "ghtorrentplus", + "metric_type": "metric", + "url": "activity-metrics/closed-issue-resolution-duration.md", + "is_defined": "true" + }, + { + "ID": "ghtorrent-contributors", + "tag": "contributors", + "name": "Contributors", + "group": "growth-maturity-decline", + "backend_status": "implemented", + "frontend_status": "implemented", + "endpoint": "/api/unstable/<owner>/<repo>/contributors", + "source": "ghtorrent", + "metric_type": "metric", + "url": "activity-metrics/contributors.md", + "is_defined": "true" + } + ] + """ + @server.app.route("/{}/{}/filter".format(server.api_version, metrics_status_URL)) + def filtered_metrics_status_view(): + + filtered_metrics_status = metrics_status.raw_metrics_status + + valid_filters = [key for key in Metric().__dict__.keys() if key != 'name' and key != 'endpoint' and key != 'url'] + + for valid_filter in valid_filters: + filtered_metrics_status = filterBy(filtered_metrics_status, valid_filter, request.args.get(valid_filter)) + + return Response(response=json.dumps(filtered_metrics_status), + status=200, + mimetype="application/json") diff --git a/augur/routes/downloads_routes.py b/augur/routes/downloads_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/downloads_routes.py @@ -0,0 +1,51 @@ +def create_routes(server): + + downloads = server.augur_app.downloads() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + ##################################### + ### RISK ### + ##################################### + + 
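# --- Illustrative sketch (assumed, not taken from this changeset) -------------
# Every route file in this patch registers metrics through helpers such as
# server.addTimeseries(...), server.addMetric(...) and server.addGitMetric(...).
# Judging from the documented URLs, these presumably wrap a metric function in
# a Flask view bound under /api/unstable/... and also record metadata for the
# metrics-status endpoints. A simplified, assumed shape of such a helper:
#
#     def addTimeseries(self, function, endpoint):
#         route = '/{}/<owner>/<repo>/timeseries/{}'.format(self.api_version, endpoint)
#         def view(owner, repo):
#             data = self.transform(function, args=(owner, repo))
#             return Response(response=data, status=200, mimetype="application/json")
#         # each generated view needs a unique Flask endpoint name
#         self.app.add_url_rule(route, 'timeseries_' + endpoint.replace('/', '_'), view)
#         self.updateMetricMetadata(function=function, endpoint=route, metric_type='timeseries')
#
# The real helpers live in augur/server.py; treat this only as a reading aid.
# ------------------------------------------------------------------------------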
##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + """ + @api {get} /:owner/:repo/timeseries/downloads Downloads + @apiName downloads + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2018-06-14", + "downloads": 129148 + }, + { + "date": "2018-06-13", + "downloads": 131262 + } + ] + """ + server.addTimeseries(downloads.downloads, 'downloads') + diff --git a/augur/routes/facade_routes.py b/augur/routes/facade_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/facade_routes.py @@ -0,0 +1,65 @@ +from flask import Response + +def create_routes(server): + + facade = server.augur_app.facade() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + ##################################### + ### RISK ### + ##################################### + + ##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + @server.app.route('/{}/git/repos'.format(server.api_version)) + def facade_downloaded_repos(): #TODO: make this name automatic - wrapper? + drs = server.transform(facade.downloaded_repos) + return Response(response=drs, + status=200, + mimetype="application/json") + server.updateMetricMetadata(function=facade.downloaded_repos, endpoint='/{}/git/repos'.format(server.api_version), metric_type='git') + + """ + @api {get} /git/lines_changed/:git_repo_url Lines Changed by Author + @apiName lines-changed-by-author + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "additions":2, + "author_date":"2018-05-14 10:09:57 -0500", + "author_email":"[email protected]", + "author_name":"Sean P. 
Goggins", + "commit_date":"2018-05-16 10:12:22 -0500", + "committer_email":"[email protected]", + "committer_name":"Derek Howard", + "deletions":0,"hash":"77e603a", + "message":"merge dev", + "parents":"b8ec0ed" + } + ] + """ + server.addGitMetric(facade.lines_changed_by_author, 'changes_by_author') + diff --git a/augur/routes/ghtorrent_routes.py b/augur/routes/ghtorrent_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/ghtorrent_routes.py @@ -0,0 +1,803 @@ +from flask import request, Response + +def create_routes(server): + + ghtorrent = server.augur_app.ghtorrent() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + """ + @api {get} /:owner/:repo/timeseries/issues/closed Closed Issues + @apiName closed-issues + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issues-closed.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiParam {string} group_by (default to week) allows for results to be grouped by day, week, month, or year + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2011-03-19T00:00:00.000Z", + "issues_closed": 3 + }, + { + "date": "2011-03-25T00:00:00.000Z", + "issues_closed": 6 + } + ] + """ + server.addTimeseries(ghtorrent.closed_issues, 'issues/closed') + + """ + @api {get} /:owner/:repo/timeseries/commits?group_by=:group_by Code Commits + @apiName code-commits + @apiGroup Growth-Maturity-Decline + @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/code-commits.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiParam {String} group_by (Default to week) Allows for results to be grouped by day, week, month, or year + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2017-08-27T00:00:00.000Z", + "commits": 44 + }, + { + "date": "2017-08-20T00:00:00.000Z", + "commits": 98 + } + ] + """ + server.addTimeseries(ghtorrent.code_commits, 'commits') + + """ + @api {get} /:owner/:repo/timeseries/code_review_iteration Code Review Iteration + @apiName code-review-iteration + @apiGroup Growth-Maturity-Decline + @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/code-review-iteration.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2012-05-16T00:00:00.000Z", + "iterations": 2 + }, + { + "date": "2012-05-16T00:00:00.000Z", + "iterations": 1 + } + ] + """ + server.addTimeseries(ghtorrent.code_review_iteration, 'code_review_iteration') + + """ + @api {get} /:owner/:repo/timeseries/contribution_acceptance Contribution Acceptance + @apiName contribution-acceptance + @apiGroup Growth-Maturity-Decline + @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/contribution-acceptance.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { 
+ "date": "2012-05-16T00:00:00.000Z", + "ratio": 1.1579 + }, + { + "date": "2012-05-20T00:00:00.000Z", + "ratio": 1.3929 + } + ] + """ + server.addTimeseries(ghtorrent.contribution_acceptance, 'contribution_acceptance') + + """ + @api {get} /:owner/:repo/timeseries/contributing_github_organizations Contributing Github Organizations + @apiName contributing-github-organizations + @apiGroup Growth-Maturity-Decline + @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/contributing-organizations.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "contributing_org": 4066, + "commits": 36069, + "issues": 432, + "commit_comments": 1597, + "issue_comments": 15421, + "pull_requests": 808, + "pull_request_comments": 0, + "total": 54327, + "count": 35 + }, + { + "contributing_org": 16465, + "commits": 39111, + "issues": 332, + "commit_comments": 524, + "issue_comments": 3188, + "pull_requests": 57, + "pull_request_comments": 18, + "total": 43230, + "count": 11 + } + ] + """ + server.addMetric(ghtorrent.contributing_github_organizations, 'contributing_github_organizations') + + """ + @api {get} /:owner/:repo/timeseries/issues/response_time First Response To Issue Duration + @apiName first-response-to-issue-duration + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/first-response-to-issue-duration.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "id": 2, + "opened": "2012-01-19T05:24:55.000Z", + "first_commented": "2012-01-19T05:30:13.000Z", + "pull_request": 0, + "minutes_to_comment": 5 + }, + { + "id": 3, + "opened": "2012-01-26T15:07:56.000Z", + "first_commented": "2012-01-26T15:09:28.000Z", + "pull_request": 0, + "minutes_to_comment": 1 + } + ] + """ + server.addTimeseries(ghtorrent.first_response_to_issue_duration, 'issues/response_time') + + """ + @api {get} /:owner/:repo/timeseries/forks?group_by=:group_by Forks + @apiName forks + @apiGroup Growth-Maturity-Decline + @apiParam {String} group_by (Default to week) Allows for results to be grouped by day, week, month, or year + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/forks.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2017-08-20T00:00:00.000Z", + "projects": 48 + }, + { + "date": "2017-08-13T00:00:00.000Z", + "projects": 53 + } + ] + """ + server.addTimeseries(ghtorrent.forks, 'forks') + + """ + @api {get} /:owner/:repo/pulls/maintainer_response_time Maintainer Response to Merge Request Duration + @apiName maintainer-response-to-merge-request-duration + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/maintainer-response-to-merge-request-duration.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2011-05-10T00:00:00.000Z", + 
"days": 32 + }, + { + "date": "2011-05-21T00:00:00.000Z", + "days": 3 + } + ] + """ + server.addTimeseries(ghtorrent.maintainer_response_to_merge_request_duration, 'pulls/maintainer_response_time') + + """ + @api {get} /:owner/:repo/pulls/new_contributing_github_organizations New Contributing Github Organizations + @apiName new-github-contributing-organizations + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/new-contributing-organizations.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-04-12T23:43:38.000Z", + "organizations": 1 + }, + { + "date": "2008-08-23T15:05:52.000Z", + "organizations": 2 + } + ] + """ + server.addTimeseries(ghtorrent.new_contributing_github_organizations, 'new_contributing_github_organizations') + + """ + @api {get} /:owner/:repo/timeseries/issues Open Issues + @apiName open-issues + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/open-issues.md">CHAOSS Metric Definition</a> + + @apiParam {string} group_by (default to week) allows for results to be grouped by day, week, month, or year + @apiParam {string} owner username of the owner of the github repository + @apiParam {string} repo name of the github repository + + @apiSucessExample {json} success-response: + [ + { + "date": "2017-08-27T00:00:00.000Z", + "issues": 67 + }, + { + "date": "2017-08-20T00:00:00.000Z", + "issues": 100 + } + ] + """ + server.addTimeseries(ghtorrent.open_issues, 'issues') + + """ + @api {get} /:owner/:repo/timeseries/pulls/comments?group_by=:group_by Pull Request Comments + @apiName pull-request-comments + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/pull-request-comments.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2011-11-15T00:00:00.000Z", + "counter": 3 + }, + { + "date": "2011-11-25T00:00:00.000Z", + "counter": 1 + } + ] + + """ + server.addTimeseries(ghtorrent.pull_request_comments, 'pulls/comments') + + """ + @api {get} /:owner/:repo/timeseries/pulls Pull Requests Open + @apiName pull-requests-open + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/pull-requests-open.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2013-01-09T00:00:00.000Z", + "pull_requests": 3 + }, + { + "date": "2016-01-14T00:00:00.000Z", + "pull_requests": 1 + } + ] + """ + server.addTimeseries(ghtorrent.pull_requests_open, 'pulls') + + ##################################### + ### RISK ### + ##################################### + + ##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + """ + @api {get} /:owner/:repo/timeseries/issue_comments Issue Comments + @apiName issue-comments + @apiGroup Activity + 
@apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issue-comments.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2009-04-05T00:00:00.000Z", + "counter": 3 + }, + { + "date": "2009-04-16T00:00:00.000Z", + "counter": 5 + } + ] + """ + server.addTimeseries(ghtorrent.issue_comments, 'issue_comments') + + """ + @api {get} /:owner/:repo/timeseries/pulls/made_closed Pull Requests Made/Closed + @apiName pull-requests-made-closed + @apiGroup Activity + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/pull-requests-made-closed.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2010-09-11T00:00:00.000Z", + "rate": 0.3333 + }, + { + "date": "2010-09-13T00:00:00.000Z", + "rate": 0.3333 + } + ] + """ + server.addTimeseries(ghtorrent.pull_requests_made_closed, 'pulls/made_closed') + + """ + @api {get} /:owner/:repo/timeseries/watchers Watchers + @apiName watchers + @apiGroup Activity + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/activity-metrics-list.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2017-08-23T00:00:00.000Z", + "watchers": 86 + }, + { + "date": "2017-08-16T00:00:00.000Z", + "watchers": 113 + } + ] + """ + server.addTimeseries(ghtorrent.watchers, 'watchers') + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + """ + @api {get} /:owner/:repo/timeseries/commits100 Commits100 + @apiName commits100 + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2017-08-13T00:00:00.000Z", + "commits": 114 + }, + { + "date": "2017-08-06T00:00:00.000Z", + "commits": 113 + } + ] + """ + server.addTimeseries(ghtorrent.commits100, 'commits100') + + """ + @api {get} /:owner/:repo/timeseries/commits/comments Commit Comments + @apiName commit-comments + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-07-10T00:00:00.000Z", + "counter": 2 + }, + { + "date": "2008-07-25T00:00:00.000Z", + "counter": 1 + } + ] + + """ + server.addTimeseries(ghtorrent.commit_comments, 'commits/comments') + + """ + @api {get} /:owner/:repo/committer_locations Committer Locations + @apiName committer-locations + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
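# --- Illustrative usage sketch (assumed, not taken from this changeset) -------
# Once the server is running (by default on port 5000, per augur/runtime.py),
# the endpoints documented in this file behave like an ordinary JSON API. A
# minimal client-side sketch, assuming the `requests` and `pandas` packages and
# a local instance; the rails/rails pair is just an example repository:
#
#     import requests
#     import pandas as pd
#
#     base = "http://localhost:5000/api/unstable"
#     resp = requests.get("{}/rails/rails/timeseries/commits".format(base),
#                         params={"group_by": "week"})
#     resp.raise_for_status()
#     commits = pd.DataFrame(resp.json())  # columns: date, commits
#     print(commits.head())
# ------------------------------------------------------------------------------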
+ + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "login": "rafaelfranca", + "location": "São Paulo, Brazil", + "commits": 7171 + }, + { + "login": "tenderlove", + "location": "Seattle", + "commits": 4605 + } + ] + """ + server.addMetric(ghtorrent.committer_locations, 'committer_locations') + + """ + @api {get} /:owner/:repo/timeseries/total_committers Total Committers + @apiName total-committers + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2004-11-24T00:00:00.000Z", + "total_total_committers": 1 + }, + { + "date": "2005-02-18T00:00:00.000Z", + "total_total_committers": 2 + } + ] + """ + server.addTimeseries(ghtorrent.total_committers, 'total_committers') + + """ + @api {get} /:owner/:repo/timeseries/issues/activity Issue Activity + @apiName issue-activity + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "0000-00-00", + "count": 2, + "action": "closed" + }, + { + "date": "0000-00-00", + "count": 70, + "action": "opened" + }, + { + "date": "0000-00-00", + "count": 0, + "action": "reopened" + }, + { + "date": "0000-00-00", + "count": 68, + "action": "open" + }, + { + "date": "2009-04-01T00:00:00.000Z", + "count": 0, + "action": "closed" + }, + { + "date": "2009-04-01T00:00:00.000Z", + "count": 29, + "action": "opened" + }, + { + "date": "2009-04-01T00:00:00.000Z", + "count": 0, + "action": "reopened" + }, + { + "date": "2009-04-01T00:00:00.000Z", + "count": 29, + "action": "open" + } + ] + """ + server.addTimeseries(ghtorrent.issue_activity, 'issues/activity') + + """ + @api {get} /:owner/:repo/timeseries/pulls/acceptance_rate Pull Request Acceptance Rate + @apiDeprecated This endpoint was removed. Please use (#Experimental:community-engagement) + @apiName pull-request-acceptance-rate + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2010-09-11T00:00:00.000Z", + "rate": 0.3333 + }, + { + "date": "2010-09-13T00:00:00.000Z", + "rate": 0.3333 + } + ] + """ + + """ + @api {get} /:owner/:repo/community_age Community Age + @apiName community-age + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
+ + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "login": "bonnie", + "location": "Rowena, TX", + "commits": 12 + }, + { + "login":"clyde", + "location":"Ellis County, TX", + "commits": 12 + } + ] + """ + server.addMetric(ghtorrent.community_age, 'community_age') + + """ + @api {get} /:owner/:repo/timeseries/community_engagement Community Engagement + @apiName community-engagement + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2011-04-16T00:00:00.000Z", + "issues_opened": 0, + "issues_closed": 0, + "pull_requests_opened": 32, + "pull_requests_merged": 0, + "pull_requests_closed": 19, + "issues_opened_total": 4, + "issues_closed_total": 0, + "issues_closed_rate_this_window": null, + "issues_closed_rate_total": 0, + "issues_delta": 0, + "issues_open": 4, + "pull_requests_opened_total": 284, + "pull_requests_closed_total": 242, + "pull_requests_closed_rate_this_window": 0.59375, + "pull_requests_closed_rate_total": 0.8521126761, + "pull_requests_delta": 13, + "pull_requests_open": 42 + }, + { + "date": "2011-04-17T00:00:00.000Z", + "issues_opened": 0, + "issues_closed": 0, + "pull_requests_opened": 15, + "pull_requests_merged": 1, + "pull_requests_closed": 14, + "issues_opened_total": 4, + "issues_closed_total": 0, + "issues_closed_rate_this_window": null, + "issues_closed_rate_total": 0, + "issues_delta": 0, + "issues_open": 4, + "pull_requests_opened_total": 299, + "pull_requests_closed_total": 256, + "pull_requests_closed_rate_this_window": 0.9333333333, + "pull_requests_closed_rate_total": 0.856187291, + "pull_requests_delta": 1, + "pull_requests_open": 43 + } + ] + """ + server.addTimeseries(ghtorrent.community_engagement, 'community_engagement') + + """ + @api {get} /:owner/:repo/contributors Total Contributions by User + @apiName contributors + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "user": 8153, + "commits": 6825, + "issues": 127, + "commit_comments": 313, + "issue_comments": 13152, + "pull_requests": 1, + "pull_request_comments": 0, + "total": 20418 + }, + { + "user": 45381, + "commits": 2192, + "issues": 202, + "commit_comments": 130, + "issue_comments": 4633, + "pull_requests": 0, + "pull_request_comments": 0, + "total": 7157 + } + ] + """ + server.addMetric(ghtorrent.contributors, 'contributors') + + """ + @api {get} /:owner/:repo/timeseries/contributions Contributions + @apiName contributions + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
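# --- Illustrative usage sketch (assumed, not taken from this changeset) -------
# The community_engagement timeseries above already carries cumulative fields
# (issues_closed_rate_total, pull_requests_closed_rate_total, ...), so simple
# health summaries can be derived straight from the response. A rough sketch,
# assuming the JSON has been fetched into `rows` as a list of dicts:
#
#     import pandas as pd
#
#     engagement = pd.DataFrame(rows)
#     engagement['date'] = pd.to_datetime(engagement['date'])
#     latest = engagement.sort_values('date').iloc[-1]
#     print("PR close rate to date:", latest['pull_requests_closed_rate_total'])
#     print("Issue close rate to date:", latest['issues_closed_rate_total'])
#
# Field names are taken from the sample response documented above.
# ------------------------------------------------------------------------------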
+ + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiParam (String) user Limit results to the given user's contributions + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2004-11-24T00:00:00.000Z", + "commits": 3, + "pull_requests": null, + "issues": null, + "commit_comments": null, + "pull_request_comments": null, + "issue_comments": null, + "total": null + }, + { + "date": "2004-11-30T00:00:00.000Z", + "commits": 7, + "pull_requests": null, + "issues": null, + "commit_comments": null, + "pull_request_comments": null, + "issue_comments": null, + "total": null + } + ] + """ + # ghtorrent.contributons, 'contributors' + # don't remove the above line it's for a script + @server.app.route('/{}/<owner>/<repo>/contributions'.format(server.api_version)) + def contributions(owner, repo): + repoid = ghtorrent.repoid(owner, repo) + user = request.args.get('user') + transformed_contributors = server.transform(ghtorrent.contributions, args=(owner, repo), orient=request.args.get('orient')) + return Response(response=transformed_contributors, + status=200, + mimetype="application/json") + server.updateMetricMetadata(ghtorrent.contributions, '/api/unstable/<owner>/<repo>/timeseries/contributions') + + """ + @api {get} /:owner/:repo/project_age Project Age + @apiName project-age + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-04-11T00:00:00.000Z", + "{0}": 1 + } + ] + + """ + server.addMetric(ghtorrent.project_age, 'project_age') + + """ + @api {get} /:owner/:repo/timeseries/fakes Fakes + @apiName fakes + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2010-04-09T00:00:00.000Z", + "fakes": 1 + }, + { + "date": "2010-04-27T00:00:00.000Z", + "fakes": 2 + } + ] + """ + server.addTimeseries(ghtorrent.fakes, 'fakes') + + """ + @api {get} /ghtorrent_range GHTorrent Date Range + @apiName GhtorrentRange + @apiGroup Utility + @apiDescription Utility endpoint to show the range of dates GHTorrent covers. 
+ + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-04-10T17:25:06-07:00", + "release": "v0.9.1" + }, + { + "date": "2008-04-10T17:25:07-07:00", + "release": "v0.9.2" + } + ] + """ + @server.app.route('/{}/ghtorrent_range'.format(server.api_version)) + + def ghtorrent_range(): + ghr = server.transform(ghtorrent.ghtorrent_range()) + return Response(response=ghr, + status=200, + mimetype="application/json") + # server.updateMetricMetadata(ghtorrent.ghtorrent_range, '/{}/ghtorrent_range'.format(server.api_version)) diff --git a/augur/routes/ghtorrentplus_routes.py b/augur/routes/ghtorrentplus_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/ghtorrentplus_routes.py @@ -0,0 +1,50 @@ +def create_routes(server): + + ghtorrentplus = server.augur_app.ghtorrentplus() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + """ + @api {get} /:owner/:repo/issue_close_time Closed Issue Resolution Duration + @apiName closed-issue-resolution-duration + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issue-resolution-duration.md">CHAOSS Metric Definition</a> + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiSuccessExample {json} Success-Response: + [ + { + "id": 2, + "date": "2012-01-19T05:24:55.000Z", + "days_to_close": 7 + }, + { + "id": 3, + "date": "2012-01-26T15:07:56.000Z", + "days_to_close": 0 + } + ] + """ + server.addMetric(ghtorrentplus.closed_issue_resolution_duration, 'issues/time_to_close') + + ##################################### + ### RISK ### + ##################################### + + ##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### diff --git a/augur/routes/githubapi_routes.py b/augur/routes/githubapi_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/githubapi_routes.py @@ -0,0 +1,117 @@ +def create_routes(server): + + github = server.augur_app.githubapi() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + """ + @api {get} /:owner/:repo/timeseries/lines_changed Lines of Code Changed + @apiName lines-of-code-changed + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/lines-of-code-changed.md">CHAOSS Metric Definition</a> + + @apiGroup Growth-Maturity-Decline + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + 'date': '2015-11-01T00:00:00Z', + 'lines_changed': 396137.0 + }, + { + 'date': '2015-11-08T00:00:00Z', + 'lines_changed': 3896.0 + } + ] + """ + server.addTimeseries(github.lines_of_code_changed, 'lines_changed') + + ##################################### + ### RISK ### + ##################################### + + ##################################### + 
### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + """ + @api {get} /:owner/:repo/bus_factor Bus Factor + @apiName bus-factor + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiParam {Int} threshold Percentage used to determine how many lost people would kill the project + + @apiSuccessExample {json} Success-Response: + [ + { + "best": "5", + "worst": "1" + } + ] + """ + server.addMetric(github.bus_factor, "bus_factor") + + """ + @api {get} /:owner/:repo/timeseries/tags/major Major Tags + @apiName major-tags + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-04-10T17:25:14-07:00", + "release": "v1.0.0" + }, + { + "date": "2008-04-10T17:25:47-07:00", + "release": "v2.0.0" + } + ] + """ + server.addTimeseries(github.major_tags, 'tags/major') + + """ + @api {get} /:owner/:repo/timeseries/tags/major Tages + @apiName tags + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-04-10T17:25:06-07:00", + "release": "v0.9.1" + }, + { + "date": "2008-04-10T17:25:07-07:00", + "release": "v0.9.2" + } + ] + """ + server.addTimeseries(github.tags, 'tags') \ No newline at end of file diff --git a/augur/routes/librariesio_routes.py b/augur/routes/librariesio_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/librariesio_routes.py @@ -0,0 +1,196 @@ +def create_routes(server): + + librariesio = server.augur_app.librariesio() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + ##################################### + ### RISK ### + ##################################### + + ##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + """ + @api {get} /:owner/:repo/dependencies Dependencies + @apiName dependencies + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
+ + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + "full_name": "rails/rails", + "description": "Ruby on Rails", + "fork": false, + "created_at": "2008-04-11T02:19:47.000Z", + "updated_at": "2018-05-08T14:18:07.000Z", + "pushed_at": "2018-05-08T11:38:30.000Z", + "homepage": "http://rubyonrails.org", + "size": 163747, + "stargazers_count": 39549, + "language": "Ruby", + "has_issues": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 16008, + "mirror_url": null, + "open_issues_count": 1079, + "default_branch": "master", + "subscribers_count": 2618, + "uuid": "8514", + "source_name": null, + "license": "MIT", + "private": false, + "contributions_count": 2627, + "has_readme": "README.md", + "has_changelog": null, + "has_contributing": "CONTRIBUTING.md", + "has_license": "MIT-LICENSE", + "has_coc": "CODE_OF_CONDUCT.md", + "has_threat_model": null, + "has_audit": null, + "status": null, + "last_synced_at": "2018-03-31T12:40:28.163Z", + "rank": 28, + "host_type": "GitHub", + "host_domain": null, + "name": null, + "scm": "git", + "fork_policy": null, + "github_id": "8514", + "pull_requests_enabled": null, + "logo_url": null, + "github_contributions_count": 2627, + "keywords": [ + "activejob", + "activerecord", + "framework", + "html", + "mvc", + "rails", + "ruby" + ], + "dependencies": [ + { + "project_name": "blade-sauce_labs_plugin", + "name": "blade-sauce_labs_plugin", + "platform": "rubygems", + "requirements": "0.7.2", + "latest_stable": "0.7.3", + "latest": "0.7.3", + "deprecated": false, + "outdated": true, + "filepath": "Gemfile.lock", + "kind": "runtime" + }, + { + "project_name": "blade-qunit_adapter", + "name": "blade-qunit_adapter", + "platform": "rubygems", + "requirements": "2.0.1", + "latest_stable": "2.0.1", + "latest": "2.0.1", + "deprecated": false, + "outdated": false, + "filepath": "Gemfile.lock", + "kind": "runtime" + } + ] + """ + server.addMetric(librariesio.dependencies, 'dependencies') + + """ + @api {get} /:owner/:repo/dependency_stats Dependency Stats + @apiName dependency-stats + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "dependencies": "10", + "dependent_projects": "10.6K", + "dependent_repositories": "392K" + } + ] + """ + server.addMetric(librariesio.dependency_stats, 'dependency_stats') + + """ + @api {get} /:owner/:repo/dependents Dependents + @apiName dependents + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
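# --- Illustrative usage sketch (assumed, not taken from this changeset) -------
# The dependencies payload documented above marks each dependency with
# "outdated" and "deprecated" booleans, so a consumer can summarise dependency
# health without extra API calls. A rough sketch, assuming the response JSON
# for one project has been loaded into `project`:
#
#     deps = project.get("dependencies", [])
#     outdated = [d["name"] for d in deps if d.get("outdated")]
#     print("{} of {} dependencies are outdated".format(len(outdated), len(deps)))
#
# Field names are taken from the sample response above.
# ------------------------------------------------------------------------------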
+ + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "name": "rspec-rails", + "platform": "Rubygems", + "description": "rspec-rails is a testing framework for Rails 3+.", + "homepage": "https://github.com/rspec/rspec-rails", + "repository_url": "https://github.com/rspec/rspec-rails", + "normalized_licenses": [ + "MIT" + ], + "rank": 26, + "latest_release_published_at": "2017-11-20T09:27:22.144Z", + "latest_release_number": "3.7.2", + "language": "Ruby", + "status": null, + "package_manager_url": "https://rubygems.org/gems/rspec-rails", + "stars": 3666, + "forks": 732, + "keywords": [], + "latest_stable_release": { + "id": 11315605, + "project_id": 245284, + "number": "3.7.2", + "published_at": "2017-11-20T09:27:22.144Z", + "created_at": "2017-11-20T09:31:11.532Z", + "updated_at": "2017-11-20T09:31:11.532Z", + "runtime_dependencies_count": 7 + }, + "latest_download_url": "https://rubygems.org/downloads/rspec-rails-3.7.2.gem", + "dependents_count": 4116, + "dependent_repos_count": 129847, + "versions": [ + { + "number": "2.12.2", + "published_at": "2013-01-12T18:56:40.027Z" + }, + { + "number": "2.12.1", + "published_at": "2013-01-07T23:04:53.104Z" + }, + { + "number": "2.12.0", + "published_at": "2012-11-13T03:37:01.354Z" + } + ] + """ + server.addMetric(librariesio.dependents, 'dependents') diff --git a/augur/runtime.py b/augur/runtime.py --- a/augur/runtime.py +++ b/augur/runtime.py @@ -1,3 +1,8 @@ +#SPDX-License-Identifier: MIT +""" +Runs Augur with Gunicorn when called +""" + import multiprocessing as mp import sched import os @@ -10,11 +15,14 @@ from gunicorn.six import iteritems from gunicorn.arbiter import Arbiter + + class AugurGunicornApp(gunicorn.app.base.BaseApplication): def __init__(self, options=None): self.options = options or {} super(AugurGunicornApp, self).__init__() + # self.cfg.pre_request.set(pre_request) def load_config(self): config = dict([(key, value) for key, value in iteritems(self.options) @@ -27,6 +35,7 @@ def load(self): return server.app def run(): + mp.set_start_method('forkserver') app = augur.Application() app.arg_parser.add_argument("-u", "--updater", action="store_true", @@ -47,7 +56,6 @@ def exit(): os._exit(0) - if not args.updater: host = app.read_config('Server', 'host', 'AUGUR_HOST', '0.0.0.0') port = app.read_config('Server', 'port', 'AUGUR_PORT', '5000') @@ -55,7 +63,8 @@ def exit(): options = { 'bind': '%s:%s' % (host, port), 'workers': workers, - 'accesslog': '-' + 'accesslog': '-', + 'access_log_format': '%(h)s - %(t)s - %(r)s', } logger.info('Starting server...') master = Arbiter(AugurGunicornApp(options)).run() @@ -67,5 +76,4 @@ def exit(): exit() if __name__ == '__main__': - mp.set_start_method('forkserver') run() diff --git a/augur/server.py b/augur/server.py --- a/augur/server.py +++ b/augur/server.py @@ -1,27 +1,26 @@ -#spdx-license-identifier: mit +#SPDX-License-Identifier: MIT +""" +Creates a WSGI server that serves the Augur REST API +""" import os import sys import json +import re +import html from flask import Flask, request, Response, send_from_directory from flask_cors import CORS import pandas as pd import augur -from augur.util import annotate, metrics - -sys.path.append('..') +from augur.util import annotate, metric_metadata, logger +from augur.routes import create_all_datasource_routes, create_status_routes AUGUR_API_VERSION = 'api/unstable' -''' -make a try and accept condition -if its open 
the GH_DATA_CONFIG_FILE and then its open in read mode -and if the file does't open the it print Couldn\'t open config file, attempting to create. -''' - class Server(object): def __init__(self): # Create Flask application self.app = Flask(__name__) + self.api_version = AUGUR_API_VERSION app = self.app CORS(app) @@ -34,1228 +33,137 @@ def __init__(self): self.cache = augur_app.cache.get_cache('server', expire=expire) self.cache.clear() - # Initalize all of the classes - ghtorrent = augur_app.ghtorrent() - ghtorrentplus = augur_app.ghtorrentplus() - publicwww = augur_app.publicwww() - git = augur_app.git() - github = augur_app.githubapi() - librariesio = augur_app.librariesio() - downloads = augur_app.downloads() - localcsv = augur_app.localcsv() + self.show_metadata = False + create_all_datasource_routes(self) + + # this needs to be the last route creation function called so that all the metrics have their metadata updated + create_status_routes(self) ##################################### - ### API STATUS ### + ### UTILITY ### ##################################### - @app.route('/{}/'.format(AUGUR_API_VERSION)) + @app.route('/{}/'.format(self.api_version)) def status(): status = { - 'status': 'OK', - 'avaliable_metrics': metrics + 'status': 'OK' } - json = self.transform(status) - return Response(response=json, + return Response(response=json.dumps(status), status=200, mimetype="application/json") - - ##################################### - ### DIVERSITY AND INCLUSION ### - ##################################### - - - ##################################### - ### GROWTH, MATURITY, AND DECLINE ### - ##################################### - - """ - @api {get} /:owner/:repo/timeseries/issues/closed Closed Issues - @apiName ClosedIssues - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issues-closed.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - @apiParam {string} group_by (default to week) allows for results to be grouped by day, week, month, or year - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2011-03-19T00:00:00.000Z", - "issues_closed": 3 - }, - { - "date": "2011-03-25T00:00:00.000Z", - "issues_closed": 6 - } - ] - """ - self.addTimeseries(ghtorrent.closed_issues, "issues/closed") - - """ - @api {get} /:owner/:repo/issue_close_time Issue Resolution Duration - @apiName IssueResolutionDuration - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issue-resolution-duration.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "id": 2, - "date": "2012-01-19T05:24:55.000Z", - "days_to_close": 7 - }, - { - "id": 3, - "date": "2012-01-26T15:07:56.000Z", - "days_to_close": 0 - } - ] - """ - self.addMetric(ghtorrentplus.closed_issue_resolution_duration, 'issues/time_to_close') - - """ - @api {get} /:owner/:repo/timeseries/commits?group_by=:group_by Code Commits - @apiName CodeCommits - @apiGroup Growth-Maturity-Decline - @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/code-commits.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of 
the GitHub repository - @apiParam {String} group_by (Default to week) Allows for results to be grouped by day, week, month, or year - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2017-08-27T00:00:00.000Z", - "commits": 44 - }, - { - "date": "2017-08-20T00:00:00.000Z", - "commits": 98 - } - ] - """ - self.addTimeseries(ghtorrent.code_commits, 'commits') - - # self.addTimeseries(github.code_reviews, 'code_reviews') - - """ - @api {get} /:owner/:repo/timeseries/code_review_iteration Code Review Iteration - @apiName CodeReviewIteration - @apiGroup Growth-Maturity-Decline - @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/code-review-iteration.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2012-05-16T00:00:00.000Z", - "iterations": 2 - }, - { - "date": "2012-05-16T00:00:00.000Z", - "iterations": 1 - } - ] - """ - self.addTimeseries(ghtorrent.code_review_iteration, 'code_review_iteration') - - """ - @api {get} /:owner/:repo/timeseries/contribution_acceptance Contribution Acceptance - @apiName ContributionAcceptance - @apiGroup Growth-Maturity-Decline - @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/contribution-acceptance.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2012-05-16T00:00:00.000Z", - "ratio": 1.1579 - }, - { - "date": "2012-05-20T00:00:00.000Z", - "ratio": 1.3929 - } - ] - """ - self.addTimeseries(ghtorrent.contribution_acceptance, 'contribution_acceptance') - - """ - @api {get} /:owner/:repo/timeseries/contributing_github_organizations Contributing Github Organizations - @apiName ContributingGithubOrganizations - @apiGroup Growth-Maturity-Decline - @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/contributing-organizations.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "contributing_org": 4066, - "commits": 36069, - "issues": 432, - "commit_comments": 1597, - "issue_comments": 15421, - "pull_requests": 808, - "pull_request_comments": 0, - "total": 54327, - "count": 35 - }, - { - "contributing_org": 16465, - "commits": 39111, - "issues": 332, - "commit_comments": 524, - "issue_comments": 3188, - "pull_requests": 57, - "pull_request_comments": 18, - "total": 43230, - "count": 11 - } - ] - """ - self.addMetric(ghtorrent.contributing_github_organizations, 'contributing_github_organizations') - - """ - @api {get} /:owner/:repo/timeseries/issues/response_time First Response To Issue Duration - @apiName FirstResponseToIssueDuration - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/first-response-to-issue-duration.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "id": 2, - "opened": "2012-01-19T05:24:55.000Z", - "first_commented": "2012-01-19T05:30:13.000Z", - "pull_request": 0, - 
"minutes_to_comment": 5 - }, - { - "id": 3, - "opened": "2012-01-26T15:07:56.000Z", - "first_commented": "2012-01-26T15:09:28.000Z", - "pull_request": 0, - "minutes_to_comment": 1 - } - ] - """ - self.addTimeseries(ghtorrent.first_response_to_issue_duration, 'issues/response_time') - - """ - @api {get} /:owner/:repo/timeseries/forks?group_by=:group_by Forks - @apiName Forks - @apiGroup Growth-Maturity-Decline - @apiParam {String} group_by (Default to week) Allows for results to be grouped by day, week, month, or year - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/forks.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2017-08-20T00:00:00.000Z", - "projects": 48 - }, - { - "date": "2017-08-13T00:00:00.000Z", - "projects": 53 - } - ] - """ - self.addTimeseries(ghtorrent.forks, 'forks') - - """ - @api {get} /:owner/:repo/timeseries/lines_changed Lines of Code Changed - @apiName LinesOfCodeChanged - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/lines-of-code-changed.md">CHAOSS Metric Definition</a> - - @apiGroup Growth-Maturity-Decline - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - 'date': '2015-11-01T00:00:00Z', - 'lines_changed': 396137.0 - }, - { - 'date': '2015-11-08T00:00:00Z', - 'lines_changed': 3896.0 - } - ] - """ - self.addTimeseries(github.lines_of_code_changed, 'lines_changed') - - """ - @api {get} /:owner/:repo/pulls/maintainer_response_time Maintainer to Merge Request Duration - @apiName MaintainerToMergeRequestDuration - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/maintainer-response-to-merge-request-duration.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2011-05-10T00:00:00.000Z", - "days": 32 - }, - { - "date": "2011-05-21T00:00:00.000Z", - "days": 3 - } - ] - """ - self.addTimeseries(ghtorrent.maintainer_response_to_merge_request_duration, 'pulls/maintainer_response_time') - - """ - @api {get} /:owner/:repo/pulls/new_contributing_github_organizations New Contributing Github Organizations - @apiName NewContributingGithubOrganizations - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/new-contributing-organizations.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-04-12T23:43:38.000Z", - "organizations": 1 - }, - { - "date": "2008-08-23T15:05:52.000Z", - "organizations": 2 - } - ] - """ - self.addTimeseries(ghtorrent.new_contributing_github_organizations, 'new_contributing_github_organizations') - - """ - @api {get} /:owner/:repo/timeseries/issues Open Issues - @apiName OpenIssues - @apiGroup Growth-Maturity-Decline - @apiDescription <a 
href="https://github.com/chaoss/metrics/blob/master/activity-metrics/open-issues.md">CHAOSS Metric Definition</a> - - @apiParam {string} group_by (default to week) allows for results to be grouped by day, week, month, or year - @apiParam {string} owner username of the owner of the github repository - @apiParam {string} repo name of the github repository - - @apiSucessExample {json} success-response: - [ - { - "date": "2017-08-27T00:00:00.000Z", - "issues": 67 - }, - { - "date": "2017-08-20T00:00:00.000Z", - "issues": 100 - } - ] - """ - self.addTimeseries(ghtorrent.open_issues, 'issues') - - """ - @api {get} /:owner/:repo/timeseries/pulls/comments?group_by=:group_by Pull Request Comments - @apiName PullRequestComments - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/pull-request-comments.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2011-11-15T00:00:00.000Z", - "counter": 3 - }, - { - "date": "2011-11-25T00:00:00.000Z", - "counter": 1 - } - ] - """ - self.addTimeseries(ghtorrent.pull_request_comments, 'pulls/comments') - - """ - @api {get} /:owner/:repo/timeseries/pulls Pull Requests Open - @apiName PullRequestsOpen - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/pull-requests-open.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2013-01-09T00:00:00.000Z", - "pull_requests": 3 - }, - { - "date": "2016-01-14T00:00:00.000Z", - "pull_requests": 1 - } - ] + @api {post} /batch Batch Requests + @apiName Batch + @apiGroup Batch + @apiDescription Returns results of batch requests + POST JSON of api requests """ - self.addTimeseries(ghtorrent.pull_requests_open, 'pulls') - - - ##################################### - ### RISK ### - ##################################### + @app.route('/{}/batch'.format(self.api_version), methods=['GET', 'POST']) + def batch(): + """ + Execute multiple requests, submitted as a batch. + :statuscode 207: Multi status + """ + """ + to have on future batch request for each individual chart: - ##################################### - ### VALUE ### - ##################################### + - timeseries/metric + - props that are in current card files (title) + - do any of these things act like the vuex states? + - what would singular card(dashboard) look like now? 
- ##################################### - ### ACTIVITY ### - ##################################### - """ - @api {get} /:owner/:repo/timeseries/issue_comments Issue Comments - @apiName IssueComments - @apiGroup Activity - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issue-comments.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2009-04-05T00:00:00.000Z", - "counter": 3 - }, - { - "date": "2009-04-16T00:00:00.000Z", - "counter": 5 - } - ] - """ - self.addTimeseries(ghtorrent.issue_comments, 'issue/comments') + """ - """ - @api {get} /:owner/:repo/watchers Watchers - @apiName Watchers - @apiGroup Activity - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/activity-metrics-list.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2017-08-23T00:00:00.000Z", - "watchers": 86 - }, - { - "date": "2017-08-16T00:00:00.000Z", - "watchers": 113 - } - ] - """ - self.addMetric(ghtorrent.watchers, 'watchers') + self.show_metadata = False - ##################################### - ### EXPERIMENTAL ### - ##################################### + if request.method == 'GET': + """this will return sensible defaults in the future""" + return app.make_response('{"status": "501", "response": "Defaults for batch requests not implemented. Please POST a JSON array of requests to this endpoint for now."}') - ### COMMIT RELATED ### - """ - @api {get} /:owner/:repo/timeseries/commits100 Commits100 - @apiName Commits100 - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2017-08-13T00:00:00.000Z", - "commits": 114 - }, - { - "date": "2017-08-06T00:00:00.000Z", - "commits": 113 - } - ] - """ - self.addTimeseries(ghtorrent.commits100, 'commits100') + try: + requests = json.loads(request.data) + except ValueError as e: + request.abort(400) - """ - @api {get} /:owner/:repo/timeseries/commits/comments Commit Comments - @apiName CommitComments - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-07-10T00:00:00.000Z", - "counter": 2 - }, - { - "date": "2008-07-25T00:00:00.000Z", - "counter": 1 - } - ] + responses = [] - """ - self.addTimeseries(ghtorrent.commit_comments, 'commits/comments') + for index, req in enumerate(requests): - """ - @api {get} /:owner/:repo/committer_locations Committer Locations - @apiName CommitterLocations - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "login": "rafaelfranca", - "location": "São Paulo, Brazil", - "commits": 7171 - }, - { - "login": "tenderlove", - "location": "Seattle", - "commits": 4605 - } - ] - """ - self.addMetric(ghtorrent.committer_locations, 'committer_locations') - """ - @api {get} /:owner/:repo/timeseries/total_committers Total Committers - @apiName TotalCommitters - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2004-11-24T00:00:00.000Z", - "total_total_committers": 1 - }, - { - "date": "2005-02-18T00:00:00.000Z", - "total_total_committers": 2 - } - ] - """ - self.addTimeseries(ghtorrent.total_committers, 'total_committers') + method = req['method'] + path = req['path'] + body = req.get('body', None) - ### ISSUE RELATED ### - """ - @api {get} /:owner/:repo/timeseries/issues/activity Issue Activity - @apiName IssueActivity - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "0000-00-00", - "count": 2, - "action": "closed" - }, - { - "date": "0000-00-00", - "count": 70, - "action": "opened" - }, - { - "date": "0000-00-00", - "count": 0, - "action": "reopened" - }, - { - "date": "0000-00-00", - "count": 68, - "action": "open" - }, - { - "date": "2009-04-01T00:00:00.000Z", - "count": 0, - "action": "closed" - }, - { - "date": "2009-04-01T00:00:00.000Z", - "count": 29, - "action": "opened" - }, - { - "date": "2009-04-01T00:00:00.000Z", - "count": 0, - "action": "reopened" - }, - { - "date": "2009-04-01T00:00:00.000Z", - "count": 29, - "action": "open" - } - ] - """ - self.addTimeseries(ghtorrent.issue_activity, 'issues/activity') + try: - # PULL REQUEST RELATED - """ - @api {get} /:owner/:repo/timeseries/pulls/acceptance_rate Pull Request Acceptance Rate - @apiName PullRequestAcceptanceRate - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2010-09-11T00:00:00.000Z", - "rate": 0.3333 - }, - { - "date": "2010-09-13T00:00:00.000Z", - "rate": 0.3333 - } - ] - """ - self.addTimeseries(ghtorrent.pull_request_acceptance_rate, 'pulls/acceptance_rate') + logger.debug('batch-internal-loop: %s %s' % (method, path)) - # COMMUNITY / CONTRIBUTIONS - """ - @api {get} /:owner/:repo/timeseries/community_age Community Age - @apiName CommunityAge - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "login": "bonnie", - "location": "Rowena, TX", - "commits": 12 - }, - { - "login":"clyde", - "location":"Ellis County, TX", - "commits": 12 - } - ] - """ - self.addMetric(ghtorrent.community_age, 'community_age') + with app.app_context(): + with app.test_request_context(path, + method=method, + data=body): + try: + # Can modify flask.g here without affecting + # flask.g of the root request for the batch - """ - @api {get} /:owner/:repo/timeseries/community_engagement Community Engagement - @apiName CommunityEngagement - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2011-04-16T00:00:00.000Z", - "issues_opened": 0, - "issues_closed": 0, - "pull_requests_opened": 32, - "pull_requests_merged": 0, - "pull_requests_closed": 19, - "issues_opened_total": 4, - "issues_closed_total": 0, - "issues_closed_rate_this_window": null, - "issues_closed_rate_total": 0, - "issues_delta": 0, - "issues_open": 4, - "pull_requests_opened_total": 284, - "pull_requests_closed_total": 242, - "pull_requests_closed_rate_this_window": 0.59375, - "pull_requests_closed_rate_total": 0.8521126761, - "pull_requests_delta": 13, - "pull_requests_open": 42 - }, - { - "date": "2011-04-17T00:00:00.000Z", - "issues_opened": 0, - "issues_closed": 0, - "pull_requests_opened": 15, - "pull_requests_merged": 1, - "pull_requests_closed": 14, - "issues_opened_total": 4, - "issues_closed_total": 0, - "issues_closed_rate_this_window": null, - "issues_closed_rate_total": 0, - "issues_delta": 0, - "issues_open": 4, - "pull_requests_opened_total": 299, - "pull_requests_closed_total": 256, - "pull_requests_closed_rate_this_window": 0.9333333333, - "pull_requests_closed_rate_total": 0.856187291, - "pull_requests_delta": 1, - "pull_requests_open": 43 - } - ] - """ - self.addTimeseries(ghtorrent.community_engagement, 'community_engagement') + # Pre process Request + rv = app.preprocess_request() - """ - @api {get} /:owner/:repo/contributors Total Contributions by User - @apiName TotalContributions - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "user": 8153, - "commits": 6825, - "issues": 127, - "commit_comments": 313, - "issue_comments": 13152, - "pull_requests": 1, - "pull_request_comments": 0, - "total": 20418 - }, - { - "user": 45381, - "commits": 2192, - "issues": 202, - "commit_comments": 130, - "issue_comments": 4633, - "pull_requests": 0, - "pull_request_comments": 0, - "total": 7157 - } - ] - """ - self.addMetric(ghtorrent.contributors, 'contributors') + if rv is None: + # Main Dispatch + rv = app.dispatch_request() - """ - @api {get} /:owner/:repo/timeseries/contributions Contributions - @apiName ContributionsByWeek - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - @apiParam (String) user Limit results to the given user's contributions - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2004-11-24T00:00:00.000Z", - "commits": 3, - "pull_requests": null, - "issues": null, - "commit_comments": null, - "pull_request_comments": null, - "issue_comments": null, - "total": null - }, - { - "date": "2004-11-30T00:00:00.000Z", - "commits": 7, - "pull_requests": null, - "issues": null, - "commit_comments": null, - "pull_request_comments": null, - "issue_comments": null, - "total": null - } - ] - """ - @app.route('/{}/<owner>/<repo>/contributions'.format(AUGUR_API_VERSION)) - def contributions(owner, repo): - repoid = ghtorrent.repoid(owner, repo) - user = request.args.get('user') - contribs = ghtorrent.contributions(owner, repo) - transformed_contributors = self.transform(contribs, orient=request.args.get('orient')) - return Response(response=transformed_contributors, - status=200, - mimetype="application/json") + except Exception as e: + rv = app.handle_user_exception(e) - """ - @api {get} /:owner/:repo/timeseries/project_age Project Age - @apiName ProjectAge - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-04-11T00:00:00.000Z", - "{0}": 1 - } - ] - - """ - self.addMetric(ghtorrent.project_age, 'project_age') + response = app.make_response(rv) - ### DEPENDENCY RELATED ### - """ - @api {get} /:owner/:repo/dependencies Dependencies - @apiName Dependencies - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - "full_name": "rails/rails", - "description": "Ruby on Rails", - "fork": false, - "created_at": "2008-04-11T02:19:47.000Z", - "updated_at": "2018-05-08T14:18:07.000Z", - "pushed_at": "2018-05-08T11:38:30.000Z", - "homepage": "http://rubyonrails.org", - "size": 163747, - "stargazers_count": 39549, - "language": "Ruby", - "has_issues": true, - "has_wiki": false, - "has_pages": false, - "forks_count": 16008, - "mirror_url": null, - "open_issues_count": 1079, - "default_branch": "master", - "subscribers_count": 2618, - "uuid": "8514", - "source_name": null, - "license": "MIT", - "private": false, - "contributions_count": 2627, - "has_readme": "README.md", - "has_changelog": null, - "has_contributing": "CONTRIBUTING.md", - "has_license": "MIT-LICENSE", - "has_coc": "CODE_OF_CONDUCT.md", - "has_threat_model": null, - "has_audit": null, - "status": null, - "last_synced_at": "2018-03-31T12:40:28.163Z", - "rank": 28, - "host_type": "GitHub", - "host_domain": null, - "name": null, - "scm": "git", - "fork_policy": null, - "github_id": "8514", - "pull_requests_enabled": null, - "logo_url": null, - "github_contributions_count": 2627, - "keywords": [ - "activejob", - "activerecord", - "framework", - "html", - "mvc", - "rails", - "ruby" - ], - "dependencies": [ - { - "project_name": "blade-sauce_labs_plugin", - "name": "blade-sauce_labs_plugin", - "platform": "rubygems", - "requirements": "0.7.2", - "latest_stable": "0.7.3", - "latest": "0.7.3", - "deprecated": false, - "outdated": true, - "filepath": "Gemfile.lock", - "kind": "runtime" - }, - { - "project_name": "blade-qunit_adapter", - "name": "blade-qunit_adapter", - "platform": "rubygems", - "requirements": "2.0.1", - "latest_stable": "2.0.1", - "latest": "2.0.1", - "deprecated": false, - "outdated": false, - "filepath": "Gemfile.lock", - "kind": "runtime" - } - ] - """ - self.addMetric(librariesio.dependencies, 'dependencies') - - """ - @api {get} /:owner/:repo/dependency_stats Dependency Stats - @apiName DependencyStats - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "dependencies": "10", - "dependent_projects": "10.6K", - "dependent_repositories": "392K" - } - ] - """ - self.addMetric(librariesio.dependency_stats, 'dependency_stats') + # Post process Request + response = app.process_response(response) - """ - @api {get} /:owner/:repo/dependents Dependents - @apiName Dependents - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "name": "rspec-rails", - "platform": "Rubygems", - "description": "rspec-rails is a testing framework for Rails 3+.", - "homepage": "https://github.com/rspec/rspec-rails", - "repository_url": "https://github.com/rspec/rspec-rails", - "normalized_licenses": [ - "MIT" - ], - "rank": 26, - "latest_release_published_at": "2017-11-20T09:27:22.144Z", - "latest_release_number": "3.7.2", - "language": "Ruby", - "status": null, - "package_manager_url": "https://rubygems.org/gems/rspec-rails", - "stars": 3666, - "forks": 732, - "keywords": [], - "latest_stable_release": { - "id": 11315605, - "project_id": 245284, - "number": "3.7.2", - "published_at": "2017-11-20T09:27:22.144Z", - "created_at": "2017-11-20T09:31:11.532Z", - "updated_at": "2017-11-20T09:31:11.532Z", - "runtime_dependencies_count": 7 - }, - "latest_download_url": "https://rubygems.org/downloads/rspec-rails-3.7.2.gem", - "dependents_count": 4116, - "dependent_repos_count": 129847, - "versions": [ - { - "number": "2.12.2", - "published_at": "2013-01-12T18:56:40.027Z" - }, - { - "number": "2.12.1", - "published_at": "2013-01-07T23:04:53.104Z" - }, - { - "number": "2.12.0", - "published_at": "2012-11-13T03:37:01.354Z" - } - ] - """ - self.addMetric(librariesio.dependents, 'dependents') + # Response is a Flask response object. + # _read_response(response) reads response.response + # and returns a string. If your endpoints return JSON object, + # this string would be the response as a JSON string. + responses.append({ + "path": path, + "status": response.status_code, + "response": str(response.get_data(), 'utf8'), + }) - ### OTHER ### - """ - @api {get} /:owner/:repo/bus_factor Bus Factor - @apiName BusFactor - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "best": "5", - "worst": "1" - } - ] - """ - self.addMetric(github.bus_factor, "bus_factor") + except Exception as e: - """ - @api {get} /git/lines_changed/:git_repo_url Lines Changed by Author - @apiName ChangesByAuthor - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "additions":2, - "author_date":"2018-05-14 10:09:57 -0500", - "author_email":"[email protected]", - "author_name":"Sean P. Goggins", - "commit_date":"2018-05-16 10:12:22 -0500", - "committer_email":"[email protected]", - "committer_name":"Derek Howard", - "deletions":0,"hash":"77e603a", - "message":"merge dev", - "parents":"b8ec0ed" - } - ] - """ - self.addGitMetric(git.changes_by_author, 'changes_by_author') + responses.append({ + "path": path, + "status": 500, + "response": str(e) + }) - """ - @api {get} /:owner/:repo/timeseries/downloads Downloads - @apiName Downloads - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2018-06-14", - "downloads": 129148 - }, - { - "date": "2018-06-13", - "downloads": 131262 - } - ] - """ - self.addTimeseries(downloads.downloads, 'downloads') - @app.route('/{}/git/repos'.format(AUGUR_API_VERSION)) - def downloaded_repos(): - drs = self.transform(git.downloaded_repos()) - return Response(response=drs, - status=200, + return Response(response=json.dumps(responses), + status=207, mimetype="application/json") - """ - @api {get} /:owner/:repo/timeseries/fakes Fakes - @apiName Fakes - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2010-04-09T00:00:00.000Z", - "fakes": 1 - }, - { - "date": "2010-04-27T00:00:00.000Z", - "fakes": 2 - } - ] - """ - self.addTimeseries(ghtorrent.fakes, 'fakes') - - """ - @api {get} /git/lines_changed/:git_repo_url Lines Changed (minus whitespace) - @apiName LinesChanged - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "additions":2, - "author_date":"2018-05-14 10:09:57 -0500", - "author_email":"[email protected]", - "author_name":"Sean P. Goggins", - "commit_date":"2018-05-16 10:12:22 -0500", - "committer_email":"[email protected]", - "committer_name":"Derek Howard", - "deletions":0, - "hash":"77e603a", - "message":"merge dev", - "parents":"b8ec0ed" - } - ] - """ - self.addGitMetric(git.lines_changed_minus_whitespace, 'lines_changed') - - """ - @api {get} /:owner/:repo/linking_websites Linking Websites - @apiName LinkingWebsites - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "url": "missouri.edu", - "rank": "1" - }, - { - "url": "unomaha.edu", - "rank": "2" - } - ] - """ - self.addMetric(publicwww.linking_websites, 'linking_websites') - - """ - @api {get} /:owner/:repo/timeseries/tags/major Major Tags - @apiName MajorTags - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-04-10T17:25:14-07:00", - "release": "v1.0.0" - }, - { - "date": "2008-04-10T17:25:47-07:00", - "release": "v2.0.0" - } - ] - """ - self.addTimeseries(github.major_tags, 'tags/major') - - """ - @api {get} /:owner/:repo/timeseries/tags/major Tages - @apiName Tags - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-04-10T17:25:06-07:00", - "release": "v0.9.1" - }, - { - "date": "2008-04-10T17:25:07-07:00", - "release": "v0.9.2" - } - ] - """ - self.addTimeseries(github.tags, 'tags') """ - @api {get} /ghtorrent_range GHTorrent Date Range - @apiName GhtorrentRange - @apiGroup Utility - @apiDescription Utility endpoint to show the range of dates GHTorrent covers. - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-04-10T17:25:06-07:00", - "release": "v0.9.1" - }, - { - "date": "2008-04-10T17:25:07-07:00", - "release": "v0.9.2" - } - ] + @api {post} /batch Batch Request Metadata + @apiName BatchMetadata + @apiGroup Batch + @apiDescription Returns metadata of batch requests + POST JSON of API requests metadata """ - @app.route('/{}/ghtorrent_range'.format(AUGUR_API_VERSION)) - - def ghtorrent_range(): - ghtorrent_range = self.transform(ghtorrent.ghtorrent_range()) - return Response(response=ghtorrent_range, - status=200, - mimetype="application/json") + @app.route('/{}/batch/metadata'.format(self.api_version), methods=['GET', 'POST']) + def batch_metadata(): - ####################### - # Batch Requests # - ####################### + self.show_metadata = True - """ - @api {post} /batch Batch Requests - @apiName Batch - @apiGroup Batch - @apiDescription Returns results of batch requests - POST JSON of api requests - """ - #TODO: documentation - @app.route('/{}/batch'.format(AUGUR_API_VERSION), methods=['GET', 'POST']) - def batch(): - """ - Execute multiple requests, submitted as a batch. - :statuscode 207: Multi status - """ if request.method == 'GET': """this will return sensible defaults in the future""" - return app.make_response('{"status": "501", "response": "Defaults for batch requests not implemented. Please POST a JSON array of requests to this endpoint for now."}') + return app.make_response(json.dumps(metric_metadata)) try: requests = json.loads(request.data) @@ -1266,13 +174,14 @@ def batch(): for index, req in enumerate(requests): - method = req['method'] path = req['path'] body = req.get('body', None) try: + augur.logger.info('batch endpoint: ' + path) + with app.app_context(): with app.test_request_context(path, method=method, @@ -1300,10 +209,11 @@ def batch(): # _read_response(response) reads response.response # and returns a string. If your endpoints return JSON object, # this string would be the response as a JSON string. 
+ responses.append({ "path": path, "status": response.status_code, - "response": str(response.get_data(), 'utf8') + "response": str(response.get_data(), 'utf8'), }) except Exception as e: @@ -1314,15 +224,14 @@ def batch(): "response": str(e) }) + self.show_metadata = False return Response(response=json.dumps(responses), status=207, mimetype="application/json") - augur_app.finalize_config() - - def transform(self, data, orient='records', + def transform(self, func, args=None, kwargs=None, orient='records', group_by=None, on=None, aggregate='sum', resample=None, date_col='date'): if orient is None: @@ -1330,20 +239,31 @@ def transform(self, data, orient='records', result = '' - if hasattr(data, 'to_json'): - if group_by is not None: - data = data.group_by(group_by).aggregate(aggregate) - if resample is not None: - data['idx'] = pd.to_datetime(data[date_col]) - data = data.set_index('idx') - data = data.resample(resample).aggregate(aggregate) - data['date'] = data.index - result = data.to_json(orient=orient, date_format='iso', date_unit='ms') + if not self.show_metadata: + + if not args and not kwargs: + data = func() + elif args and not kwargs: + data = func(*args) + else: + data = func(*args, **kwargs) + + if hasattr(data, 'to_json'): + if group_by is not None: + data = data.group_by(group_by).aggregate(aggregate) + if resample is not None: + data['idx'] = pd.to_datetime(data[date_col]) + data = data.set_index('idx') + data = data.resample(resample).aggregate(aggregate) + data['date'] = data.index + result = data.to_json(orient=orient, date_format='iso', date_unit='ms') + else: + try: + result = json.dumps(data) + except: + result = data else: - try: - result = json.dumps(data) - except: - result = data + result = json.dumps(func.metadata) return result @@ -1355,33 +275,31 @@ def flaskify(self, func, cache=True): if cache: def generated_function(*args, **kwargs): def heavy_lifting(): - return self.transform(func(*args, **kwargs), **request.args.to_dict()) + return self.transform(func, args, kwargs, **request.args.to_dict()) body = self.cache.get(key=str(request.url), createfunc=heavy_lifting) return Response(response=body, status=200, mimetype="application/json") - generated_function.__name__ = func.__name__ + generated_function.__name__ = func.__self__.__class__.__name__ + " _" + func.__name__ return generated_function else: def generated_function(*args, **kwargs): kwargs.update(request.args.to_dict()) - return Response(response=self.transform(func(*args, **kwargs)), + return Response(response=self.transform(func, args, kwargs, **request.args.to_dict()), status=200, mimetype="application/json") - generated_function.__name__ = func.__name__ + generated_function.__name__ = func.__self__.__class__.__name__ + " _" + func.__name__ return generated_function def addMetric(self, function, endpoint, cache=True, **kwargs): """Simplifies adding routes that only accept owner/repo""" - endpoint = '/{}/<owner>/<repo>/{}'.format(AUGUR_API_VERSION, endpoint) + endpoint = '/{}/<owner>/<repo>/{}'.format(self.api_version, endpoint) self.app.route(endpoint)(self.flaskify(function, cache=cache)) self.updateMetricMetadata(function, endpoint, **kwargs) - - def addGitMetric(self, function, endpoint, cache=True): """Simplifies adding routes that accept""" - endpoint = '/{}/git/{}/<path:repo_url>/'.format(AUGUR_API_VERSION, endpoint) + endpoint = '/{}/git/{}/<path:repo_url>/'.format(self.api_version, endpoint) self.app.route(endpoint)(self.flaskify(function, cache=cache)) self.updateMetricMetadata(function, 
endpoint=endpoint, metric_type='git') @@ -1400,7 +318,7 @@ def updateMetricMetadata(self, function, endpoint, **kwargs): # Get the unbound function from the bound function's class so that we can modify metadata # across instances of that class. real_func = getattr(function.__self__.__class__, function.__name__) - annotate(endpoint=endpoint, source=function.__self__.__class__.__name__, **kwargs)(real_func) + annotate(endpoint=endpoint, **kwargs)(real_func) def run(): server = Server() diff --git a/augur/util.py b/augur/util.py --- a/augur/util.py +++ b/augur/util.py @@ -1,6 +1,11 @@ #SPDX-License-Identifier: MIT +""" +Provides shared functions that do not fit in a class of their own +""" import pandas as pd import os +import re +import json import logging import coloredlogs import beaker @@ -12,6 +17,8 @@ # end imports # (don't remove the above line, it's for a script) +def getFileID(path): + return os.path.splitext(os.path.basename(path))[0] __ROOT = os.path.abspath(os.path.dirname(__file__)) def get_data_path(path): @@ -31,7 +38,7 @@ def get_cache(namespace, cache_manager=None): cache_manager = __memory_cache return cache_manager.get_cache(namespace) -metrics = [] +metric_metadata = [] def annotate(metadata=None, **kwargs): """ Decorate a function as being a metric @@ -41,8 +48,13 @@ def annotate(metadata=None, **kwargs): def decorate(func): if not hasattr(func, 'metadata'): func.metadata = {} - metrics.append(func.metadata) + metric_metadata.append(func.metadata) func.metadata.update(metadata) func.metadata.update(dict(kwargs)) + + func.metadata['metric_name'] = re.sub('_', ' ', func.__name__).title() + func.metadata['source'] = re.sub(r'(.*\.)', '', func.__module__) + func.metadata['ID'] = "{}-{}".format(func.metadata['source'].lower(), func.metadata['tag']) + return func - return decorate \ No newline at end of file + return decorate diff --git a/docs/metrics/status.py b/docs/metrics/status.py deleted file mode 100644 --- a/docs/metrics/status.py +++ /dev/null @@ -1,85 +0,0 @@ -import os -import re -import json -import glob -import webbrowser -from flask import Flask, request, Response - -metric_files = ['upstream/1_Diversity-Inclusion.md', 'upstream/2_Growth-Maturity-Decline.md', 'upstream/3_Risk.md', 'upstream/4_Value.md'] - -metric_type_by_file = { - 'upstream/1_Diversity-Inclusion.md': 'Diversity and Inclusion', - 'upstream/2_Growth-Maturity-Decline.md': 'Growth, Maturity, and Decline', - 'upstream/3_Risk.md': 'Risk', - 'upstream/4_Value.md': 'Value', -} - -color_by_status = { - 'unimplemented': '<span style="color: #C00">unimplemented</span>', - 'in_progress': '<span style="color: #CC0">in progress</span>', - 'implemented': '<span style="color: #0C0">implemented</span>' -} - -statusMap = json.loads(open('status.json', 'r').read()) -statusHTML = """ -<html> -<head> - <title>Augur Metrics Status</title> - <style> - td { padding: 5px } - </style> -</head> -<body> - <h1>Augur Metrics Status</h1> - -""" - -def getFileID(path): - return os.path.splitext(os.path.basename(path))[0] - -def printMetric(title, path): - global statusHTML - status = 'unimplemented' - fileID = getFileID(path) - if fileID in statusMap: - status = statusMap[fileID] - if status != 'printed': - statusHTML += '<tr><td>{}</td><td><a href="https://github.com/chaoss/wg-gmd/tree/master/{}"> {} ({})</td></tr>'.format(color_by_status[status], path, title, fileID) - statusMap[fileID] = 'printed' - return fileID - -# Iterate through the category Markdown files to categorize links -for filename in metric_files: - file = 
open(filename, 'r') - matches = re.findall(r'\[(.*?)\]\((.*?\.md)\)', file.read()) - if len(matches) > 0: - statusHTML += '<h2>' + metric_type_by_file[filename] + '</h2><table><tr><td>status</td><td>metric</td></tr>' - for match in matches: - printMetric(match[0], match[1]) - statusHTML += '</table>' - - -# Iterate through the files in activity-metrics to find uncategorized metrics -statusHTML += '<h2>Uncategorized</h2><table><tr><td>status</td><td>metric</td></tr>' -for filename in glob.iglob('upstream/activity-metrics/*.md'): - printMetric(getFileID(filename).replace('-', ' ').title(), 'activity-metrics/' + getFileID(filename) + '.md') - - -statusHTML += """ - </table> -</body> -</html> -""" - -app = Flask(__name__) - [email protected]("/") -def root(): - return statusHTML - -def run(): - webbrowser.open_new_tab('http://localhost:5001/') - app.run(port=5001) - -if __name__ == "__main__": - run() diff --git a/docs/python/source/conf.py b/docs/python/source/conf.py old mode 100644 new mode 100755 --- a/docs/python/source/conf.py +++ b/docs/python/source/conf.py @@ -32,6 +32,7 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', + 'sphinx.ext.autosummary', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', @@ -85,7 +86,7 @@ # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True - +html_sidebars = { '**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'] } # -- Options for HTML output ---------------------------------------------- @@ -164,6 +165,8 @@ ] +autosummary_generate = True + # Example configuration for intersphinx: refer to the Python standard library. diff --git a/plugins/example-plugin/plugin.py b/plugins/example-plugin/plugin.py deleted file mode 100644 --- a/plugins/example-plugin/plugin.py +++ /dev/null @@ -1,26 +0,0 @@ -#SPDX-License-Identifier: MIT -from augur import register_plugin, logger -from augur.server import addMetric -# (don't remove the above line, it's for a script) - -class ExamplePlugin(object): - """ - This plugin serves as an example as to how to load plugins into Augur - """ - def __init__(self): - logger.info('example-plugin loaded') - return - - def example_metric(self, owner, repo): - return [] - - -def add_routes(app, instance): - """ - Responsible for adding this plugin's data sources to the API - """ - addMetric(app, instance.example_metric, 'example-metric') - - - -register_plugin(ExamplePlugin, 'example-plugin', routes='routes.py') \ No newline at end of file diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -26,8 +26,7 @@ url='https://github.com/OSSHealth/augur', author='Derek Howard', author_email='[email protected]', - packages=['augur'], - package_dir={'augur': 'augur'}, + packages=['augur', 'augur.plugins', 'augur.routes'], license='MIT', classifiers=[ 'Development Status :: 1 - Planning', @@ -40,8 +39,7 @@ install_requires=[ 'cython', 'protobuf', 'ipdb', 'setuptools-git', 'beautifulsoup4', 'flask', 'flask-cors', 'PyMySQL', 'requests', 'python-dateutil', 'sqlalchemy', 'pandas', 'pytest', 'PyGithub', 'GitPython', - 'pyevent', 'gunicorn', 'datetime', 'traitlets', 'coloredlogs', 'tldextract', 'python-daemon', 'beaker', - 'lockfile'], + 'gunicorn', 'traitlets', 'coloredlogs', 'tldextract', 'beaker', 'lockfile'], extras_require={ 'dev': ['check-manifest'], 'test': ['coverage'],
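The annotate decorator in augur/util.py above now derives metric_name, source, and ID from the decorated function and collects every metadata dict in metric_metadata, which the batch metadata route later returns. A minimal sketch of that behaviour, assuming the patched augur.util is importable and using a made-up code_commits stand-in, could look like this:

from augur.util import annotate, metric_metadata

# Hypothetical metric used only to show what the decorator attaches; 'tag' is required
# because the generated ID is "<source>-<tag>".
@annotate(metadata={}, tag='code-commits', metric_type='timeseries')
def code_commits(owner, repo):
    return []

# metric_name is the title-cased function name, source is the last segment of the defining
# module, and the same dict is appended to metric_metadata for the batch metadata route.
print(code_commits.metadata['metric_name'])  # Code Commits
print(code_commits.metadata['ID'])           # "<module>-code-commits", e.g. "ghtorrent-code-commits" inside augur.ghtorrent
print(len(metric_metadata) >= 1)             # True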
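For exercising the rebuilt API by hand, the /batch route added to augur/server.py accepts a JSON array of objects carrying method, path, and an optional body, replays each one internally, and answers with a 207 multi-status list of path, status, and response entries. A minimal client sketch, assuming a local server on port 5000, an api/unstable version prefix, and the rails/rails repository (none of which are fixed by this patch), could look like:

import json
import requests

# Each entry mirrors the fields the batch loop reads: 'method', 'path', and an optional 'body'.
payload = [
    {"method": "GET", "path": "/api/unstable/rails/rails/timeseries/commits"},
    {"method": "GET", "path": "/api/unstable/rails/rails/timeseries/issues/closed"},
]

reply = requests.post("http://localhost:5000/api/unstable/batch", data=json.dumps(payload))
assert reply.status_code == 207  # multi-status: one result per sub-request

for item in reply.json():
    # Each result carries the sub-request path, its HTTP status, and the serialized response body.
    print(item["path"], item["status"], item["response"][:80])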
diff --git a/docs/testing.md b/docs/testing.md old mode 100644 new mode 100755 --- a/docs/testing.md +++ b/docs/testing.md @@ -10,3 +10,6 @@ If you don't have both Python 2 and 3, you can run the tests individually - Python 2: `python2 -m pytest` - Python 3: `python3 -m pytest` + +To test the Augur API, run `make test-api`. +- You will need to add a Postman API key to your `augur.config.json`. diff --git a/notebooks/Python Function Testing.ipynb b/notebooks/Python Function Testing.ipynb new file mode 100644 --- /dev/null +++ b/notebooks/Python Function Testing.ipynb @@ -0,0 +1,300 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Python Function Testing\n", + "\n", + "This notebook is for testing all the Python functions. Each cell is a data source class; feel free to experiment to your heart's content." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import augur\n", + "\n", + "# import everything that githubapi.py imports so we can just copy and paste our function later\n", + "augur_app = augur.Application('../augur.config.json')" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import sqlalchemy as s\n", + "import numpy as np\n", + "import re\n", + "from augur import logger\n", + "from augur.util import annotate\n", + "\n", + "ghtorrent = augur_app.ghtorrent()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "# ghtorrent.closed_issues(owner, repo)\n", + "# ghtorrent.code_commits(owner, repo)\n", + "# ghtorrent.code_review_iteration(owner, repo)\n", + "# ghtorrent.contribution_acceptance(owner, repo)\n", + "# ghtorrent.contributing_github_organizations(owner, repo)\n", + "# ghtorrent.first_response_to_issue_duration(owner, repo)\n", + "# ghtorrent.forks(owner, repo)\n", + "# ghtorrent.maintainer_response_to_merge_request_duration(owner, repo)\n", + "# ghtorrent.new_contributing_github_organizations(owner, repo)\n", + "# ghtorrent.open_issues(owner, repo)\n", + "# ghtorrent.pull_request_comments(owner, repo)\n", + "# ghtorrent.pull_requests_open(owner, repo)\n", + "# ghtorrent.issue_comments(owner, repo)\n", + "# ghtorrent.watchers(owner, repo)\n", + "# ghtorrent.commits100(owner, repo)\n", + "# ghtorrent.commit_comments(owner, repo)\n", + "# ghtorrent.committer_locations(owner, repo)\n", + "# ghtorrent.total_committers(owner, repo)\n", + "# ghtorrent.issue_activity(owner, repo)\n", + "# ghtorrent.pull_request_acceptance_rate(owner, repo)\n", + "# ghtorrent.community_age(owner, repo)\n", + "# ghtorrent.community_engagement(owner, repo)\n", + "# ghtorrent.contributors(owner, repo)\n", + "# ghtorrent.contributions(owner, repo)\n", + "# ghtorrent.classify_contributors(owner, repo)\n", + "# ghtorrent.project_age(owner, repo)\n", + "# ghtorrent.fakes(owner, repo)\n", + "# ghtorrent.ghtorrent_range(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import sqlalchemy as s\n", + "import numpy as np\n", + "import re\n", + "from augur import logger\n", + "\n", + "ghtorrentplus = augur_app.ghtorrentplus()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "# ghtorrentplus.closed_issue_resolution_duration(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "collapsed": true + }, + 
"outputs": [], + "source": [ + "import sys\n", + "import pandas as pd\n", + "if sys.version_info > (3, 0):\n", + " import urllib.parse as url\n", + "else:\n", + " import urllib as url\n", + "\n", + "publicwww = augur_app.publicwww()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "# publicwww.linking_websites(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "ename": "NoSuchPathError", + "evalue": "/Users/carterlandis/Documents/Code/augur/runtime/git_repos/repos/rails/repo", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNoSuchPathError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m<ipython-input-28-45c4dcdad33f>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;31m# git.update()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0;31m# git.downloaded_repos()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mgit\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlines_changed_minus_whitespace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"https://github.com/rails/rails\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 17\u001b[0m \u001b[0;31m# git.changes_by_author(\"https://github.com/rails/rails\")\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/Documents/Code/augur/augur/git.py\u001b[0m in \u001b[0;36mlines_changed_minus_whitespace\u001b[0;34m(self, repo_url, from_commit, df, rebuild_cache)\u001b[0m\n\u001b[1;32m 209\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'deletions'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdeletions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 210\u001b[0m \u001b[0mframes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'hash'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 211\u001b[0;31m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 212\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 213\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mframes\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/beaker/cache.py\u001b[0m in \u001b[0;36mget\u001b[0;34m(self, key, **kw)\u001b[0m\n\u001b[1;32m 320\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 321\u001b[0m \u001b[0;34m\"\"\"Retrieve a cached value from the container\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 322\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0;34m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 323\u001b[0m \u001b[0mget_value\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 324\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/beaker/container.py\u001b[0m in \u001b[0;36mget_value\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 378\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 379\u001b[0m \u001b[0mdebug\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"get_value creating new value\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 380\u001b[0;31m \u001b[0mv\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcreatefunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 381\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mv\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 382\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/Documents/Code/augur/augur/git.py\u001b[0m in \u001b[0;36mheavy_lifting\u001b[0;34m()\u001b[0m\n\u001b[1;32m 162\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mlines_changed_minus_whitespace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrepo_url\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfrom_commit\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdf\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrebuild_cache\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 164\u001b[0;31m \"\"\"\n\u001b[0m\u001b[1;32m 165\u001b[0m \u001b[0mMakes\u001b[0m \u001b[0msure\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mstorageFolder\u001b[0m \u001b[0mcontains\u001b[0m \u001b[0mupdated\u001b[0m \u001b[0mversions\u001b[0m \u001b[0mof\u001b[0m \u001b[0mall\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mrepos\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 166\u001b[0m \"\"\"\n", + "\u001b[0;32m~/Documents/Code/augur/augur/git.py\u001b[0m in \u001b[0;36mgit\u001b[0;34m(self, is_updater)\u001b[0m\n\u001b[1;32m 74\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 75\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__git\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mis_updater\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 76\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__git\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgit\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mRepo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 77\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__git\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 78\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + 
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/git/repo/base.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, path, odbt, search_parent_directories, expand_vars)\u001b[0m\n\u001b[1;32m 122\u001b[0m \u001b[0mepath\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mexpand_path\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexpand_vars\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 123\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexists\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 124\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mNoSuchPathError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 125\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 126\u001b[0m \u001b[0;31m## Walk up the path to find the `.git` dir.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mNoSuchPathError\u001b[0m: /Users/carterlandis/Documents/Code/augur/runtime/git_repos/repos/rails/repo" + ] + } + ], + "source": [ + "import os\n", + "import shutil\n", + "import re\n", + "import json\n", + "import datetime\n", + "import pandas as pd\n", + "import git\n", + "from lockfile import LockFile, AlreadyLocked\n", + "from augur.util import logger, get_cache\n", + "\n", + "git = augur_app.git()\n", + "\n", + "# git.get_repo(\"https://github.com/rails/rails\")\n", + "# git.update()\n", + "# git.downloaded_repos()\n", + "# git.lines_changed_minus_whitespace(\"https://github.com/rails/rails\")\n", + "# git.changes_by_author(\"https://github.com/rails/rails\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "'str' object has no attribute 'GITHUB_API_KEY'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m<ipython-input-4-e86e635d5d49>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;31m# return the dataframe\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 44\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mdf\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 45\u001b[0;31m \u001b[0mlines_of_code_changed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mowner\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrepo\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m<ipython-input-4-e86e635d5d49>\u001b[0m in \u001b[0;36mlines_of_code_changed\u001b[0;34m(self, owner, repo)\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;31m# see <project_root>/augur/githubapi.py for examples using the GraphQL API\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0murl\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"https://api.github.com/repos/{}/{}/stats/code_frequency\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mowner\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrepo\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 34\u001b[0;31m \u001b[0mjson\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mrequests\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mauth\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'user'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGITHUB_API_KEY\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjson\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 35\u001b[0m \u001b[0;31m# get our data into a dataframe\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0mdf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mjson\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'date'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'additions'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'deletions'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mAttributeError\u001b[0m: 'str' object has no attribute 'GITHUB_API_KEY'" + ] + } + ], + "source": [ + "from augur.localcsv import LocalCSV\n", + "import json\n", + "import re\n", + "from dateutil.parser import parse\n", + "import pandas as pd\n", + "import github\n", + "import numpy as np\n", + "import datetime\n", + "import requests\n", + "from augur import logger\n", + "\n", + "github = augur_app.githubapi()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "github.lines_of_code_changed(owner, repo)\n", + "# github.bus_factor(owner, repo)\n", + "# github.major_tags(owner, repo)\n", + "# github.tags(owner, repo)\n", + "# github.contributors_gender(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import requests\n", + "import pandas as pd\n", + "import numpy as np\n", + "from bs4 import BeautifulSoup\n", + "from augur import logger\n", + "\n", + "librariesio = augur_app.librariesio()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "# librariesio.dependencies(owner, repo)\n", + "# librariesio.dependency_stats(owner, repo)\n", + "# librariesio.dependents(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import json\n", + "import pandas as pd\n", + "import requests\n", + "import datetime\n", + "import base64\n", + "from augur import logger\n", + "\n", + "downloads = augur_app.downloads()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "# downloads.downloads(owner, repo)\n", + "# downloads.ruby_downloads(owner)\n", + "# downloads.npm_downloads(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import tldextract\n", + "from urllib.parse import urlparse\n", + "from .util import get_data_path\n", + "\n", + "localcsv = augur_app.localcsv()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "localcsv.classify_emails(self, email_series)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python (augur)", + "language": "python", + "name": "augur" + }, + "language_info": { + "codemirror_mode": { 
+ "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/test.ipynb b/notebooks/test.ipynb --- a/notebooks/test.ipynb +++ b/notebooks/test.ipynb @@ -2,10 +2,8 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": true - }, + "execution_count": 9, + "metadata": {}, "outputs": [], "source": [ "import augur\n", @@ -23,15 +21,13 @@ }, { "cell_type": "code", - "execution_count": 2, - "metadata": { - "collapsed": true - }, + "execution_count": 5, + "metadata": {}, "outputs": [], "source": [ - "augurApp = augur.Application('../augur.cfg')\n", + "augurApp = augur.Application('../augur.config.json')\n", "# we only need an instance of the GitHubAPI class\n", - "github = augurApp.github()" + "github = augurApp.githubapi()" ] }, { @@ -656,6 +652,20 @@ "bus_factor(stan, \"rails\", \"rails\")" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "code", "execution_count": null, @@ -668,9 +678,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python (augur)", "language": "python", - "name": "python3" + "name": "augur" }, "language_info": { "codemirror_mode": { diff --git a/test/Augur.postman_collection.json b/test/Augur.postman_collection.json new file mode 100644 --- /dev/null +++ b/test/Augur.postman_collection.json @@ -0,0 +1,1244 @@ +{ + "info": { + "_postman_id": "ec950b0b-a5e9-4fe3-b1a5-ad4f49c209f9", + "name": "Augur", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "item": [ + { + "name": "ghtorrent", + "item": [ + { + "name": "issues/closed", + "event": [ + { + "listen": "test", + "script": { + "id": "3596106b-7311-4099-b5fd-e5c1e40f5799", + "type": "text/javascript", + "exec": [ + "" + ] + } + } + ], + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/issues/closed", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "issues", + "closed" + ] + } + }, + "response": [] + }, + { + "name": "commits", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/commits", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "commits" + ] + } + }, + "response": [] + }, + { + "name": "code_review_iteration", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/code_review_iteration", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "code_review_iteration" + ] + } + }, + "response": [] + }, + { + "name": "contribution_acceptance", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/contribution_acceptance", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "contribution_acceptance" + ] + } + }, + "response": [] + }, + { + "name": "contributing_github_organizations", + "request": { + "method": "GET", + 
"header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/contributing_github_organizations", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "contributing_github_organizations" + ] + } + }, + "response": [] + }, + { + "name": "issues/response_time", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/issues/response_time", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "issues", + "response_time" + ] + } + }, + "response": [] + }, + { + "name": "forks", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/forks", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "forks" + ] + } + }, + "response": [] + }, + { + "name": "pulls/maintainer_response_time", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/pulls/maintainer_response_time", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "pulls", + "maintainer_response_time" + ] + } + }, + "response": [] + }, + { + "name": "new_contributing_github_organizations", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/new_contributing_github_organizations", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "new_contributing_github_organizations" + ] + } + }, + "response": [] + }, + { + "name": "issues", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/issues", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "issues" + ] + } + }, + "response": [] + }, + { + "name": "pulls/comments", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/pulls/comments", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "pulls", + "comments" + ] + } + }, + "response": [] + }, + { + "name": "pulls", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/pulls", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "pulls" + ] + } + }, + "response": [] + }, + { + "name": "issue_comments", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/issue_comments", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "issue_comments" + ] + } + }, + "response": [] + }, + { + "name": "pulls/made_closed", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/pulls/made_closed", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "pulls", + "made_closed" + ] + } + }, + "response": [] + }, + { + "name": "watchers", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/watchers", + "host": [ + "{{server}}" + ], + "path": [ + 
"{{api_version}}", + "{{repo}}", + "timeseries", + "watchers" + ] + } + }, + "response": [] + }, + { + "name": "commits100", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/commits100", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "commits100" + ] + } + }, + "response": [] + }, + { + "name": "commits/comments", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/commits/comments", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "commits", + "comments" + ] + } + }, + "response": [] + }, + { + "name": "committer_locations", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/committer_locations", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "committer_locations" + ] + } + }, + "response": [] + }, + { + "name": "total_committers", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/total_committers", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "total_committers" + ] + } + }, + "response": [] + }, + { + "name": "issues/activity", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/issues/activity", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "issues", + "activity" + ] + } + }, + "response": [] + }, + { + "name": "community_age", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/community_engagement", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "community_engagement" + ] + } + }, + "response": [] + }, + { + "name": "community_engagement", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/community_engagement", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "community_engagement" + ] + } + }, + "response": [] + }, + { + "name": "contributors", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/contributors", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "contributors" + ] + } + }, + "response": [] + }, + { + "name": "contributions", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/contributions", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "contributions" + ] + } + }, + "response": [] + }, + { + "name": "project_age", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/project_age", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "project_age" + ] + } + }, + "response": [] + }, + { + "name": "fakes", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/fakes", + "host": [ + "{{server}}" + ], + "path": [ + 
"{{api_version}}", + "{{repo}}", + "timeseries", + "fakes" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "8976e16c-77cc-4411-b1c1-fabf347058f0", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "83ad29a9-5409-455f-a669-6d327b29dba7", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "ghtorrentplus", + "item": [ + { + "name": "issues/time_to_close", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/issues/time_to_close", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "issues", + "time_to_close" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "5626f5d3-5538-456c-969d-97dee6349358", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "6d38b43a-4d2f-42d4-b2d2-20d29cf09767", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "facade", + "item": [ + { + "name": "downloaded_repos", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/git/repos", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "git", + "repos" + ] + } + }, + "response": [] + }, + { + "name": "lines_changed", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/git/changes_by_author", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "git", + "changes_by_author" + ] + } + }, + "response": [] + }, + { + "name": "lines_changed_minus_white_space", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/git/lines_changed", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "git", + "lines_changed" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "fc81708a-ca9f-40dc-9b00-3ca36b1fb66c", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "e2941d3b-33f6-4dea-811f-47e1534d4d78", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "githubapi", + "item": [ + { + "name": "lines_changed", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/lines_changed", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "lines_changed" + ] + } + }, + "response": [] + }, + { + "name": "tags", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/tags", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "tags" + ] + } + }, + "response": [] + }, + { + "name": "tags/major", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/tags/major", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + 
"timeseries", + "tags", + "major" + ] + } + }, + "response": [] + }, + { + "name": "bus_factor", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/bus_factor", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "bus_factor" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "04eb46cb-53f4-4450-90da-657fc8e87c40", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "ba64c471-d5e0-4963-b0a7-e50f3011b5c6", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "librariesio", + "item": [ + { + "name": "dependencies", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/dependencies", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "dependencies" + ] + } + }, + "response": [] + }, + { + "name": "dependency_stats", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/dependency_stats", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "dependency_stats" + ] + } + }, + "response": [] + }, + { + "name": "dependents", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/dependents", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "dependents" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "4e2f03ee-9040-47cf-b983-fb12dff72870", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "fec972a1-5955-413e-8e37-1a10a889ed8a", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "publicwww", + "item": [ + { + "name": "linking_websites", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/linking_websites", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "linking_websites" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "bce40f57-125b-4d9c-b66f-736e3e215b63", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "4c2489b5-4e5d-46e4-a75a-f31d0709155a", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "downloads", + "item": [ + { + "name": "downloads", + "request": { + "auth": { + "type": "noauth" + }, + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/downloads", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "downloads" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "4a6473a2-4606-41f0-b970-db4ef7fcbe9d", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "9d5c0c80-0211-4094-a953-ff37c17037bb", + "type": 
"text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "batch", + "item": [ + { + "name": "batch", + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "[{\"method\": \"GET\", \"path\": \"/api/unstable/rails/rails/timeseries/commits\"}, {\"method\": \"GET\", \"path\": \"/api/unstable/rails/rails/timeseries/issues\"}, {\"method\": \"GET\", \"path\": \"/api/unstable/rails/rails/timeseries/issues/closed\"}]\n" + }, + "url": { + "raw": "{{server}}/{{api_version}}/batch", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "batch" + ] + } + }, + "response": [] + }, + { + "name": "batch w metadata", + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "[{\"method\": \"GET\", \"path\": \"/api/unstable/rails/rails/timeseries/commits\"}, {\"method\": \"GET\", \"path\": \"/api/unstable/rails/rails/timeseries/issues\"}]\n" + }, + "url": { + "raw": "{{server}}/{{api_version}}/batch/metadata", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "batch", + "metadata" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "c996d09b-20d8-4f96-bec8-71de0bb8ff11", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "0d42076e-056b-4e2e-b11d-13c4dbfc5439", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(207);", + "});" + ] + } + } + ] + }, + { + "name": "metrics status", + "item": [ + { + "name": "metrics/status", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/metrics/status", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "metrics", + "status" + ] + } + }, + "response": [] + }, + { + "name": "metrics/status/metadata", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/metrics/status/metadata", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "metrics", + "status", + "metadata" + ] + } + }, + "response": [] + }, + { + "name": "metrics/status/filter", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/metrics/status/filter?source=ghtorrent", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "metrics", + "status", + "filter" + ], + "query": [ + { + "key": "source", + "value": "ghtorrent" + } + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "b22f156c-866f-4be6-8acc-178388602ee4", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "3504c981-5c77-4d0c-a96d-1c20ec8608c2", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "22463b9e-8286-4bba-928c-6b26ed9e3bab", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "b1db8fe4-1748-4bda-8632-5f7f2ddf0ec4", + "type": "text/javascript", + "exec": [ + "tests['JSON array is not empty'] = 
(JSON.parse(responseBody).length > 0);" + ] + } + } + ] +} \ No newline at end of file diff --git a/test/dev-unstable-rails.postman_environment.json b/test/dev-unstable-rails.postman_environment.json new file mode 100644 --- /dev/null +++ b/test/dev-unstable-rails.postman_environment.json @@ -0,0 +1,27 @@ +{ + "id": "392a724a-6e59-4dc9-b70f-50221adf4847", + "name": "dev - unstable - rails", + "values": [ + { + "key": "server", + "value": "dev.augurlabs.io", + "description": "", + "enabled": true + }, + { + "key": "api_version", + "value": "api/unstable", + "description": "", + "enabled": true + }, + { + "key": "repo", + "value": "rails/rails", + "description": "", + "enabled": true + } + ], + "_postman_variable_scope": "environment", + "_postman_exported_at": "2018-08-17T17:15:04.995Z", + "_postman_exported_using": "Postman/6.2.4" +} \ No newline at end of file diff --git a/test/test_api.py b/test/test_api.py new file mode 100644 --- /dev/null +++ b/test/test_api.py @@ -0,0 +1,7 @@ +import os +import augur + +augur_app = augur.Application(config_file="augur.config.json") +postman_api_key = augur_app.read_config("Postman", "apikey", "AUGUR_POSTMAN_API_KEY", "None") + +os.system("newman run https://api.getpostman.com/collections/4566755-ec950b0b-a5e9-4fe3-b1a5-ad4f49c209f9?apikey={} -e https://api.getpostman.com/environments/4566755-2eb8f02c-642f-4f12-892f-d75f4c5faa24?apikey={} --color off | tee test/api-test.log".format(postman_api_key, postman_api_key)) \ No newline at end of file diff --git a/test/test_ghtorrent.py b/test/test_ghtorrent.py --- a/test/test_ghtorrent.py +++ b/test/test_ghtorrent.py @@ -1,7 +1,7 @@ import os import pytest [email protected] [email protected](scope="module") def ghtorrent(): import augur augurApp = augur.Application() @@ -29,9 +29,6 @@ def test_userid(ghtorrent): def test_closed_issues(ghtorrent): assert ghtorrent.closed_issues('cashmusic', 'platform').isin(["2012-11-09T00:00:00.000Z"]).any -def test_closed_issue_resolution_duration(ghtorrent): - assert ghtorrent.closed_issue_resolution_duration('mopidy', 'mopidy').isin(["2012-11-10T09:51:19.000Z"]).any - def test_code_commits(ghtorrent): assert ghtorrent.code_commits('facebook', 'folly').isin(["2013-01-07"]).any @@ -115,10 +112,6 @@ def test_project_age(ghtorrent): def test_fakes(ghtorrent): assert ghtorrent.fakes('rails', 'rails').isin(["2008-09-24T00:00:00.000Z"]).any -def test_ghtorrent_range(ghtorrent): - assert ghtorrent.ghtorrent_range().isin(["0000-00-00"]).any - - diff --git a/test/test_ghtorrentplus.py b/test/test_ghtorrentplus.py --- a/test/test_ghtorrentplus.py +++ b/test/test_ghtorrentplus.py @@ -12,7 +12,7 @@ def ghtorrentplus(): # *** GROWTH, MATURITY, AND DECLINE *** # def test_closed_issue_resolution_duration(ghtorrentplus): - assert ghtorrentplus.closed_issue_resolution_duration('mopidy', 'mopidy').isin(["2012-11-10T09:51:19.000Z"]).any + assert ghtorrentplus.closed_issue_resolution_duration('TEST', 'TEST').isin(["DATE"]).any # *** RISK *** # diff --git a/test/test_publicwww.py b/test/test_publicwww.py deleted file mode 100644 --- a/test/test_publicwww.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import pytest -import pandas - [email protected] -def publicwww(): - import augur - augurApp = augur.Application() - return augurApp.publicwww() - -# *** DIVERSITY AND INCLUSION *** # - -# *** GROWTH, MATURITY, AND DECLINE *** # - -# *** RISK *** # - -# *** VALUE *** # - -# *** ACTIVITY *** # - -# *** EXPERIMENTAL *** # -def test_linking_websites(publicwww): - assert 
publicwww.linking_websites(owner='yihui', repo='knitr').isin(["sohu.com"]).any \ No newline at end of file
Batch API Logging: the development logger for the Batch API needs to be updated so that it outputs the underlying endpoints being called, for debugging purposes.
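The problem statement above asks for development-time logging of the endpoints a batch request fans out to. The following is a minimal illustrative sketch only, not Augur's actual batch route: it assumes a Flask app and a module-level logger (hypothetical names), and simply logs each underlying method and path before dispatching it.

import json
import logging

from flask import Flask, jsonify, request

logger = logging.getLogger(__name__)
app = Flask(__name__)

@app.route('/api/unstable/batch', methods=['POST'])
def batch():
    # Sketch only: the route, names, and response shape here are assumptions,
    # not Augur's real implementation.
    reports = []
    for sub_request in json.loads(request.data):
        method = sub_request.get('method', 'GET')
        path = sub_request.get('path')
        # The point of the problem statement: surface every underlying
        # endpoint being called so batch requests can be debugged.
        logger.debug('Batch request dispatching %s %s', method, path)
        # Dispatch the sub-request through the same Flask app.
        with app.test_request_context(path, method=method):
            response = app.full_dispatch_request()
        reports.append({'path': path,
                        'status_code': response.status_code,
                        'response': response.get_data(as_text=True)})
    # 207 Multi-Status, one entry per sub-request.
    return jsonify(reports), 207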
2018-08-27T14:20:23Z
[]
[]
chaoss/augur
162
chaoss__augur-162
[ "156" ]
0d64909e8f9efedfcbeec29ed21ee05bce8c61e0
diff --git a/augur/__init__.py b/augur/__init__.py --- a/augur/__init__.py +++ b/augur/__init__.py @@ -1,3 +1,5 @@ +#SPDX-License-Identifier: MIT + # Metadata from .metadata import __version__ @@ -6,12 +8,6 @@ # Classes from .application import Application -# from .downloads import Downloads -# from .ghtorrent import GHTorrent -# from .ghtorrentplus import GHTorrentPlus -# from .git import Git -# from .githubapi import GitHubAPI -# from .librariesio import LibrariesIO -# from .localcsv import LocalCSV -# from .publicwww import PublicWWW -# from .server import Server + +# Plugins +from .augurplugin import AugurPlugin diff --git a/augur/application.py b/augur/application.py --- a/augur/application.py +++ b/augur/application.py @@ -1,9 +1,16 @@ +#SPDX-License-Identifier: MIT +""" +Handles global context, I/O, and configuration +""" + import os import time import multiprocessing as mp import logging import configparser as configparser import json +import importlib +import pkgutil import coloredlogs from beaker.cache import CacheManager from beaker.util import parse_cache_config_options @@ -25,6 +32,11 @@ def updater_process(name, delay): except: raise +def load_plugins(): + if not hasattr(load_plugins, 'already_loaded'): + import augur.plugins + load_plugins.already_loaded = True + class Application(object): """Initalizes all classes form Augur using a config file or environment variables""" @@ -42,6 +54,7 @@ def __init__(self, config_file='augur.config.json', no_config_file=0, descriptio self.__config_bad = False self.__config_file_path = os.path.abspath(os.getenv('AUGUR_CONFIG_FILE', config_file)) self.__config_location = os.path.dirname(self.__config_file_path) + self.__runtime_location = 'runtime/' self.__export_env = os.getenv('AUGUR_ENV_EXPORT', '0') == '1' if os.getenv('AUGUR_ENV_ONLY', '0') != '1' and no_config_file == 0: try: @@ -63,7 +76,6 @@ def __init__(self, config_file='augur.config.json', no_config_file=0, descriptio # Load the config file try: config_text = self.__config_file.read() - config_text = config_text.replace('$(AUGUR)', self.__config_location) self.__config = json.loads(config_text) except json.decoder.JSONDecodeError as e: if not self.__config_bad: @@ -80,11 +92,14 @@ def __init__(self, config_file='augur.config.json', no_config_file=0, descriptio self.__processes = [] # Create cache - cache_config = self.read_config('Cache', 'config', None, { + cache_config = { 'cache.type': 'file', - 'cache.data_dir': 'runtime/cache/', - 'cache.lock_dir': 'runtime/cache/' - }) + 'cache.data_dir': self.path('$(RUNTIME)/cache/'), + 'cache.lock_dir': self.path('$(RUNTIME)/cache/') + } + cache_config.update(self.read_config('Cache', 'config', None, cache_config)) + cache_config['cache.data_dir'] = self.path(cache_config['cache.data_dir']) + cache_config['cache.lock_dir'] = self.path(cache_config['cache.lock_dir']) if not os.path.exists(cache_config['cache.data_dir']): os.makedirs(cache_config['cache.data_dir']) if not os.path.exists(cache_config['cache.lock_dir']): @@ -97,10 +112,37 @@ def __init__(self, config_file='augur.config.json', no_config_file=0, descriptio self.__ghtorrentplus = None self.__githubapi = None self.__git = None + self.__facade = None self.__librariesio = None self.__downloads = None - self.__publicwww = None self.__localCSV = None + self.__metrics_status = None + + # Load plugins + import augur.plugins + + @classmethod + def register_plugin(cls, plugin): + if not hasattr(plugin, 'name'): + raise NameError("{} didn't have a name") + cls.plugins[plugin.name] = 
plugin + + def replace_config_variables(self, string, reverse=False): + variable_map = { + 'AUGUR': self.__config_location, + 'RUNTIME': self.__runtime_location + } + for variable, source in variable_map.items(): + if not reverse: + string = string.replace('$({})'.format(variable), source) + else: + string = string.replace(source, '$({})'.format(variable)) + return string + + def path(self, path): + path = self.replace_config_variables(path) + path = os.path.abspath(os.path.expanduser(path)) + return path def __updater(self, updates=None): if updates is None: @@ -117,10 +159,11 @@ def init_all(self): self.ghtorrentplus() self.githubapi() self.git() + self.facade() self.librariesio() self.downloads() - self.publicwww() self.localcsv() + self.metrics_status() def read_config(self, section, name, environment_variable=None, default=None): value = None @@ -146,6 +189,11 @@ def read_config(self, section, name, environment_variable=None, default=None): logger.debug('{}:{} = {}'.format(section, name, value)) return value + def read_config_path(self, section, name, environment_variable=None, default=None): + path = self.read_config(section, name, environment_variable, default) + path = self.path(path) + return path + def set_config(self, section, name, value): if not section in self.__config: self.__config[section] = {} @@ -207,6 +255,20 @@ def ghtorrent(self): ) return self.__ghtorrent + def facade(self): + from augur.facade import Facade + if self.__facade is None: + logger.debug('Initializing Facade') + self.__facade = Facade( + user=self.read_config('Facade', 'user', 'AUGUR_FACADE_DB_USER', 'root'), + password=self.read_config('Facade', 'pass', 'AUGUR_FACADE_DB_PASS', ''), + host=self.read_config('Facade', 'host', 'AUGUR_FACADE_DB_HOST', '127.0.0.1'), + port=self.read_config('Facade', 'port', 'AUGUR_FACADE_DB_PORT', '3306'), + dbname=self.read_config('Facade', 'name', 'AUGUR_FACADE_DB_NAME', 'facade'), + projects=self.read_config('Facade', 'projects', None, []) + ) + return self.__facade + def ghtorrentplus(self): from augur.ghtorrentplus import GHTorrentPlus if self.__ghtorrentplus is None: @@ -223,7 +285,7 @@ def ghtorrentplus(self): def git(self, update=False): from augur.git import Git storage = self.path_relative_to_config( - self.read_config('Git', 'storage', 'AUGUR_GIT_STORAGE', 'runtime/git_repos/') + self.read_config_path('Git', 'storage', 'AUGUR_GIT_STORAGE', '$(RUNTIME)/git_repos/') ) repolist = self.read_config('Git', 'repositories', None, []) if self.__git is None: @@ -269,13 +331,6 @@ def downloads(self): self.__downloads = Downloads(self.githubapi()) return self.__downloads - def publicwww(self): - from augur.publicwww import PublicWWW - if self.__publicwww is None: - logger.debug('Initializing PublicWWW') - self.__publicwww = PublicWWW(api_key=self.read_config('PublicWWW', 'apikey', 'AUGUR_PUBLIC_WWW_API_KEY', 'None')) - return self.__publicwww - def localcsv(self): from augur.localcsv import LocalCSV if self.__localCSV is None: @@ -283,4 +338,12 @@ def localcsv(self): self.__localCSV = LocalCSV() return self.__localCSV + def metrics_status(self): + from augur.metrics_status import MetricsStatus + if self.__metrics_status is None: + logger.debug('Initializing MetricsStatus') + self.__metrics_status = MetricsStatus(self.githubapi()) + return self.__metrics_status + +Application.plugins = {} diff --git a/augur/augurplugin.py b/augur/augurplugin.py new file mode 100644 --- /dev/null +++ b/augur/augurplugin.py @@ -0,0 +1,17 @@ +#SPDX-License-Identifier: MIT +""" +Provides a class 
that can be used to extend Augur +""" + +class AugurPlugin(object): + """Defines a base class for Augur plugins to implement""" + def __init__(self, config): + self.config = config + + @classmethod + def register(cls, application): + application.register_plugin(cls) + + def create_routes(self, flask_app): + routes = __import__('routes') + routes.create(flask_app) \ No newline at end of file diff --git a/augur/downloads.py b/augur/downloads.py --- a/augur/downloads.py +++ b/augur/downloads.py @@ -1,9 +1,16 @@ +#SPDX-License-Identifier: MIT +""" +Data source that gathers download stats from package managers +""" + import json import pandas as pd import requests import datetime import base64 from augur import logger +from augur.util import annotate + # end imports # (don't remove the above line, it's for a script) @@ -11,14 +18,44 @@ class Downloads(object): """Class for retrieveing download information using APIs and web scrapers""" def __init__(self, githubapi): self.__githubapi = githubapi.api - + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + + ##################################### + ### RISK ### + ##################################### + + + ##################################### + ### VALUE ### + ##################################### + + + ##################################### + ### ACTIVITY ### + ##################################### + + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + @annotate(tag='downloads') def downloads(self, owner, repo): """ - Detects package file and calls correct function for download statistics + Timeseries that returns package file and calls the correct function to the create download statistics :param owner: repo owner username :param repo: repo name + :return: DataFrame with all downloads for that day """ root_dir = self.__githubapi.get_repo((owner + "/" + repo)).get_dir_contents("/") @@ -32,9 +69,10 @@ def downloads(self, owner, repo): def ruby_downloads(self, repo): """ - Returns daily downloads for ruby gems from bestgems.org API + Timeseries of daily downloads for ruby gems from bestgems.org API :param repo: repo name + :return: DataFrame with count of ruby downloads """ r = requests.get("http://bestgems.org/api/v1/gems/%s/daily_downloads.json" % (repo)) raw = r.text @@ -52,6 +90,7 @@ def npm_downloads(self, repo, contents): :param repo: repo name :param contents: contents of package.json + :return: DataFrame with count of npm downloads """ contents = json.loads(json.loads(json.dumps(contents))) name = contents["name"] diff --git a/augur/facade.py b/augur/facade.py new file mode 100644 --- /dev/null +++ b/augur/facade.py @@ -0,0 +1,92 @@ +#SPDX-License-Identifier: MIT +""" +Data source that uses Facade's tables +""" + +import pandas as pd +import sqlalchemy as s +import numpy as np +import re +from augur import logger +from augur.util import annotate +# end imports +# (don't remove the above line, it's for a script) + +class Facade(object): + """Queries Facade""" + + def __init__(self, user, password, host, port, dbname, projects=None): + """ + Connect to the database + + :param dbstr: The [database string](http://docs.sqlalchemy.org/en/latest/core/engines.html) to connect to the GHTorrent database + """ + self.DB_STR = 'mysql+pymysql://{}:{}@{}:{}/{}'.format( + user, password, host, port, dbname + ) + 
logger.debug('Facade: Connecting to {}:{}/{} as {}'.format(host, port, dbname, user)) + self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool) + self.projects = projects + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + + ##################################### + ### RISK ### + ##################################### + + + ##################################### + ### VALUE ### + ##################################### + + + ##################################### + ### ACTIVITY ### + ##################################### + + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + @annotate(tag='downloaded-repos') + def downloaded_repos(self): + repoSQL = s.sql.text(""" + SELECT git AS url, status, projects.name as project_name + FROM repos + JOIN projects + ON repos.projects_id = projects.id + """) + results = pd.read_sql(repoSQL, self.db) + results['url'] = results['url'].apply(lambda datum: datum.split('//')[1]) + if self.projects: + results = results[results.project_name.isin(self.projects)] + return results + + @annotate(tag='lines-changed-minus-whitespace') + def lines_changed_minus_whitespace(self, repo_url, from_commit=None, df=None, rebuild_cache=False): + pass + + @annotate(tag='lines-changed-by-author') + def lines_changed_by_author(self, repo_url): + """ + Makes sure the storageFolder contains updated versions of all the repos + """ + repoSQL = s.sql.text(""" + SELECT author_email, author_date, author_affiliation as affiliation, SUM(added) as additions, SUM(removed) as deletions, SUM(whitespace) as whitespace + FROM analysis_data + WHERE repos_id = (SELECT id FROM repos WHERE git LIKE :repourl LIMIT 1) + GROUP BY repos_id, author_date, author_affiliation, author_email + ORDER BY author_date ASC; + """) + results = pd.read_sql(repoSQL, self.db, params={"repourl": '%{}%'.format(repo_url)}) + return results + + diff --git a/augur/ghtorrent.py b/augur/ghtorrent.py --- a/augur/ghtorrent.py +++ b/augur/ghtorrent.py @@ -1,3 +1,8 @@ +#SPDX-License-Identifier: MIT +""" +Data source that uses the GHTorrent relational database of GitHub activity. +""" + import pandas as pd import sqlalchemy as s import numpy as np @@ -45,7 +50,7 @@ def __single_table_count_by_date(self, table, repo_col='project_id', user_col='a if group_by == "day": return """ SELECT date(created_at) AS "date", COUNT(*) AS "{0}" - FROM {0} + FROM {0} FROM {0} WHERE {1} = :repoid GROUP BY DATE(created_at) @@ -90,7 +95,7 @@ def __sub_table_count_by_date(self, parent_table, sub_table, parent_id, sub_id, :return: Query string """ return """ - SELECT date({1}.created_at) AS "date", COUNT(*) AS counter + SELECT date({1}.created_at) AS "date", COUNT(*) AS {1} FROM {1}, {0} WHERE {1}.{3} = {0}.{2} AND {0}.{4} = :repoid @@ -138,12 +143,16 @@ def userid(self, username): ### GROWTH, MATURITY, AND DECLINE ### ##################################### - @annotate(metric_name='closed-issues') + @annotate(tag='closed-issues') def closed_issues(self, owner, repo=None): """ Subgroup: Issue Resolution - Endpoint: issues/closed - chaoss-metric: closed-issues + + Timeseries of the count of the number of issues closed per week + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. 
+ :param repo: The name of the repo. Unneeded if repository id was passed as owner. + :return: DataFrame with newly closed issues/week """ repoid = self.repoid(owner, repo) issuesClosedSQL = s.sql.text(""" @@ -156,56 +165,31 @@ def closed_issues(self, owner, repo=None): """) return pd.read_sql(issuesClosedSQL, self.db, params={"repoid": str(repoid)}) - def closed_issue_resolution_duration(self, owner, repo=None): - """ - Subgroup: Issue Resolution - Endpoint: issues_with_close - chaoss-metric: closed-issue-resolution-duration - - How long on average each week it takes to close an issue - - :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. - :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with issues/day - """ - repoid = self.repoid(owner, repo) - issuesWithCloseSQL = s.sql.text(""" - SELECT issues.id as "id", - issues.created_at as "date", - DATEDIFF(closed.created_at, issues.created_at) AS "days_to_close" - FROM issues - - JOIN - (SELECT * FROM issue_events - WHERE issue_events.action = "closed") closed - ON issues.id = closed.issue_id - - WHERE issues.repo_id = :repoid""") - return pd.read_sql(issuesWithCloseSQL, self.db, params={"repoid": str(repoid)}) - + @annotate(tag='code-commits') def code_commits(self, owner, repo=None, group_by="week"): """ Subgroup: Code Development - Endpoint: commits - chaoss-metric: code-commits - Timeseries of all the commits on a repo + Timeseries of the count of commits :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with commits/day + :return: DataFrame with new commits/week """ repoid = self.repoid(owner, repo) commitsSQL = s.sql.text(self.__single_table_count_by_date('commits', group_by=group_by)) return pd.read_sql(commitsSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='code-review-iteration') def code_review_iteration(self, owner, repo=None): """ - Number of iterations (being closed and reopened) that a merge request (code review) goes through until it is finally merged + Subgroup: Code Development + + Timeseries of the count of iterations (being closed and reopened) that a merge request (code review) goes through until it is finally merged - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being a merge request's date of creation + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame with iterations/issue for each issue that week """ repoid = self.repoid(owner, repo) @@ -216,7 +200,7 @@ def code_review_iteration(self, owner, repo=None): issues.issue_id AS "issue_id", pull_request_history.pull_request_id AS "pull_request_id", pull_request_history.action AS "action", - COUNT(CASE WHEN action = "closed" THEN 1 ELSE NULL END) AS "count" + COUNT(CASE WHEN action = "closed" THEN 1 ELSE NULL END) AS "iterations" FROM issues, pull_request_history WHERE find_in_set(pull_request_history.action, "closed,merged")>0 AND pull_request_history.pull_request_id IN( @@ -226,57 +210,46 @@ def code_review_iteration(self, owner, repo=None): AND pull_request_history.pull_request_id = issues.issue_id AND issues.pull_request = 1 AND issues.repo_id = :repoid - GROUP BY (issues.created_at) #YEARWEEK to get (iterations (all PRs in repo) / week) instead of (iterations / PR)? + GROUP BY YEARWEEK(issues.created_at) #YEARWEEK to get (iterations (all PRs in repo) / week) instead of (iterations / PR)? """) df = pd.read_sql(codeReviewIterationSQL, self.db, params={"repoid": str(repoid)}) - return pd.DataFrame({'date': df['created_at'], 'iterations': df['count']}) + return pd.DataFrame({'date': df['created_at'], 'iterations': df['iterations']}) + @annotate(tag='contribution-acceptance') def contribution_acceptance(self, owner, repo=None): """ - Rolling ratio between merged pull requests : unmerged pull requests + Subgroup: Community Development - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being a week - """ - repoid = self.repoid(owner, repo) - codeReviewIterationSQL = s.sql.text(""" - SELECT by_PR.created_at as date, - count(CASE WHEN by_PR.action = 'merged' then 1 else null end) / count(CASE WHEN by_PR.action = 'closed' then 1 else null end) as 'ratio' - FROM - (SELECT - DATE(issues.created_at) AS "created_at", - issues.issue_id AS "issue_id", - pull_request_history.pull_request_id AS "pull_request_id", - pull_request_history.action AS "action" - FROM issues, pull_request_history - WHERE find_in_set(pull_request_history.action, "closed,merged")>0 - AND pull_request_history.pull_request_id = issues.issue_id - AND issues.pull_request = 1 - AND issues.repo_id = :repoid - GROUP BY (issues.created_at)) by_PR - GROUP BY YEARWEEK(by_PR.created_at) - """) - - df = pd.read_sql(codeReviewIterationSQL, self.db, params={"repoid": str(repoid)}) + Timeseries of the rolling ratio between merged pull requests over unmerged pull requests + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame with ratio/week + """ + source_df = self.community_engagement(owner, repo) + df = pd.DataFrame() + df['date'] = source_df['date'] + df['acceptance_rate'] = source_df['pull_requests_merged_rate_this_week'] return df - def contributing_github_organizations(self, owner, repo=None): + @annotate(tag='contributing-github-organizations') + def contributing_github_organizations(self, owner, repo=None): #needs clarification about return value """ - All the contributing organizations to a project and the counts of each organization's contributions + Subgroup: Community Development + + Returns of all the contributing organizations to a project and the counts of each organization's contributions - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being an outside contributing organization + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. + :return: DataFrame with each organization's information """ repoid = self.repoid(owner, repo) contributingOrgSQL = s.sql.text(""" SELECT id AS contributing_org, SUM(commits) AS commits, SUM(issues) AS issues, SUM(commit_comments) AS commit_comments, SUM(issue_comments) AS issue_comments, SUM(pull_requests) AS pull_requests, SUM(pull_request_comments) AS pull_request_comments, - SUM(contribution_fields.commits + contribution_fields.issues + contribution_fields.commit_comments + contribution_fields.issue_comments + contribution_fields.pull_requests + contribution_fields.pull_request_comments) AS total, COUNT(DISTINCT contribution_fields.user) AS count + SUM(contribution_fields.commits + contribution_fields.issues + contribution_fields.commit_comments + contribution_fields.issue_comments + contribution_fields.pull_requests + contribution_fields.pull_request_comments) AS total, COUNT(DISTINCT contribution_fields.user) AS distinct_users FROM ( (SELECT organization_members.org_id AS id, commits.author_id AS user, COUNT(*) AS commits, 0 AS issues, 0 AS commit_comments, 0 AS issue_comments, 0 AS pull_requests, 0 AS pull_request_comments FROM organization_members, projects, commits @@ -318,22 +291,21 @@ def contributing_github_organizations(self, owner, repo=None): AND pull_request_comments.user_id = organization_members.user_id GROUP BY pull_request_comments.user_id) ) contribution_fields group by id - having count > 1 + having distinct_users > 1 ORDER BY total DESC """) return pd.read_sql(contributingOrgSQL, self.db, params={"repoid": str(repoid)}) - def first_response_to_issue_duration(self, owner, repo): + @annotate(tag='first-response-to-issue-duration') + def first_response_to_issue_duration(self, owner, repo): #needs clarification about return value """ Subgroup: Issue Resolution - Endpoint: issues/response_time - chaoss-metric: first-response-to-issue-duration - Time to comment by issue + Timeseries of the time to first comment by issue - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being am issue + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame of issues with their response information """ repoid = self.repoid(owner, repo) issueCommentsSQL = s.sql.text(""" @@ -359,25 +331,30 @@ def first_response_to_issue_duration(self, owner, repo): rs = pd.read_sql(issueCommentsSQL, self.db, params={"repoid": str(repoid)}) return rs - def forks(self, owner, repo=None, group_by="week"): + @annotate(tag='forks') + def forks(self, owner, repo=None, group_by="week"): """ Subgroup: Code Development - chaoss-metric: forks + Timeseries of when a repo's forks were created + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with forks/day + :return: DataFrame with new forks/week """ repoid = self.repoid(owner, repo) forksSQL = s.sql.text(self.__single_table_count_by_date('projects', 'forked_from', 'owner_id', group_by=group_by)) return pd.read_sql(forksSQL, self.db, params={"repoid": str(repoid)}).drop(0) - def maintainer_response_to_merge_request_duration(self, owner, repo=None): + @annotate(tag='maintainer-response-to-merge-request-duration') + def maintainer_response_to_merge_request_duration(self, owner, repo=None): #needs clarification on return value """ - Duration of time between a merge request being created and a maintainer commenting on that request + Subgroup: Code Development + + Timeseries of duration of time between a merge request being created and a maintainer commenting on that request - :param owner: The name of the project owner - :param repo: The name of the repo + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with each row being a week """ repoid = self.repoid(owner, repo) @@ -402,13 +379,16 @@ def maintainer_response_to_merge_request_duration(self, owner, repo=None): df = pd.read_sql(maintainerResponseToMRSQL, self.db, params={"repoid": str(repoid)}) return df.iloc[:, 0:2] - def new_contributing_github_organizations(self, owner, repo=None): + @annotate(tag='new-contributing-github-organizations') + def new_contributing_github_organizations(self, owner, repo=None): #needs clarification about return value """ - Number of new contributing organizations on a certain date + Subgroup: Community Growth - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being a week + Timeseries of information about new contributing organizations on a certain date + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame with each organization's information """ repoid = self.repoid(owner, repo) @@ -416,7 +396,7 @@ def new_contributing_github_organizations(self, owner, repo=None): SELECT fields.date AS "date", fields.id AS "contributing_org", - count(DISTINCT fields.user) AS count + count(DISTINCT fields.user) AS distinct_users FROM ( (SELECT organization_members.org_id AS id, commits.created_at AS date, commits.author_id AS user FROM organization_members, projects, commits WHERE projects.id = :repoid @@ -457,7 +437,7 @@ def new_contributing_github_organizations(self, owner, repo=None): AND pull_request_comments.user_id = organization_members.user_id GROUP BY pull_request_comments.user_id)) fields Group BY contributing_org - HAVING count > 1 + HAVING distinct_users > 1 ORDER BY YEARWEEK(date) """) df = pd.read_sql(contributingOrgSQL, self.db, params={"repoid": str(repoid)}) @@ -468,48 +448,46 @@ def new_contributing_github_organizations(self, owner, repo=None): numOrgs = np.append(numOrgs, count) return pd.DataFrame({'date': df["date"], 'organizations': numOrgs}) + @annotate(tag='open-issues') def open_issues(self, owner, repo=None, group_by="week"): """ Subgroup: Individual Diversity - Endpoint: issues - chaoss-metric: open-issues - Timeseries of issues opened per day + Timeseries of the count of newly issues opened per week :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with issues/day + :return: DataFrame with opened issues/week """ repoid = self.repoid(owner, repo) issuesSQL = s.sql.text(self.__single_table_count_by_date('issues', 'repo_id', 'reporter_id', group_by=group_by)) return pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='pull-request-comments') def pull_request_comments(self, owner, repo=None): """ Subgroup: Code Development - chaoss-metric: pull-request-comments - Timeseries of pull request comments + Timeseries of the count of new pull request comments :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with new by week + :return: DataFrame with new pull request comments/week """ repoid = self.repoid(owner, repo) pullRequestCommentsSQL = s.sql.text(self.__sub_table_count_by_date("pull_requests", "pull_request_comments", "pullreq_id", "pull_request_id", "base_repo_id")) return pd.read_sql(pullRequestCommentsSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='pull-requests-open') def pull_requests_open(self, owner, repo=None): """ Subgroup: Code Development - Endpoint: pulls - chaoss-metric: pull-requests-open - Timeseries of pull requests creation, also gives their associated activity + Timeseries of pull requests creation and their associated activity :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
- :return: DataFrame with pull requests by day + :return: DataFrame with pull request information/week """ repoid = self.repoid(owner, repo) pullsSQL = s.sql.text(""" @@ -538,40 +516,72 @@ def pull_requests_open(self, owner, repo=None): ### ACTIVITY ### ##################################### - def watchers(self, owner, repo=None, group_by="week"): + @annotate(tag='issue-comments') + def issue_comments(self, owner, repo=None): """ - Timeseries of when people starred a repo + Timeseries of the count of new issue comments :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with stargazers/day + :return: DataFrame with new issue comments/week """ repoid = self.repoid(owner, repo) - stargazersSQL = s.sql.text(self.__single_table_count_by_date('watchers', 'repo_id', 'user_id', group_by=group_by)) - df = pd.read_sql(stargazersSQL, self.db, params={"repoid": str(repoid)}) - df.drop(df.index[:1], inplace=True) - return df + issueCommentsSQL = s.sql.text(self.__sub_table_count_by_date("issues", "issue_comments", "issue_id", "issue_id", "repo_id")) + return pd.read_sql(issueCommentsSQL, self.db, params={"repoid": str(repoid)}) - def issue_comments(self, owner, repo=None): + @annotate(tag='pull-requests-made-closed') + def pull_requests_made_closed(self, owner, repo=None): """ - Timeseries of issue comments + Timeseries of the ratio of pull requests made/closed :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with new by week + :return: DataFrame with the ratio of pull requests made/closed """ repoid = self.repoid(owner, repo) - issueCommentsSQL = s.sql.text(self.__sub_table_count_by_date("issues", "issue_comments", "issue_id", "issue_id", "repo_id")) - return pd.read_sql(issueCommentsSQL, self.db, params={"repoid": str(repoid)}) + pullRequestsMadeClosedSQL = s.sql.text(""" + SELECT DATE(closed_on) AS "date", CAST(num_opened AS DECIMAL)/CAST(num_closed AS DECIMAL) AS "rate" + FROM + (SELECT COUNT(DISTINCT pull_request_id) AS num_opened, DATE(pull_request_history.created_at) AS opened_on + FROM pull_request_history + JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id + WHERE action = 'opened' AND pull_requests.base_repo_id = :repoid + GROUP BY opened_on) opened + JOIN + (SELECT count(distinct pull_request_id) AS num_closed, DATE(pull_request_history.created_at) AS closed_on + FROM pull_request_history + JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id + WHERE action = 'closed' + AND pull_requests.base_repo_id = :repoid + GROUP BY closed_on) closed + ON closed.closed_on = opened.opened_on + """) + return pd.read_sql(pullRequestsMadeClosedSQL, self.db, params={"repoid": str(repoid)}) + + @annotate(tag='watchers') + def watchers(self, owner, repo=None, group_by="week"): + """ + Returns of the count of people who starred the repo on that date + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame with new stargazers + """ + repoid = self.repoid(owner, repo) + stargazersSQL = s.sql.text(self.__single_table_count_by_date('watchers', 'repo_id', 'user_id', group_by=group_by)) + df = pd.read_sql(stargazersSQL, self.db, params={"repoid": str(repoid)}) + df.drop(df.index[:1], inplace=True) + return df ##################################### ### EXPERIMENTAL ### ##################################### # COMMIT RELATED + @annotate(tag='commits100') def commits100(self, owner, repo=None, group_by="week"): """ - Timeseries of all the commits on a repo + Timeseries of the count of commits, limited to the first 100 overall :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. @@ -583,11 +593,10 @@ def commits100(self, owner, repo=None, group_by="week"): tem = temp['commits'] > 100 return temp[tem].reset_index(drop=True) + @annotate(tag='commit-comments') def commit_comments(self, owner, repo=None, group_by="week"): """ - augur-metric: commit-comments - - Timeseries of commit comments + Timeseries of the count of new commit comments :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. @@ -597,16 +606,17 @@ def commit_comments(self, owner, repo=None, group_by="week"): commitCommentsSQL = s.sql.text(self.__sub_table_count_by_date("commits", "commit_comments", "id", "commit_id", "project_id")) return pd.read_sql(commitCommentsSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='committer-locations') def committer_locations(self, owner, repo=None): """ - Return committers and their locations + Returns committers and their locations - @todo: Group by country code instead of users, needs the new schema :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. - :return: DataFrame with users and locations sorted by commtis + :return: DataFrame with users and locations sorted by descending count of commits """ + #TODO: Group by country code instead of users, needs the new schema repoid = self.repoid(owner, repo) rawContributionsSQL = s.sql.text(""" SELECT users.login, users.location, COUNT(*) AS "commits" @@ -621,19 +631,18 @@ def committer_locations(self, owner, repo=None): """) return pd.read_sql(rawContributionsSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='total-committers') def total_committers(self, owner, repo=None): """ - augur-metric: total-committers - - Number of total committers as of each week + Timeseries of total committers as of each week - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being a week + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame with total committers/week """ repoid = self.repoid(owner, repo) totalCommittersSQL = s.sql.text(""" - SELECT total_committers.created_at AS "date", COUNT(total_committers.author_id) total_total_committers + SELECT total_committers.created_at AS "date", COUNT(total_committers.author_id) total_committers FROM ( SELECT author_id, MIN(DATE(created_at)) created_at FROM commits @@ -643,13 +652,18 @@ def total_committers(self, owner, repo=None): GROUP BY YEARWEEK(total_committers.created_at) """) df = pd.read_sql(totalCommittersSQL, self.db, params={"repoid": str(repoid)}) - df['total_total_committers'] = df['total_total_committers'].cumsum() + df['total_committers'] = df['total_committers'].cumsum() return df # ISSUE RELATED + @annotate(tag='issue-activity') def issue_activity(self, owner, repo=None): """ - augur-metric: issue-activity + Timeseries of issue related activity: issues opened, closed, reopened, and currently open + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. + :return: DataFrame with total committers/week """ repoid = self.repoid(owner, repo) issueActivity = s.sql.text(""" @@ -659,6 +673,7 @@ def issue_activity(self, owner, repo=None): WHERE issues.repo_id = :repoid GROUP BY YEARWEEK(issues.created_at) """) + #TODO: clean this up df = pd.read_sql(issueActivity, self.db, params={"repoid": str(repoid)}) df = df.assign(issues_open = 0) globalIssuesOpened = 0 @@ -685,15 +700,14 @@ def issue_activity(self, owner, repo=None): return df4 # PULL REQUEST RELATED + @annotate(tag='pull-request-acceptance-rate') def pull_request_acceptance_rate(self, owner, repo=None): """ - augur-metric: pull-request-acceptance-rate - - Timeseries of pull request acceptance rate (Number of pull requests merged on a date over Number of pull requests opened on a date) + Timeseries of pull request acceptance rate (expressed as the ratio of pull requests merged on a date to the count of pull requests opened on a date) :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with the pull acceptance rate and the dates + :return: DataFrame with ratio/day """ repoid = self.repoid(owner, repo) pullAcceptanceSQL = s.sql.text(""" @@ -716,10 +730,13 @@ def pull_request_acceptance_rate(self, owner, repo=None): return pd.read_sql(pullAcceptanceSQL, self.db, params={"repoid": str(repoid)}) # COMMUNITY / CONRIBUTIONS + @annotate(tag='community-age') def community_age(self, owner, repo=None): """ Information helpful to determining a community's age + (Currently broken) + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. :return: DataFrame with the first event of each type (commits, fork, ...) 
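# A minimal usage sketch, assuming the 'date' and 'rate' columns described for
# pull_request_acceptance_rate above; the `ghtorrent` instance and the owner/repo
# names are placeholders:
import pandas as pd

rate = ghtorrent.pull_request_acceptance_rate('rails', 'rails')
rate['date'] = pd.to_datetime(rate['date'])
weekly = rate.set_index('date')['rate'].resample('W').mean()  # smooth the daily ratio into weekly means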
@@ -753,19 +770,38 @@ def community_age(self, owner, repo=None): return pd.read_sql(communityAgeSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='community-engagement') def community_engagement(self, owner, repo): """ - Lots of information about issues and pull requests + Timeseries with lots of information about issues and pull requests + + DataFrame returns these columns: + date + issues_opened + issues_closed + pull_requests_opened + pull_requests_merged + pull_requests_closed + issues_opened_total + issues_closed_total + issues_closed_rate_this_window + issues_closed_rate_total + issues_delta + issues_open + pull_requests_opened_total + pull_requests_closed_total + pull_requests_closed_rate_this_window + pull_requests_closed_rate_total + pull_requests_delta + pull_requests - TODO: More documentation - - :param owner: The name of the project owner - :param repo: The name of the repo - :return: DataFrame with each row being a week + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. + :return: DataFrame with the associated information about a repo's activity on that specific date """ repoid = self.repoid(owner, repo) issuesFullSQL = s.sql.text(""" - SELECT DATE(date) as "date", + SELECT STR_TO_DATE(CONCAT(YEARWEEK(DATE,0),' Sunday'), '%X%V %W') as "date", SUM(issues_opened) AS "issues_opened", SUM(issues_closed) AS "issues_closed", SUM(pull_requests_opened) AS "pull_requests_opened", @@ -774,8 +810,8 @@ def community_engagement(self, owner, repo): FROM ( - SELECT issue_events.created_at as "date", - issue_events.action = "closed" AND issues.pull_request = 0 AS issues_closed, + SELECT STR_TO_DATE(CONCAT(YEARWEEK(issue_events.created_at,0),' Sunday'), '%X%V %W') as "date", + issue_events.action = "closed" AND issues.pull_request = 0 AS issues_closed, 0 AS pull_requests_closed, 0 AS pull_requests_merged, issue_events.action = "reopened" AND issues.pull_request = 0 AS issues_opened, @@ -786,12 +822,13 @@ def community_engagement(self, owner, repo): LEFT JOIN pull_request_history ON pull_request_history.pull_request_id = issues.pull_request_id WHERE issues.repo_id = :repoid + AND issue_events.action IN ('closed', 'reopened') UNION ALL - SELECT pull_request_history.created_at as "date", + SELECT STR_TO_DATE(CONCAT(YEARWEEK(pull_request_history.created_at,0),' Sunday'), '%X%V %W') as "date", 0 AS issues_closed, - pull_request_history.action = "closed" AND issues.pull_request = 1 AS pull_requests_closed, + pull_request_history.action = "closed" AND issues.pull_request = 1 AS pull_requests_closed, pull_request_history.action = "merged" AND issues.pull_request = 1 AS pull_requests_merged, 0 AS issues_opened, pull_request_history.action = "reopened" AND issues.pull_request = 1 AS pull_requests_opened @@ -799,10 +836,11 @@ def community_engagement(self, owner, repo): LEFT JOIN pull_request_history ON pull_request_history.pull_request_id = issues.pull_request_id WHERE issues.repo_id = :repoid + AND pull_request_history.action IN ('closed', 'merged', 'reopened') UNION ALL - SELECT issues.created_at as "date", + SELECT STR_TO_DATE(CONCAT(YEARWEEK(issues.created_at ,0),' Sunday'), '%X%V %W') as "date", 0 AS issues_closed, 0 AS pull_requests_closed, 0 AS pull_requests_merged, @@ -814,37 +852,41 @@ def community_engagement(self, owner, repo): ) summary - GROUP BY YEARWEEK(date) + GROUP BY YEARWEEK(date, 1) + 
+ """) counts = pd.read_sql(issuesFullSQL, self.db, params={"repoid": str(repoid)}) - # counts.drop(0, inplace=True) + counts.drop(0, inplace=True) counts['issues_opened_total'] = counts.issues_opened.cumsum() counts['issues_closed_total'] = counts.issues_closed.cumsum() - counts['issues_closed_rate_this_window'] = counts.issues_closed / counts.issues_opened + counts['issues_closed_rate_this_week'] = counts.issues_closed / counts.issues_opened counts['issues_closed_rate_total'] = counts.issues_closed_total / counts.issues_opened_total counts['issues_delta'] = counts.issues_opened - counts.issues_closed counts['issues_open'] = counts['issues_delta'].cumsum() counts['pull_requests_opened_total'] = counts.pull_requests_opened.cumsum() counts['pull_requests_closed_total'] = counts.pull_requests_closed.cumsum() - counts['pull_requests_closed_rate_this_window'] = counts.pull_requests_closed / counts.pull_requests_opened + counts['pull_requests_merged_total'] = counts.pull_requests_merged.cumsum() + counts['pull_requests_closed_rate_this_week'] = counts.pull_requests_closed / counts.pull_requests_opened + counts['pull_requests_merged_rate_this_week'] = counts.pull_requests_merged / counts.pull_requests_opened counts['pull_requests_closed_rate_total'] = counts.pull_requests_closed_total / counts.pull_requests_opened_total + counts['pull_requests_merged_rate_total'] = counts.pull_requests_merged_total / counts.pull_requests_opened_total counts['pull_requests_delta'] = counts.pull_requests_opened - counts.pull_requests_closed counts['pull_requests_open'] = counts['pull_requests_delta'].cumsum() return counts + @annotate(tag='contributors') def contributors(self, owner, repo=None): """ - augur-metric: contributors - All the contributors to a project and the counts of their contributions :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. Use repoid() to get this. :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
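# A small sketch of the cumulative bookkeeping used for community_engagement above,
# run on toy weekly counts; the column names mirror the ones added to `counts`, and
# the data is made up:
import pandas as pd

toy = pd.DataFrame({'issues_opened': [5, 3, 2], 'issues_closed': [1, 4, 2]})
toy['issues_delta'] = toy.issues_opened - toy.issues_closed        # 4, -1, 0
toy['issues_open'] = toy['issues_delta'].cumsum()                  # 4, 3, 3 issues still open
toy['issues_closed_rate_total'] = toy.issues_closed.cumsum() / toy.issues_opened.cumsum()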
- :return: DataFrame with users id, users login, and their contributions by type + :return: DataFrame with user's id and contributions by type, separated by user """ repoid = self.repoid(owner, repo) contributorsSQL = s.sql.text(""" - SELECT id AS user, SUM(commits) AS commits, SUM(issues) AS issues, + SELECT users.login as name, a.id AS user, SUM(commits) AS commits, SUM(issues) AS issues, SUM(commit_comments) AS commit_comments, SUM(issue_comments) AS issue_comments, SUM(pull_requests) AS pull_requests, SUM(pull_request_comments) AS pull_request_comments, SUM(a.commits + a.issues + a.commit_comments + a.issue_comments + a.pull_requests + a.pull_request_comments) AS total @@ -861,22 +903,32 @@ def contributors(self, owner, repo=None): (SELECT actor_id AS id, 0, 0, 0, 0, COUNT(*) AS pull_requests, 0 FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.id WHERE pull_request_history.action = 'opened' AND pull_requests.`base_repo_id` = :repoid GROUP BY actor_id) UNION ALL (SELECT user_id AS id, 0, 0, 0, 0, 0, COUNT(*) AS pull_request_comments FROM pull_request_comments JOIN pull_requests ON pull_requests.base_commit_id = pull_request_comments.commit_id WHERE pull_requests.base_repo_id = :repoid GROUP BY user_id) - ) a - WHERE id IS NOT NULL - GROUP BY id + ) a JOIN users ON users.id = a.id + WHERE a.id IS NOT NULL + GROUP BY a.id ORDER BY total DESC; """) return pd.read_sql(contributorsSQL, self.db, params={"repoid": str(repoid)}) + @annotate(tag='contributions') def contributions(self, owner, repo=None, userid=None): """ - augur metric: contributions Timeseries of all the contributions to a project, optionally limited to a specific user + DataFrame has these columns: + date + commits + pull_requests + issues + commit_comments + pull_request_comments + issue_comments + tota + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table :param repo: The name of the repo. Unneeded if repository id was passed as owner. :param userid: The id of user if you want to limit the contributions to a specific user. - :return: DataFrame with all of the contributions seperated by day. + :return: DataFrame with all of the contributions separated by day """ repoid = self.repoid(owner, repo) rawContributionsSQL = """ @@ -932,7 +984,7 @@ def classify_contributors(self, owner, repo=None): :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. :param repo: The name of the repo. Unneeded if repository id was passed as owner. - :return: DataFrame with the login and role of contributors + :return: DataFrame with the id and role of contributors """ repoid = self.repoid(owner, repo) contributors = self.contributors(repoid, repo=None) @@ -957,7 +1009,15 @@ def classify(row): roles = contributors.apply(classify, axis=1) return roles + @annotate(tag='project-age') def project_age(self, owner, repo=None): + """ + Date of the project's creation + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+ :return: DataFrame with the date of the project's creation + """ repoid = self.repoid(owner, repo) projectAgeSQL = s.sql.text(""" SELECT date(created_at) AS "date", COUNT(*) AS "{0}" @@ -970,9 +1030,14 @@ def project_age(self, owner, repo=None): # DEPENDENCY RELATED # OTHER - def fakes(self, owner, repo=None): + @annotate(tag='fakes') + def fakes(self, owner, repo=None): #should this be for users who contribute to the given repo? """ - augur-metric: fakes + Timeseries of new fake users per week + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. + :return: DataFrame with new fake users/week """ repoid = self.repoid(owner, repo) contributorsSQL = s.sql.text(""" @@ -982,10 +1047,3 @@ def fakes(self, owner, repo=None): GROUP BY YEARWEEK(date) """) return pd.read_sql(contributorsSQL, self.db, params={"repoid": str(repoid)}) - - def ghtorrent_range(self): - ghtorrentRangeSQL = s.sql.text(""" - SELECT MIN(date(created_at)) AS "min_date", MAX(date(created_at)) AS "max_date" - FROM commits - """) - return pd.read_sql(ghtorrentRangeSQL, self.db) diff --git a/augur/ghtorrentplus.py b/augur/ghtorrentplus.py --- a/augur/ghtorrentplus.py +++ b/augur/ghtorrentplus.py @@ -1,8 +1,14 @@ +#SPDX-License-Identifier: MIT +""" +Data source that extends GHTorrent with summary tables +""" + import pandas as pd import sqlalchemy as s import numpy as np import re from augur import logger +from augur.util import annotate # end imports # (don't remove the above line, it's for a script) @@ -40,11 +46,21 @@ def update(self): ##################################### ### GROWTH, MATURITY, AND DECLINE ### ##################################### - + + @annotate(tag='closed-issue-resolution-duration') def closed_issue_resolution_duration(self, owner, repo=None): """ - Endpoint: issue_close_time - augur-metric: closed-issue-resolution-duration + Returns a DataFrame with these columns: + id + repo_id + closed + pull_request + minutes_to_close + z-score + + :param owner: The name of the project owner or the id of the project in the projects table of the project in the projects table. + :param repo: The name of the repo. Unneeded if repository id was passed as owner. 
+        :return: DataFrame with the above columns
        """
        repoid = self.ghtorrent.repoid(owner, repo)
        issuesClosedSQL = s.sql.text("""
diff --git a/augur/git.py b/augur/git.py
--- a/augur/git.py
+++ b/augur/git.py
@@ -1,6 +1,6 @@
 #SPDX-License-Identifier: MIT
 """
-Analyzes Git repos directly using dulwich
+Analyzes Git repos directly using git
 """
 
 import os
@@ -11,7 +11,7 @@
 import pandas as pd
 import git
 from lockfile import LockFile, AlreadyLocked
-from augur.util import logger, get_cache
+from augur.util import logger, get_cache, annotate
 # end imports
 # (don't remove the above line, it's for a script
 
@@ -101,6 +101,12 @@ def __init__(self, list_of_repositories, storage_folder, csv, cache=None):
         self.is_updater = False
 
     def get_repo(self, repo_url):
+        """
+        Create a repo object from the given URL
+
+        :param repo_url: URL of the repository
+        :return: a Repo object
+        """
         if repo_url in self._git_repos:
             return self._git_repos[repo_url]
         else:
@@ -129,9 +135,41 @@ def update(self):
 
         self.is_updater = False
 
+    #####################################
+    ###    DIVERSITY AND INCLUSION    ###
+    #####################################
+
+
+    #####################################
+    ### GROWTH, MATURITY, AND DECLINE ###
+    #####################################
+
+
+    #####################################
+    ###              RISK             ###
+    #####################################
+
+    #####################################
+    ###             VALUE             ###
+    #####################################
+
+    #####################################
+    ###           ACTIVITY            ###
+    #####################################
+
+
+    #####################################
+    ###          EXPERIMENTAL         ###
+    #####################################
+
+    @annotate(tag='downloaded-repos')
     def downloaded_repos(self):
+        """
+        Get all downloaded repositories and the date they were last updated
 
+        :return: a JSON object with the URL and date of last update for all downloaded repos
+        """
         downloaded = []
         for repo_url in self._repo_urls:
             repo = self.get_repo(repo_url)
@@ -148,7 +186,7 @@ def downloaded_repos(self):
 
         return downloaded
 
-
+    @annotate(tag='lines-changed-minus-whitespace')
     def lines_changed_minus_whitespace(self, repo_url, from_commit=None, df=None, rebuild_cache=False):
         """
         Makes sure the storageFolder contains updated versions of all the repos
@@ -214,7 +252,8 @@ def heavy_lifting():
             results = new_results
         return results
 
-    def changes_by_author(self, repo_url, freq='M', rebuild_cache=False):
+    @annotate(tag='lines-changed-by-author')
+    def lines_changed_by_author(self, repo_url, freq='M', rebuild_cache=False):
         """
         Makes sure the storageFolder contains updated versions of all the repos
         """
@@ -229,4 +268,4 @@ def heavy_lifting():
         if rebuild_cache:
             self.__cache.remove_value(key='cba-{}-{}'.format(freq, repo_url))
         results = self.__cache.get(key='cba-{}-{}'.format(freq, repo_url), createfunc=heavy_lifting)
-        return results
\ No newline at end of file
+        return results
diff --git a/augur/githubapi.py b/augur/githubapi.py
--- a/augur/githubapi.py
+++ b/augur/githubapi.py
@@ -1,3 +1,8 @@
+#SPDX-License-Identifier: MIT
+"""
+Data source that uses the GitHub API
+"""
+
 from augur.localcsv import LocalCSV
 import json
 import re
@@ -8,6 +13,7 @@
 import datetime
 import requests
 from augur import logger
+from augur.util import annotate
 # end imports
 # (don't remove the above line, it's for a script)
 
@@ -33,14 +39,14 @@ def __init__(self, api_key):
     ### GROWTH, MATURITY, AND DECLINE ###
     #####################################
 
+    @annotate(tag='lines-of-code-changed')
     def lines_of_code_changed(self, owner, repo=None):
         """
-        
chaoss-metric: lines-of-code-changed - Additions and deletions each week + Timeseries of the count of lines added, deleted, and the net change each week :param owner: The name of the project owner :param repo: The name of the repo - :return: DataFrame with each row being am issue + :return: DataFrame with the associated lines changed information/week """ # get the data we need from the GitHub API # see <project_root>/augur/githubapi.py for examples using the GraphQL API @@ -76,81 +82,91 @@ def lines_of_code_changed(self, owner, repo=None): ### EXPERIMENTAL ### ##################################### - def bus_factor(self, owner, repo, filename=None, start=None, end=None, threshold=50): + @annotate(tag='bus-factor') + def bus_factor(self, owner, repo, threshold=50): """ - augur-metric: bus-factor - Calculates bus factor by adding up percentages from highest to lowest until they exceed threshold :param owner: repo owner username :param repo: repo name - :param filename: optional; file or directory for function to run on - :param start: optional; start time for analysis - :param end: optional; end time for analysis :param threshold: Default 50; """ + cursor = "" + url = "https://api.github.com/graphql" + commit_count = [] + hasNextPage = True + threshold = threshold / 100 + while hasNextPage: + query = {"query" : + """ + query{ + repository(name: "%s", owner: "%s") { + ref(qualifiedName: "master") { + target { + ... on Commit { + id + history(first: 100%s) { + pageInfo { + hasNextPage + } + edges { + cursor + node { + author { + email + } + } + } + } + } + } + } + } + } + """ % (repo, owner, cursor) + } + r = requests.post(url, auth=requests.auth.HTTPBasicAuth('user', self.GITHUB_API_KEY), json=query) + raw = r.text + data = json.loads(json.loads(json.dumps(raw))) + hasNextPage = data['data']['repository']['ref']['target']['history']['pageInfo']['hasNextPage'] + commits = data['data']['repository']['ref']['target']['history']['edges'] + for i in commits: + commit_count.append({'email' : i['node']['author']['email']}) + cursor = ", after: \"%s\"" % (commits[-1]['cursor']) - if start != None: - start = parse(start) - else: - start = github.GithubObject.NotSet - - if end != None: - end = parse(end) - else: - end = github.GithubObject.NotSet - - commits = self.api.get_repo((owner + "/" + repo)).get_commits(since=start, until=end) - - if filename != None: - self.api.get_repo((owner + "/" + repo)).get_contents(filename) - - df = [] - - if filename != None: - for commit in commits: - for file in commit.files: - if file.filename == filename: - try: - df.append({'userid': commit.author.id}) - except AttributeError: - pass - break - else: - for commit in commits: - try: - df.append({'userid': commit.author.id}) - except AttributeError: - pass - df = pd.DataFrame(df) + df = pd.DataFrame(commit_count) + + total = df.email.count() - df = df.groupby(['userid']).userid.count() / df.groupby(['userid']).userid.count().sum() * 100 + df = df.groupby(['email']).email.count() / df.groupby(['email']).email.count().sum() * 100 i = 0 - for num in df.cumsum(): + for num in df.sort_values(ascending=False).cumsum(): i = i + 1 if num >= threshold: - worst = i break + worst = i - i = 0 + j = 0 for num in df.sort_values(ascending=True).cumsum(): - i = i + 1 + j = j + 1 if num >= threshold: - best = i break + best = j bus_factor = [{'worst': worst, 'best' : best}] return pd.DataFrame(bus_factor) + @annotate(tag='major-tags') def major_tags(self, owner, repo): """ - Returns dates and names of major version (according to 
semver) tags. May return blank if no major versions + Timeseries of the dates and names of major version (according to semver) tags. May return blank if no major versions :param owner: repo owner username :param repo: repo name + :return: DataFrame with major versions and their release date """ cursor = "null" tags_list = [] @@ -205,13 +221,15 @@ def major_tags(self, owner, repo): return pd.DataFrame(major_versions) + @annotate(tag='tags') def tags(self, owner, repo, raw=False): """ - Returns dates and names of tags + Timeseries of the dates and names of tags :param owner: repo owner username :param repo: repo name :param raw: Default False; Returns list of dicts + :return: DataFrame with all tags and their release date """ cursor = "null" diff --git a/augur/librariesio.py b/augur/librariesio.py --- a/augur/librariesio.py +++ b/augur/librariesio.py @@ -1,8 +1,14 @@ +""" +Data source that uses the LibrariesIO dependency data +""" + import requests import pandas as pd import numpy as np from bs4 import BeautifulSoup from augur import logger +from augur.util import annotate + # end imports # (don't remove the above line, it's for a script) @@ -41,10 +47,9 @@ def __init__(self, api_key, githubapi): ### EXPERIMENTAL ### ##################################### - + @annotate(tag='dependencies') def dependencies(self, owner, repo): """ - Finds the packages that a project depends on :param owner: GitHub username of the owner of the repo @@ -55,6 +60,7 @@ def dependencies(self, owner, repo): r = requests.get(url, params={"api_key": self.API_KEY}) return r.json() + @annotate(tag='dependency-stats') def dependency_stats(self, owner, repo): """ Finds the number of dependencies, dependant projects, and dependent repos by scrapping it off of the libraries.io website @@ -110,9 +116,9 @@ def dependency_stats(self, owner, repo): return final_data + @annotate(tag='dependents') def dependents(self, owner, repo): """ - Finds the packages depend on this repository :param owner: GitHub username of the owner of the repo diff --git a/augur/localcsv.py b/augur/localcsv.py --- a/augur/localcsv.py +++ b/augur/localcsv.py @@ -1,4 +1,7 @@ #SPDX-License-Identifier: MIT +""" +Loads small included datasets +""" import pandas as pd import tldextract from urllib.parse import urlparse diff --git a/augur/metadata.py b/augur/metadata.py --- a/augur/metadata.py +++ b/augur/metadata.py @@ -1 +1 @@ -__version__ = '0.6.1' \ No newline at end of file +__version__ = '0.7.0' \ No newline at end of file diff --git a/augur/metrics_status.py b/augur/metrics_status.py new file mode 100644 --- /dev/null +++ b/augur/metrics_status.py @@ -0,0 +1,286 @@ +#SPDX-License-Identifier: MIT +""" +Analyzes Augur source and CHAOSS repos to determine metric implementation status +""" + +import re +import json +import glob +import requests +from augur.util import metric_metadata + +class FrontendStatusExtractor(object): + + def __init__(self): + self.api_text = open("frontend/app/AugurAPI.js", 'r').read() + self.attributes = re.findall(r'(?:(GitEndpoint|Endpoint|Timeseries)\(repo, )\'(.*)\', \'(.*)\'', self.api_text) + self.timeseries_attributes = [attribute for attribute in self.attributes if attribute[0] == "Timeseries"] + self.endpoint_attributes = [attribute for attribute in self.attributes if attribute[0] == "Endpoint"] + self.git_endpoint_attributes = [attribute for attribute in self.attributes if attribute[0] == "GitEndpoint"] + + def determine_frontend_status(self, endpoint, metric_type): + attribute = None + + if metric_type is "timeseries": + 
attribute = next((attribute[1] for attribute in self.timeseries_attributes if attribute[2] in endpoint), None) + + elif metric_type is "metric": + attribute = next((attribute[1] for attribute in self.endpoint_attributes if attribute[2] in endpoint), None) + + elif metric_type is "git": + attribute = next((attribute[1] for attribute in self.git_endpoint_attributes if attribute[2] in endpoint), None) + + if attribute is not None: + status = 'implemented' + else: + status = 'unimplemented' + + return status + +class Metric(object): + + def __init__(self): + self.ID = 'none' + self.tag = 'none' + self.name = 'none' + self.group = 'none' + self.backend_status = 'unimplemented' + self.frontend_status = 'unimplemented' + self.endpoint = 'none' + self.source = 'none' + self.metric_type = 'none' + self.url = '/' + self.is_defined = 'false' + +class GroupedMetric(Metric): + + def __init__(self, raw_name, group): + Metric.__init__(self) + self.name = re.sub('/', '-', re.sub(r'-$|\*', '', re.sub('-', ' ', raw_name).title())) + self.tag = re.sub(' ', '-', self.name).lower() + self.ID = re.sub(r'-$|\*', '', self.source + '-' + self.tag) + self.group = group + +class ImplementedMetric(Metric): + + def __init__(self, metadata, frontend_status_extractor): + Metric.__init__(self) + + self.ID = metadata['ID'] + self.tag = metadata['tag'] + self.name = metadata['metric_name'] + self.backend_status = 'implemented' + self.source = metadata['source'] + self.group = "experimental" + + if 'metric_type' in metadata: + self.metric_type = metadata['metric_type'] + else: + self.metric_type = 'metric' + + if 'endpoint' in metadata: + self.endpoint = metadata['endpoint'] + self.frontend_status = frontend_status_extractor.determine_frontend_status(self.endpoint, self.metric_type) + +class MetricsStatus(object): + + diversity_inclusion_urls = [ + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_communication.md", "has_links": True }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_contribution.md", "has_links": True }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_events.md", "has_links": False }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_governance.md", "has_links": False }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_leadership.md", "has_links": False }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_project_places.md", "has_links": True }, + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/goal_recognition.md", "has_links": False } + ] + + growth_maturity_decline_urls = [ + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/2_Growth-Maturity-Decline.md", "has_links": True }, + ] + + risk_urls = [ + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/3_Risk.md", "has_links": False }, + ] + + value_urls = [ + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/4_Value.md", "has_links": False }, + ] + + activity_urls = [ + { "raw_content_url": "https://raw.githubusercontent.com/augurlabs/metrics/wg-gmd/activity-metrics-list.md", "has_links": False }, + ] + + activity_repo = "augurlabs/metrics" + + def __init__(self, githubapi): + self.__githubapi = githubapi.api + + self.groups = { + "diversity-inclusion": "Diversity and Inclusion", + 
"growth-maturity-decline": "Growth, Maturity, and Decline", + "risk": "Risk", + "value": "Value", + "activity": "Activity", + "experimental": "Experimental", + "all": "All" + }, + + self.sources = [] + self.metric_types = [] + self.tags = {} + + self.implemented_metrics = [] + + self.raw_metrics_status = [] + self.metadata = [] + + def create_metrics_status(self): + + self.build_implemented_metrics() + + self.diversity_inclusion_metrics = self.create_grouped_metrics(self.diversity_inclusion_urls, "diversity-inclusion") + self.growth_maturity_decline_metrics = self.create_grouped_metrics(self.growth_maturity_decline_urls, "growth-maturity-decline") + self.risk_metrics = self.create_grouped_metrics(self.risk_urls, "risk") + self.value_metrics = self.create_grouped_metrics(self.value_urls, "value") + + self.metrics_by_group = [self.diversity_inclusion_metrics, self.growth_maturity_decline_metrics, self.risk_metrics, self.value_metrics] + + self.activity_metrics = self.create_activity_metrics() + self.metrics_by_group.append(self.activity_metrics) + + self.create_experimental_metrics() + self.metrics_by_group.append(self.experimental_metrics) + + self.copy_implemented_metrics() + + self.find_defined_metrics() + + self.get_raw_metrics_status() + + self.get_metadata() + + def build_implemented_metrics(self): + frontend_status_extractor = FrontendStatusExtractor() + for metric in metric_metadata: + if "ID" in metric.keys(): + self.implemented_metrics.append(ImplementedMetric(metric, frontend_status_extractor)) + + def extract_grouped_metric_names(self, remote): + metric_file = requests.get(remote["raw_content_url"]).text + + regEx = r'^(?!Name)(.*[^-])(?:\ \|)' + if remote["has_links"] == True: + regEx = r'\[(.*?)\]\((?:.*?\.md)\)' + + return re.findall(regEx, metric_file, re.M) + + def create_grouped_metrics(self, remotes_list, group): + remote_names = [] + + for remote in remotes_list: + for name in self.extract_grouped_metric_names(remote): + remote_names.append(name) + + remote_metrics = [] + + for name in remote_names: + remote_metrics.append(GroupedMetric(name, group)) + + return remote_metrics + + def create_activity_metrics(self): + activity_metrics_raw_text = requests.get(self.activity_urls[0]["raw_content_url"]).text + + raw_activity_names = re.findall(r'\|(?:\[|)(.*)\|(?:\]|)(?:\S| )', activity_metrics_raw_text) + + activity_names = [re.sub(r'(?:\]\(.*\))', '', name) for name in raw_activity_names if '---' not in name and 'Name' not in name] + + activity_metrics = [] + + for raw_name in activity_names: + metric = GroupedMetric(raw_name, "activity") + + is_grouped_metric = True + for group in self.metrics_by_group: + if metric.tag not in [metric.tag for metric in group]: + is_grouped_metric = False + + if is_grouped_metric == False: + activity_metrics.append(metric) + + return activity_metrics + + def create_experimental_metrics(self): + tags = [] + for group in self.metrics_by_group: + for metric in group: + tags.append(metric.tag) + + self.experimental_metrics = [metric for metric in self.implemented_metrics if metric.tag not in tags] + + def copy_implemented_metrics(self): + # takes implemented metrics and copies their data to the appropriate metric object + # I'm sorry + implemented_metric_tags = [metric.tag for metric in self.implemented_metrics] + for group in self.metrics_by_group: + if group is not self.experimental_metrics: #experimental metrics don't need to be copied, since they don't have a definition + for grouped_metric in group: + if grouped_metric.tag in 
implemented_metric_tags: + metric = next(metric for metric in self.implemented_metrics if metric.tag == grouped_metric.tag) + for key in metric.__dict__.keys(): + if key != 'group': #don't copy the group over, since the metrics are already grouped + grouped_metric.__dict__[key] = metric.__dict__[key] + + def find_defined_metrics(self): + activity_files = self.__githubapi.get_repo(self.activity_repo).get_dir_contents("activity-metrics") + defined_tags = [re.sub(".md", '', file.name) for file in activity_files] + + for group in self.metrics_by_group: + for metric in group: + if metric.tag in defined_tags: + metric.is_defined = 'true' + metric.url = "https://github.com/{}/blob/wg-gmd/activity-metrics/{}.md".format(MetricsStatus.activity_repo, metric.tag) + + def get_raw_metrics_status(self): + for group in self.metrics_by_group: + for metric in group: + self.raw_metrics_status.append(metric.__dict__) + + def get_metadata(self): + self.get_metric_sources() + self.get_metric_types() + self.get_metric_tags() + + self.metadata = { + "remotes": { + "diversity_inclusion_urls": self.diversity_inclusion_urls, + "growth_maturity_decline_urls": self.growth_maturity_decline_urls, + "risk_urls": self.risk_urls, + "value_urls": self.value_urls, + "activity_repo_urls": self.activity_urls + }, + "groups": self.groups, + "sources": self.sources, + "metric_types": self.metric_types, + "tags": self.tags + } + + def get_metric_sources(self): + for source in [metric['source'] for metric in self.raw_metrics_status]: + source = source.lower() + if source not in self.sources and source != "none": + self.sources.append(source) + self.sources.append("all") + + def get_metric_types(self): + for metric_type in [metric['metric_type'] for metric in self.raw_metrics_status]: + metric_type = metric_type.lower() + if metric_type not in self.metric_types and metric_type != "none": + self.metric_types.append(metric_type) + self.metric_types.append("all") + + def get_metric_tags(self): + for tag in [(metric['tag'], metric['group']) for metric in self.raw_metrics_status]: + # tag[0] = tag[0].lower() + if tag[0] not in [tag[0] for tag in self.tags] and tag[0] != "none": + self.tags[tag[0]] = tag[1] diff --git a/augur/plugins/__init__.py b/augur/plugins/__init__.py new file mode 100644 --- /dev/null +++ b/augur/plugins/__init__.py @@ -0,0 +1,12 @@ +import pkgutil +import importlib + +__all__ = [] +loaded = [] + +__path__ = pkgutil.extend_path(__path__, __name__) +for importer, modname, ispkg in pkgutil.walk_packages(path=__path__, prefix=__name__+'.'): + if ispkg: + module = importlib.import_module(modname) + __all__.append(modname) + loaded.append(module) \ No newline at end of file diff --git a/augur/plugins/example_plugin/__init__.py b/augur/plugins/example_plugin/__init__.py new file mode 100644 --- /dev/null +++ b/augur/plugins/example_plugin/__init__.py @@ -0,0 +1,2 @@ +from .example_plugin import ExamplePlugin +__all__ = ['ExamplePlugin'] \ No newline at end of file diff --git a/augur/plugins/example_plugin/example_plugin.py b/augur/plugins/example_plugin/example_plugin.py new file mode 100644 --- /dev/null +++ b/augur/plugins/example_plugin/example_plugin.py @@ -0,0 +1,24 @@ +#SPDX-License-Identifier: MIT +from augur import AugurPlugin, Application, logger +# (don't remove the above line, it's for a script) + +class ExamplePlugin(AugurPlugin): + """ + This plugin serves as an example as to how to load plugins into Augur + """ + def __init__(self, app): + self.augur_app = app + logger.info('example-plugin loaded') + return 
+ + def example_metric(self, owner, repo): + return 'Hello, {}/{}'.format(owner, repo) + + def add_routes(self, flask_app): + """ + Responsible for adding this plugin's data sources to the API + """ + flask_app.addMetric(self.example_metric, 'example_metric') + +ExamplePlugin.name = 'example-plugin' +Application.register_plugin(ExamplePlugin) \ No newline at end of file diff --git a/augur/publicwww.py b/augur/publicwww.py deleted file mode 100644 --- a/augur/publicwww.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -PublicWWW is a class for making API requests to https://publicwww.com/ a -search engine for the source of websites -""" -import sys -import pandas as pd -if sys.version_info > (3, 0): - import urllib.parse as url -else: - import urllib as url -# end imports -# (don't remove the above line, it's for a script) - -class PublicWWW(object): - """ - PublicWWW is a class for making API requests to https://publicwww.com/ a - search engine for the source of websites - """ - - def __init__(self, api_key): - """ - Initalizes a PublicWWW instance - - :param api_key: The API key for PublicWWW. This is required to get the full names of more results - """ - self.__api_key = api_key - - ##################################### - ### DIVERSITY AND INCLUSION ### - ##################################### - - - ##################################### - ### GROWTH, MATURITY, AND DECLINE ### - ##################################### - - - ##################################### - ### RISK ### - ##################################### - - - ##################################### - ### VALUE ### - ##################################### - - - ##################################### - ### ACTIVITY ### - ##################################### - - - ##################################### - ### EXPERIMENTAL ### - ##################################### - - def linking_websites(self, owner, repo): - """ - Finds the repo's popularity on the internet - - :param owner: The username of a project's owner - :param repo: The name of the repository - :return: DataFrame with the issues' id the date it was - opened, and the date it was first responded to - """ - - # Find websites that link to that repo - repo_url = "https://github.com/{owner}/{repo}".format(owner=owner, repo=repo) - query = '<a+href%3D"{repourl}"'.format(repourl=url.quote_plus(repo_url)) - req = 'https://publicwww.com/websites/{query}/?export=csv&apikey={apikey}' - req.format(query=query, apikey=self.__api_key) - result = pd.read_csv(req, delimiter=';', header=None, names=['url', 'rank']) - return result \ No newline at end of file diff --git a/augur/routes/__git_routes.py b/augur/routes/__git_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/__git_routes.py @@ -0,0 +1,86 @@ +from flask import Response + +def create_routes(server): + + git = server.augur_app.git() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + ##################################### + ### RISK ### + ##################################### + + ##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ##s/closed# + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + # @server.app.route('/{}/git/repos'.format(server.api_version)) 
+ # def git_downloaded_repos(): #TODO: make this name automatic - wrapper? + # drs = server.transform(git.downloaded_repos) + # return Response(response=drs, + # status=200, + # mimetype="application/json") + # server.updateMetricMetadata(function=git.downloaded_repos, endpoint='/{}/git/repos'.format(server.api_version), metric_type='git') + + """ + @api {get} /git/lines_changed_minus_whitespace/:git_repo_url Lines Changed Minus Whitespace + @apiName LinesChangedMinusWhitespace + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + @apiParam {String} git_repo_url URL of the GitHub repository + @apiSuccessExample {json} Success-Response: + [ + { + "additions":2, + "author_date":"2018-05-14 10:09:57 -0500", + "author_email":"[email protected]", + "author_name":"Sean P. Goggins", + "commit_date":"2018-05-16 10:12:22 -0500", + "committer_email":"[email protected]", + "committer_name":"Derek Howard", + "deletions":0, + "hash":"77e603a", + "message":"merge dev", + "parents":"b8ec0ed" + } + ] + """ + server.addGitMetric(git.lines_changed_minus_whitespace, 'lines_changed') + + """ + @api {get} /git/lines_changed_by_author/:git_repo_url Lines Changed by Author + @apiName LinesChangedByAuthor + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + @apiParam {String} git_repo_url URL of the GitHub repository + @apiSuccessExample {json} Success-Response: + [ + { + "additions":2, + "author_date":"2018-05-14 10:09:57 -0500", + "author_email":"[email protected]", + "author_name":"Sean P. Goggins", + "commit_date":"2018-05-16 10:12:22 -0500", + "committer_email":"[email protected]", + "committer_name":"Derek Howard", + "deletions":0,"hash":"77e603a", + "message":"merge dev", + "parents":"b8ec0ed" + } + ] + """ + server.addGitMetric(git.lines_changed_by_author, 'changes_by_author') diff --git a/augur/routes/__init__.py b/augur/routes/__init__.py new file mode 100644 --- /dev/null +++ b/augur/routes/__init__.py @@ -0,0 +1,24 @@ +import importlib +import os +import glob +from augur.util import getFileID + +def getRouteFiles(): + route_files = [] + + for filename in glob.iglob("**/routes/*"): + if not getFileID(filename).startswith('__'): + route_files.append(getFileID(filename)) + + return route_files + +route_files = getRouteFiles() + +def create_all_datasource_routes(server): + for route_file in route_files: + module = importlib.import_module('.' 
+ route_file, 'augur.routes') + module.create_routes(server) + +def create_status_routes(server): + module = importlib.import_module('.__metric_status_routes', 'augur.routes') + module.create_routes(server) \ No newline at end of file diff --git a/augur/routes/__metric_status_routes.py b/augur/routes/__metric_status_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/__metric_status_routes.py @@ -0,0 +1,182 @@ +from flask import Response, request +from augur.metrics_status import Metric +import json + +def filterBy(status, key, value): + if value == "all" or value == '' or value is None: + return status + elif value is not None: + return [metric for metric in status if metric[key].lower() == value.lower()] + +def create_routes(server): + + metrics_status = server.augur_app.metrics_status() + metrics_status.create_metrics_status() + metrics_status_URL = "metrics/status" + + """ + @api {get} metrics/status Metrics Status + @apiName metrics-status + @apiGroup Metrics-Status + @apiDescription Information about the Augur implementation status of CHAOSS metrics. + + @apiSuccessExample {json} Success-Response: + [ + { + + "ID": "ghtorrent-fakes", + "tag": "fakes", + "name": "Fakes", + "group": "experimental", + "backend_status": "implemented", + "frontend_status": "implemented", + "endpoint": "/api/unstable/<owner>/<repo>/timeseries/fakes", + "source": "ghtorrent", + "metric_type": "timeseries", + "url": "/", + "is_defined": "false" + }, + { + "ID": "ghtorrentplus-closed-issue-resolution-duration", + "tag": "closed-issue-resolution-duration", + "name": "Closed Issue Resolution Duration", + "group": "experimental", + "backend_status": "implemented", + "frontend_status": "unimplemented", + "endpoint": "/api/unstable/<owner>/<repo>/issues/time_to_close", + "source": "ghtorrentplus", + "metric_type": "metric", + "url": "activity-metrics/closed-issue-resolution-duration.md", + "is_defined": "true" + }, + { + "ID": "githubapi-lines-of-code-changed", + "tag": "lines-of-code-changed", + "name": "Lines Of Code Changed", + "group": "experimental", + "backend_status": "implemented", + "frontend_status": "implemented", + "endpoint": "/api/unstable/<owner>/<repo>/timeseries/lines_changed", + "source": "githubapi", + "metric_type": "timeseries", + "url": "activity-metrics/lines-of-code-changed.md", + "is_defined": "true" + } + ] + """ + @server.app.route("/{}/{}".format(server.api_version, metrics_status_URL)) + def metrics_status_view(): + return Response(response=json.dumps(metrics_status.raw_metrics_status), + status=200, + mimetype="application/json") + + """ + @api {get} metrics/status/metadata Metrics Status Metadata + @apiName metrics-status-metadata + @apiGroup Metrics-Status + @apiDescription Metadata about the Augur implemntation status of CHAOSS metrics. + + @apiSuccessExample {json} Success-Response: + [ + { + "groups": [ + { + "diversity-inclusion": "Diversity and Inclusion", + "growth-maturity-decline": "Growth, Maturity, and Decline", + "risk": "Risk", + "value": "Value", + "activity": "Activity", + "experimental": "Experimental" + } + ], + "sources": [ + "ghtorrent", + "ghtorrentplus", + "githubapi", + "downloads", + "facade", + "publicwww", + "librariesio", + "git" + ], + "metric_types": [ + "timeseries", + "metric", + "git" + ], + "tags": { + "listening": "diversity-inclusion", + "speaking": "diversity-inclusion", + ... 
+ } + } + ] + """ + @server.app.route("/{}/{}/metadata".format(server.api_version, metrics_status_URL)) + def metrics_status_metadata_view(): + return Response(response=json.dumps(metrics_status.metadata), + status=200, + mimetype="application/json") + + """ + @api {get} metrics/status/filter?ID=:ID&tag=:tag&group=:group&backend_status=:backend_status&frontend_status=:frontend_status&source=:source&metric_type=:metric_type&is_defined=:is_defined Filtered Metrics Status + @apiName filter-metrics-status + @apiGroup Metrics-Status + @apiDescription Metrics Status that allows for filtering of the results via the query string. Filters can be combined. + + @apiParam {string} [ID] Returns the status of the metric that matches this ID + @apiParam {string} [tag] Returns all the statuses of all metrics that have this tag + @apiParam {string} [group] Returns all the metrics in this metric grouping + @apiParam {string="unimplemented", "undefined", "implemented"} [backend_status] + @apiParam {string="unimplemented", "implemented"} [frontend_status] + @apiParam {string} [source] Returns the statuses of all metrics from this data source + @apiParam {string} [metric_type] Returns the statuses of the metrics of this metric type + @apiParam {string="true", "false"} [is_defined] Returns the statuses of metrics that are or aren't defined + + @apiParamExample {string} Sample Query String: + metrics/status/filter?group=growth-maturity-decline&metric_type=metric + + + @apiSuccessExample {json} Success-Response: + [ + { + "ID": "ghtorrentplus-closed-issue-resolution-duration", + "tag": "closed-issue-resolution-duration", + "name": "Closed Issue Resolution Duration", + "group": "growth-maturity-decline", + "backend_status": "implemented", + "frontend_status": "unimplemented", + "endpoint": "/api/unstable/<owner>/<repo>/issues/time_to_close", + "source": "ghtorrentplus", + "metric_type": "metric", + "url": "activity-metrics/closed-issue-resolution-duration.md", + "is_defined": "true" + }, + { + "ID": "ghtorrent-contributors", + "tag": "contributors", + "name": "Contributors", + "group": "growth-maturity-decline", + "backend_status": "implemented", + "frontend_status": "implemented", + "endpoint": "/api/unstable/<owner>/<repo>/contributors", + "source": "ghtorrent", + "metric_type": "metric", + "url": "activity-metrics/contributors.md", + "is_defined": "true" + } + ] + """ + @server.app.route("/{}/{}/filter".format(server.api_version, metrics_status_URL)) + def filtered_metrics_status_view(): + + filtered_metrics_status = metrics_status.raw_metrics_status + + valid_filters = [key for key in Metric().__dict__.keys() if key != 'name' and key != 'endpoint' and key != 'url'] + + for valid_filter in valid_filters: + filtered_metrics_status = filterBy(filtered_metrics_status, valid_filter, request.args.get(valid_filter)) + + return Response(response=json.dumps(filtered_metrics_status), + status=200, + mimetype="application/json") diff --git a/augur/routes/downloads_routes.py b/augur/routes/downloads_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/downloads_routes.py @@ -0,0 +1,51 @@ +def create_routes(server): + + downloads = server.augur_app.downloads() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + ##################################### + ### RISK ### + ##################################### + + 
##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + """ + @api {get} /:owner/:repo/timeseries/downloads Downloads + @apiName downloads + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2018-06-14", + "downloads": 129148 + }, + { + "date": "2018-06-13", + "downloads": 131262 + } + ] + """ + server.addTimeseries(downloads.downloads, 'downloads') + diff --git a/augur/routes/facade_routes.py b/augur/routes/facade_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/facade_routes.py @@ -0,0 +1,65 @@ +from flask import Response + +def create_routes(server): + + facade = server.augur_app.facade() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + ##################################### + ### RISK ### + ##################################### + + ##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + @server.app.route('/{}/git/repos'.format(server.api_version)) + def facade_downloaded_repos(): #TODO: make this name automatic - wrapper? + drs = server.transform(facade.downloaded_repos) + return Response(response=drs, + status=200, + mimetype="application/json") + server.updateMetricMetadata(function=facade.downloaded_repos, endpoint='/{}/git/repos'.format(server.api_version), metric_type='git') + + """ + @api {get} /git/lines_changed/:git_repo_url Lines Changed by Author + @apiName lines-changed-by-author + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "additions":2, + "author_date":"2018-05-14 10:09:57 -0500", + "author_email":"[email protected]", + "author_name":"Sean P. 
Goggins", + "commit_date":"2018-05-16 10:12:22 -0500", + "committer_email":"[email protected]", + "committer_name":"Derek Howard", + "deletions":0,"hash":"77e603a", + "message":"merge dev", + "parents":"b8ec0ed" + } + ] + """ + server.addGitMetric(facade.lines_changed_by_author, 'changes_by_author') + diff --git a/augur/routes/ghtorrent_routes.py b/augur/routes/ghtorrent_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/ghtorrent_routes.py @@ -0,0 +1,803 @@ +from flask import request, Response + +def create_routes(server): + + ghtorrent = server.augur_app.ghtorrent() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + """ + @api {get} /:owner/:repo/timeseries/issues/closed Closed Issues + @apiName closed-issues + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issues-closed.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiParam {string} group_by (default to week) allows for results to be grouped by day, week, month, or year + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2011-03-19T00:00:00.000Z", + "issues_closed": 3 + }, + { + "date": "2011-03-25T00:00:00.000Z", + "issues_closed": 6 + } + ] + """ + server.addTimeseries(ghtorrent.closed_issues, 'issues/closed') + + """ + @api {get} /:owner/:repo/timeseries/commits?group_by=:group_by Code Commits + @apiName code-commits + @apiGroup Growth-Maturity-Decline + @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/code-commits.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiParam {String} group_by (Default to week) Allows for results to be grouped by day, week, month, or year + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2017-08-27T00:00:00.000Z", + "commits": 44 + }, + { + "date": "2017-08-20T00:00:00.000Z", + "commits": 98 + } + ] + """ + server.addTimeseries(ghtorrent.code_commits, 'commits') + + """ + @api {get} /:owner/:repo/timeseries/code_review_iteration Code Review Iteration + @apiName code-review-iteration + @apiGroup Growth-Maturity-Decline + @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/code-review-iteration.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2012-05-16T00:00:00.000Z", + "iterations": 2 + }, + { + "date": "2012-05-16T00:00:00.000Z", + "iterations": 1 + } + ] + """ + server.addTimeseries(ghtorrent.code_review_iteration, 'code_review_iteration') + + """ + @api {get} /:owner/:repo/timeseries/contribution_acceptance Contribution Acceptance + @apiName contribution-acceptance + @apiGroup Growth-Maturity-Decline + @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/contribution-acceptance.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { 
+ "date": "2012-05-16T00:00:00.000Z", + "ratio": 1.1579 + }, + { + "date": "2012-05-20T00:00:00.000Z", + "ratio": 1.3929 + } + ] + """ + server.addTimeseries(ghtorrent.contribution_acceptance, 'contribution_acceptance') + + """ + @api {get} /:owner/:repo/timeseries/contributing_github_organizations Contributing Github Organizations + @apiName contributing-github-organizations + @apiGroup Growth-Maturity-Decline + @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/contributing-organizations.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "contributing_org": 4066, + "commits": 36069, + "issues": 432, + "commit_comments": 1597, + "issue_comments": 15421, + "pull_requests": 808, + "pull_request_comments": 0, + "total": 54327, + "count": 35 + }, + { + "contributing_org": 16465, + "commits": 39111, + "issues": 332, + "commit_comments": 524, + "issue_comments": 3188, + "pull_requests": 57, + "pull_request_comments": 18, + "total": 43230, + "count": 11 + } + ] + """ + server.addMetric(ghtorrent.contributing_github_organizations, 'contributing_github_organizations') + + """ + @api {get} /:owner/:repo/timeseries/issues/response_time First Response To Issue Duration + @apiName first-response-to-issue-duration + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/first-response-to-issue-duration.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "id": 2, + "opened": "2012-01-19T05:24:55.000Z", + "first_commented": "2012-01-19T05:30:13.000Z", + "pull_request": 0, + "minutes_to_comment": 5 + }, + { + "id": 3, + "opened": "2012-01-26T15:07:56.000Z", + "first_commented": "2012-01-26T15:09:28.000Z", + "pull_request": 0, + "minutes_to_comment": 1 + } + ] + """ + server.addTimeseries(ghtorrent.first_response_to_issue_duration, 'issues/response_time') + + """ + @api {get} /:owner/:repo/timeseries/forks?group_by=:group_by Forks + @apiName forks + @apiGroup Growth-Maturity-Decline + @apiParam {String} group_by (Default to week) Allows for results to be grouped by day, week, month, or year + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/forks.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2017-08-20T00:00:00.000Z", + "projects": 48 + }, + { + "date": "2017-08-13T00:00:00.000Z", + "projects": 53 + } + ] + """ + server.addTimeseries(ghtorrent.forks, 'forks') + + """ + @api {get} /:owner/:repo/pulls/maintainer_response_time Maintainer Response to Merge Request Duration + @apiName maintainer-response-to-merge-request-duration + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/maintainer-response-to-merge-request-duration.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2011-05-10T00:00:00.000Z", + 
"days": 32 + }, + { + "date": "2011-05-21T00:00:00.000Z", + "days": 3 + } + ] + """ + server.addTimeseries(ghtorrent.maintainer_response_to_merge_request_duration, 'pulls/maintainer_response_time') + + """ + @api {get} /:owner/:repo/pulls/new_contributing_github_organizations New Contributing Github Organizations + @apiName new-github-contributing-organizations + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/new-contributing-organizations.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-04-12T23:43:38.000Z", + "organizations": 1 + }, + { + "date": "2008-08-23T15:05:52.000Z", + "organizations": 2 + } + ] + """ + server.addTimeseries(ghtorrent.new_contributing_github_organizations, 'new_contributing_github_organizations') + + """ + @api {get} /:owner/:repo/timeseries/issues Open Issues + @apiName open-issues + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/open-issues.md">CHAOSS Metric Definition</a> + + @apiParam {string} group_by (default to week) allows for results to be grouped by day, week, month, or year + @apiParam {string} owner username of the owner of the github repository + @apiParam {string} repo name of the github repository + + @apiSucessExample {json} success-response: + [ + { + "date": "2017-08-27T00:00:00.000Z", + "issues": 67 + }, + { + "date": "2017-08-20T00:00:00.000Z", + "issues": 100 + } + ] + """ + server.addTimeseries(ghtorrent.open_issues, 'issues') + + """ + @api {get} /:owner/:repo/timeseries/pulls/comments?group_by=:group_by Pull Request Comments + @apiName pull-request-comments + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/pull-request-comments.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2011-11-15T00:00:00.000Z", + "counter": 3 + }, + { + "date": "2011-11-25T00:00:00.000Z", + "counter": 1 + } + ] + + """ + server.addTimeseries(ghtorrent.pull_request_comments, 'pulls/comments') + + """ + @api {get} /:owner/:repo/timeseries/pulls Pull Requests Open + @apiName pull-requests-open + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/pull-requests-open.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2013-01-09T00:00:00.000Z", + "pull_requests": 3 + }, + { + "date": "2016-01-14T00:00:00.000Z", + "pull_requests": 1 + } + ] + """ + server.addTimeseries(ghtorrent.pull_requests_open, 'pulls') + + ##################################### + ### RISK ### + ##################################### + + ##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + """ + @api {get} /:owner/:repo/timeseries/issue_comments Issue Comments + @apiName issue-comments + @apiGroup Activity + 
@apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issue-comments.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2009-04-05T00:00:00.000Z", + "counter": 3 + }, + { + "date": "2009-04-16T00:00:00.000Z", + "counter": 5 + } + ] + """ + server.addTimeseries(ghtorrent.issue_comments, 'issue_comments') + + """ + @api {get} /:owner/:repo/timeseries/pulls/made_closed Pull Requests Made/Closed + @apiName pull-requests-made-closed + @apiGroup Activity + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/pull-requests-made-closed.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2010-09-11T00:00:00.000Z", + "rate": 0.3333 + }, + { + "date": "2010-09-13T00:00:00.000Z", + "rate": 0.3333 + } + ] + """ + server.addTimeseries(ghtorrent.pull_requests_made_closed, 'pulls/made_closed') + + """ + @api {get} /:owner/:repo/timeseries/watchers Watchers + @apiName watchers + @apiGroup Activity + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/activity-metrics-list.md">CHAOSS Metric Definition</a> + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2017-08-23T00:00:00.000Z", + "watchers": 86 + }, + { + "date": "2017-08-16T00:00:00.000Z", + "watchers": 113 + } + ] + """ + server.addTimeseries(ghtorrent.watchers, 'watchers') + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + """ + @api {get} /:owner/:repo/timeseries/commits100 Commits100 + @apiName commits100 + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2017-08-13T00:00:00.000Z", + "commits": 114 + }, + { + "date": "2017-08-06T00:00:00.000Z", + "commits": 113 + } + ] + """ + server.addTimeseries(ghtorrent.commits100, 'commits100') + + """ + @api {get} /:owner/:repo/timeseries/commits/comments Commit Comments + @apiName commit-comments + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-07-10T00:00:00.000Z", + "counter": 2 + }, + { + "date": "2008-07-25T00:00:00.000Z", + "counter": 1 + } + ] + + """ + server.addTimeseries(ghtorrent.commit_comments, 'commits/comments') + + """ + @api {get} /:owner/:repo/committer_locations Committer Locations + @apiName committer-locations + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
+ + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "login": "rafaelfranca", + "location": "São Paulo, Brazil", + "commits": 7171 + }, + { + "login": "tenderlove", + "location": "Seattle", + "commits": 4605 + } + ] + """ + server.addMetric(ghtorrent.committer_locations, 'committer_locations') + + """ + @api {get} /:owner/:repo/timeseries/total_committers Total Committers + @apiName total-committers + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2004-11-24T00:00:00.000Z", + "total_total_committers": 1 + }, + { + "date": "2005-02-18T00:00:00.000Z", + "total_total_committers": 2 + } + ] + """ + server.addTimeseries(ghtorrent.total_committers, 'total_committers') + + """ + @api {get} /:owner/:repo/timeseries/issues/activity Issue Activity + @apiName issue-activity + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "0000-00-00", + "count": 2, + "action": "closed" + }, + { + "date": "0000-00-00", + "count": 70, + "action": "opened" + }, + { + "date": "0000-00-00", + "count": 0, + "action": "reopened" + }, + { + "date": "0000-00-00", + "count": 68, + "action": "open" + }, + { + "date": "2009-04-01T00:00:00.000Z", + "count": 0, + "action": "closed" + }, + { + "date": "2009-04-01T00:00:00.000Z", + "count": 29, + "action": "opened" + }, + { + "date": "2009-04-01T00:00:00.000Z", + "count": 0, + "action": "reopened" + }, + { + "date": "2009-04-01T00:00:00.000Z", + "count": 29, + "action": "open" + } + ] + """ + server.addTimeseries(ghtorrent.issue_activity, 'issues/activity') + + """ + @api {get} /:owner/:repo/timeseries/pulls/acceptance_rate Pull Request Acceptance Rate + @apiDeprecated This endpoint was removed. Please use (#Experimental:community-engagement) + @apiName pull-request-acceptance-rate + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2010-09-11T00:00:00.000Z", + "rate": 0.3333 + }, + { + "date": "2010-09-13T00:00:00.000Z", + "rate": 0.3333 + } + ] + """ + + """ + @api {get} /:owner/:repo/community_age Community Age + @apiName community-age + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
+ + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "login": "bonnie", + "location": "Rowena, TX", + "commits": 12 + }, + { + "login":"clyde", + "location":"Ellis County, TX", + "commits": 12 + } + ] + """ + server.addMetric(ghtorrent.community_age, 'community_age') + + """ + @api {get} /:owner/:repo/timeseries/community_engagement Community Engagement + @apiName community-engagement + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2011-04-16T00:00:00.000Z", + "issues_opened": 0, + "issues_closed": 0, + "pull_requests_opened": 32, + "pull_requests_merged": 0, + "pull_requests_closed": 19, + "issues_opened_total": 4, + "issues_closed_total": 0, + "issues_closed_rate_this_window": null, + "issues_closed_rate_total": 0, + "issues_delta": 0, + "issues_open": 4, + "pull_requests_opened_total": 284, + "pull_requests_closed_total": 242, + "pull_requests_closed_rate_this_window": 0.59375, + "pull_requests_closed_rate_total": 0.8521126761, + "pull_requests_delta": 13, + "pull_requests_open": 42 + }, + { + "date": "2011-04-17T00:00:00.000Z", + "issues_opened": 0, + "issues_closed": 0, + "pull_requests_opened": 15, + "pull_requests_merged": 1, + "pull_requests_closed": 14, + "issues_opened_total": 4, + "issues_closed_total": 0, + "issues_closed_rate_this_window": null, + "issues_closed_rate_total": 0, + "issues_delta": 0, + "issues_open": 4, + "pull_requests_opened_total": 299, + "pull_requests_closed_total": 256, + "pull_requests_closed_rate_this_window": 0.9333333333, + "pull_requests_closed_rate_total": 0.856187291, + "pull_requests_delta": 1, + "pull_requests_open": 43 + } + ] + """ + server.addTimeseries(ghtorrent.community_engagement, 'community_engagement') + + """ + @api {get} /:owner/:repo/contributors Total Contributions by User + @apiName contributors + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "user": 8153, + "commits": 6825, + "issues": 127, + "commit_comments": 313, + "issue_comments": 13152, + "pull_requests": 1, + "pull_request_comments": 0, + "total": 20418 + }, + { + "user": 45381, + "commits": 2192, + "issues": 202, + "commit_comments": 130, + "issue_comments": 4633, + "pull_requests": 0, + "pull_request_comments": 0, + "total": 7157 + } + ] + """ + server.addMetric(ghtorrent.contributors, 'contributors') + + """ + @api {get} /:owner/:repo/timeseries/contributions Contributions + @apiName contributions + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
+ + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiParam (String) user Limit results to the given user's contributions + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2004-11-24T00:00:00.000Z", + "commits": 3, + "pull_requests": null, + "issues": null, + "commit_comments": null, + "pull_request_comments": null, + "issue_comments": null, + "total": null + }, + { + "date": "2004-11-30T00:00:00.000Z", + "commits": 7, + "pull_requests": null, + "issues": null, + "commit_comments": null, + "pull_request_comments": null, + "issue_comments": null, + "total": null + } + ] + """ + # ghtorrent.contributons, 'contributors' + # don't remove the above line it's for a script + @server.app.route('/{}/<owner>/<repo>/contributions'.format(server.api_version)) + def contributions(owner, repo): + repoid = ghtorrent.repoid(owner, repo) + user = request.args.get('user') + transformed_contributors = server.transform(ghtorrent.contributions, args=(owner, repo), orient=request.args.get('orient')) + return Response(response=transformed_contributors, + status=200, + mimetype="application/json") + server.updateMetricMetadata(ghtorrent.contributions, '/api/unstable/<owner>/<repo>/timeseries/contributions') + + """ + @api {get} /:owner/:repo/project_age Project Age + @apiName project-age + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-04-11T00:00:00.000Z", + "{0}": 1 + } + ] + + """ + server.addMetric(ghtorrent.project_age, 'project_age') + + """ + @api {get} /:owner/:repo/timeseries/fakes Fakes + @apiName fakes + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2010-04-09T00:00:00.000Z", + "fakes": 1 + }, + { + "date": "2010-04-27T00:00:00.000Z", + "fakes": 2 + } + ] + """ + server.addTimeseries(ghtorrent.fakes, 'fakes') + + """ + @api {get} /ghtorrent_range GHTorrent Date Range + @apiName GhtorrentRange + @apiGroup Utility + @apiDescription Utility endpoint to show the range of dates GHTorrent covers. 
+ + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-04-10T17:25:06-07:00", + "release": "v0.9.1" + }, + { + "date": "2008-04-10T17:25:07-07:00", + "release": "v0.9.2" + } + ] + """ + @server.app.route('/{}/ghtorrent_range'.format(server.api_version)) + + def ghtorrent_range(): + ghr = server.transform(ghtorrent.ghtorrent_range()) + return Response(response=ghr, + status=200, + mimetype="application/json") + # server.updateMetricMetadata(ghtorrent.ghtorrent_range, '/{}/ghtorrent_range'.format(server.api_version)) diff --git a/augur/routes/ghtorrentplus_routes.py b/augur/routes/ghtorrentplus_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/ghtorrentplus_routes.py @@ -0,0 +1,50 @@ +def create_routes(server): + + ghtorrentplus = server.augur_app.ghtorrentplus() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + """ + @api {get} /:owner/:repo/issue_close_time Closed Issue Resolution Duration + @apiName closed-issue-resolution-duration + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issue-resolution-duration.md">CHAOSS Metric Definition</a> + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiSuccessExample {json} Success-Response: + [ + { + "id": 2, + "date": "2012-01-19T05:24:55.000Z", + "days_to_close": 7 + }, + { + "id": 3, + "date": "2012-01-26T15:07:56.000Z", + "days_to_close": 0 + } + ] + """ + server.addMetric(ghtorrentplus.closed_issue_resolution_duration, 'issues/time_to_close') + + ##################################### + ### RISK ### + ##################################### + + ##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### diff --git a/augur/routes/githubapi_routes.py b/augur/routes/githubapi_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/githubapi_routes.py @@ -0,0 +1,117 @@ +def create_routes(server): + + github = server.augur_app.githubapi() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + """ + @api {get} /:owner/:repo/timeseries/lines_changed Lines of Code Changed + @apiName lines-of-code-changed + @apiGroup Growth-Maturity-Decline + @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/lines-of-code-changed.md">CHAOSS Metric Definition</a> + + @apiGroup Growth-Maturity-Decline + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + 'date': '2015-11-01T00:00:00Z', + 'lines_changed': 396137.0 + }, + { + 'date': '2015-11-08T00:00:00Z', + 'lines_changed': 3896.0 + } + ] + """ + server.addTimeseries(github.lines_of_code_changed, 'lines_changed') + + ##################################### + ### RISK ### + ##################################### + + ##################################### + 
### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + """ + @api {get} /:owner/:repo/bus_factor Bus Factor + @apiName bus-factor + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + @apiParam {Int} threshold Percentage used to determine how many lost people would kill the project + + @apiSuccessExample {json} Success-Response: + [ + { + "best": "5", + "worst": "1" + } + ] + """ + server.addMetric(github.bus_factor, "bus_factor") + + """ + @api {get} /:owner/:repo/timeseries/tags/major Major Tags + @apiName major-tags + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-04-10T17:25:14-07:00", + "release": "v1.0.0" + }, + { + "date": "2008-04-10T17:25:47-07:00", + "release": "v2.0.0" + } + ] + """ + server.addTimeseries(github.major_tags, 'tags/major') + + """ + @api {get} /:owner/:repo/timeseries/tags Tags + @apiName tags + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2008-04-10T17:25:06-07:00", + "release": "v0.9.1" + }, + { + "date": "2008-04-10T17:25:07-07:00", + "release": "v0.9.2" + } + ] + """ + server.addTimeseries(github.tags, 'tags') \ No newline at end of file diff --git a/augur/routes/librariesio_routes.py b/augur/routes/librariesio_routes.py new file mode 100644 --- /dev/null +++ b/augur/routes/librariesio_routes.py @@ -0,0 +1,196 @@ +def create_routes(server): + + librariesio = server.augur_app.librariesio() + + ##################################### + ### DIVERSITY AND INCLUSION ### + ##################################### + + ##################################### + ### GROWTH, MATURITY, AND DECLINE ### + ##################################### + + ##################################### + ### RISK ### + ##################################### + + ##################################### + ### VALUE ### + ##################################### + + ##################################### + ### ACTIVITY ### + ##################################### + + ##################################### + ### EXPERIMENTAL ### + ##################################### + + """ + @api {get} /:owner/:repo/dependencies Dependencies + @apiName dependencies + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally.
+ + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + "full_name": "rails/rails", + "description": "Ruby on Rails", + "fork": false, + "created_at": "2008-04-11T02:19:47.000Z", + "updated_at": "2018-05-08T14:18:07.000Z", + "pushed_at": "2018-05-08T11:38:30.000Z", + "homepage": "http://rubyonrails.org", + "size": 163747, + "stargazers_count": 39549, + "language": "Ruby", + "has_issues": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 16008, + "mirror_url": null, + "open_issues_count": 1079, + "default_branch": "master", + "subscribers_count": 2618, + "uuid": "8514", + "source_name": null, + "license": "MIT", + "private": false, + "contributions_count": 2627, + "has_readme": "README.md", + "has_changelog": null, + "has_contributing": "CONTRIBUTING.md", + "has_license": "MIT-LICENSE", + "has_coc": "CODE_OF_CONDUCT.md", + "has_threat_model": null, + "has_audit": null, + "status": null, + "last_synced_at": "2018-03-31T12:40:28.163Z", + "rank": 28, + "host_type": "GitHub", + "host_domain": null, + "name": null, + "scm": "git", + "fork_policy": null, + "github_id": "8514", + "pull_requests_enabled": null, + "logo_url": null, + "github_contributions_count": 2627, + "keywords": [ + "activejob", + "activerecord", + "framework", + "html", + "mvc", + "rails", + "ruby" + ], + "dependencies": [ + { + "project_name": "blade-sauce_labs_plugin", + "name": "blade-sauce_labs_plugin", + "platform": "rubygems", + "requirements": "0.7.2", + "latest_stable": "0.7.3", + "latest": "0.7.3", + "deprecated": false, + "outdated": true, + "filepath": "Gemfile.lock", + "kind": "runtime" + }, + { + "project_name": "blade-qunit_adapter", + "name": "blade-qunit_adapter", + "platform": "rubygems", + "requirements": "2.0.1", + "latest_stable": "2.0.1", + "latest": "2.0.1", + "deprecated": false, + "outdated": false, + "filepath": "Gemfile.lock", + "kind": "runtime" + } + ] + """ + server.addMetric(librariesio.dependencies, 'dependencies') + + """ + @api {get} /:owner/:repo/dependency_stats Dependency Stats + @apiName dependency-stats + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. + + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "dependencies": "10", + "dependent_projects": "10.6K", + "dependent_repositories": "392K" + } + ] + """ + server.addMetric(librariesio.dependency_stats, 'dependency_stats') + + """ + @api {get} /:owner/:repo/dependents Dependents + @apiName dependents + @apiGroup Experimental + @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
+ + @apiParam {String} owner Username of the owner of the GitHub repository + @apiParam {String} repo Name of the GitHub repository + + @apiSuccessExample {json} Success-Response: + [ + { + "name": "rspec-rails", + "platform": "Rubygems", + "description": "rspec-rails is a testing framework for Rails 3+.", + "homepage": "https://github.com/rspec/rspec-rails", + "repository_url": "https://github.com/rspec/rspec-rails", + "normalized_licenses": [ + "MIT" + ], + "rank": 26, + "latest_release_published_at": "2017-11-20T09:27:22.144Z", + "latest_release_number": "3.7.2", + "language": "Ruby", + "status": null, + "package_manager_url": "https://rubygems.org/gems/rspec-rails", + "stars": 3666, + "forks": 732, + "keywords": [], + "latest_stable_release": { + "id": 11315605, + "project_id": 245284, + "number": "3.7.2", + "published_at": "2017-11-20T09:27:22.144Z", + "created_at": "2017-11-20T09:31:11.532Z", + "updated_at": "2017-11-20T09:31:11.532Z", + "runtime_dependencies_count": 7 + }, + "latest_download_url": "https://rubygems.org/downloads/rspec-rails-3.7.2.gem", + "dependents_count": 4116, + "dependent_repos_count": 129847, + "versions": [ + { + "number": "2.12.2", + "published_at": "2013-01-12T18:56:40.027Z" + }, + { + "number": "2.12.1", + "published_at": "2013-01-07T23:04:53.104Z" + }, + { + "number": "2.12.0", + "published_at": "2012-11-13T03:37:01.354Z" + } + ] + """ + server.addMetric(librariesio.dependents, 'dependents') diff --git a/augur/runtime.py b/augur/runtime.py --- a/augur/runtime.py +++ b/augur/runtime.py @@ -1,3 +1,8 @@ +#SPDX-License-Identifier: MIT +""" +Runs Augur with Gunicorn when called +""" + import multiprocessing as mp import sched import os @@ -10,11 +15,14 @@ from gunicorn.six import iteritems from gunicorn.arbiter import Arbiter + + class AugurGunicornApp(gunicorn.app.base.BaseApplication): def __init__(self, options=None): self.options = options or {} super(AugurGunicornApp, self).__init__() + # self.cfg.pre_request.set(pre_request) def load_config(self): config = dict([(key, value) for key, value in iteritems(self.options) @@ -27,6 +35,7 @@ def load(self): return server.app def run(): + mp.set_start_method('forkserver') app = augur.Application() app.arg_parser.add_argument("-u", "--updater", action="store_true", @@ -47,7 +56,6 @@ def exit(): os._exit(0) - if not args.updater: host = app.read_config('Server', 'host', 'AUGUR_HOST', '0.0.0.0') port = app.read_config('Server', 'port', 'AUGUR_PORT', '5000') @@ -55,7 +63,8 @@ def exit(): options = { 'bind': '%s:%s' % (host, port), 'workers': workers, - 'accesslog': '-' + 'accesslog': '-', + 'access_log_format': '%(h)s - %(t)s - %(r)s', } logger.info('Starting server...') master = Arbiter(AugurGunicornApp(options)).run() @@ -67,5 +76,4 @@ def exit(): exit() if __name__ == '__main__': - mp.set_start_method('forkserver') run() diff --git a/augur/server.py b/augur/server.py --- a/augur/server.py +++ b/augur/server.py @@ -1,27 +1,26 @@ -#spdx-license-identifier: mit +#SPDX-License-Identifier: MIT +""" +Creates a WSGI server that serves the Augur REST API +""" import os import sys import json +import re +import html from flask import Flask, request, Response, send_from_directory from flask_cors import CORS import pandas as pd import augur -from augur.util import annotate, metrics - -sys.path.append('..') +from augur.util import annotate, metric_metadata, logger +from augur.routes import create_all_datasource_routes, create_status_routes AUGUR_API_VERSION = 'api/unstable' -''' -make a try and accept condition -if its open 
the GH_DATA_CONFIG_FILE and then its open in read mode -and if the file does't open the it print Couldn\'t open config file, attempting to create. -''' - class Server(object): def __init__(self): # Create Flask application self.app = Flask(__name__) + self.api_version = AUGUR_API_VERSION app = self.app CORS(app) @@ -34,1228 +33,137 @@ def __init__(self): self.cache = augur_app.cache.get_cache('server', expire=expire) self.cache.clear() - # Initalize all of the classes - ghtorrent = augur_app.ghtorrent() - ghtorrentplus = augur_app.ghtorrentplus() - publicwww = augur_app.publicwww() - git = augur_app.git() - github = augur_app.githubapi() - librariesio = augur_app.librariesio() - downloads = augur_app.downloads() - localcsv = augur_app.localcsv() + self.show_metadata = False + create_all_datasource_routes(self) + + # this needs to be the last route creation function called so that all the metrics have their metadata updated + create_status_routes(self) ##################################### - ### API STATUS ### + ### UTILITY ### ##################################### - @app.route('/{}/'.format(AUGUR_API_VERSION)) + @app.route('/{}/'.format(self.api_version)) def status(): status = { - 'status': 'OK', - 'avaliable_metrics': metrics + 'status': 'OK' } - json = self.transform(status) - return Response(response=json, + return Response(response=json.dumps(status), status=200, mimetype="application/json") - - ##################################### - ### DIVERSITY AND INCLUSION ### - ##################################### - - - ##################################### - ### GROWTH, MATURITY, AND DECLINE ### - ##################################### - - """ - @api {get} /:owner/:repo/timeseries/issues/closed Closed Issues - @apiName ClosedIssues - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issues-closed.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - @apiParam {string} group_by (default to week) allows for results to be grouped by day, week, month, or year - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2011-03-19T00:00:00.000Z", - "issues_closed": 3 - }, - { - "date": "2011-03-25T00:00:00.000Z", - "issues_closed": 6 - } - ] - """ - self.addTimeseries(ghtorrent.closed_issues, "issues/closed") - - """ - @api {get} /:owner/:repo/issue_close_time Issue Resolution Duration - @apiName IssueResolutionDuration - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issue-resolution-duration.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "id": 2, - "date": "2012-01-19T05:24:55.000Z", - "days_to_close": 7 - }, - { - "id": 3, - "date": "2012-01-26T15:07:56.000Z", - "days_to_close": 0 - } - ] - """ - self.addMetric(ghtorrentplus.closed_issue_resolution_duration, 'issues/time_to_close') - - """ - @api {get} /:owner/:repo/timeseries/commits?group_by=:group_by Code Commits - @apiName CodeCommits - @apiGroup Growth-Maturity-Decline - @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/code-commits.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of 
the GitHub repository - @apiParam {String} group_by (Default to week) Allows for results to be grouped by day, week, month, or year - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2017-08-27T00:00:00.000Z", - "commits": 44 - }, - { - "date": "2017-08-20T00:00:00.000Z", - "commits": 98 - } - ] - """ - self.addTimeseries(ghtorrent.code_commits, 'commits') - - # self.addTimeseries(github.code_reviews, 'code_reviews') - - """ - @api {get} /:owner/:repo/timeseries/code_review_iteration Code Review Iteration - @apiName CodeReviewIteration - @apiGroup Growth-Maturity-Decline - @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/code-review-iteration.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2012-05-16T00:00:00.000Z", - "iterations": 2 - }, - { - "date": "2012-05-16T00:00:00.000Z", - "iterations": 1 - } - ] - """ - self.addTimeseries(ghtorrent.code_review_iteration, 'code_review_iteration') - - """ - @api {get} /:owner/:repo/timeseries/contribution_acceptance Contribution Acceptance - @apiName ContributionAcceptance - @apiGroup Growth-Maturity-Decline - @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/contribution-acceptance.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2012-05-16T00:00:00.000Z", - "ratio": 1.1579 - }, - { - "date": "2012-05-20T00:00:00.000Z", - "ratio": 1.3929 - } - ] - """ - self.addTimeseries(ghtorrent.contribution_acceptance, 'contribution_acceptance') - - """ - @api {get} /:owner/:repo/timeseries/contributing_github_organizations Contributing Github Organizations - @apiName ContributingGithubOrganizations - @apiGroup Growth-Maturity-Decline - @apiDescription/github.<a href="com/chaoss/metrics/blob/master/activity-metrics/contributing-organizations.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "contributing_org": 4066, - "commits": 36069, - "issues": 432, - "commit_comments": 1597, - "issue_comments": 15421, - "pull_requests": 808, - "pull_request_comments": 0, - "total": 54327, - "count": 35 - }, - { - "contributing_org": 16465, - "commits": 39111, - "issues": 332, - "commit_comments": 524, - "issue_comments": 3188, - "pull_requests": 57, - "pull_request_comments": 18, - "total": 43230, - "count": 11 - } - ] - """ - self.addMetric(ghtorrent.contributing_github_organizations, 'contributing_github_organizations') - - """ - @api {get} /:owner/:repo/timeseries/issues/response_time First Response To Issue Duration - @apiName FirstResponseToIssueDuration - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/first-response-to-issue-duration.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "id": 2, - "opened": "2012-01-19T05:24:55.000Z", - "first_commented": "2012-01-19T05:30:13.000Z", - "pull_request": 0, - 
"minutes_to_comment": 5 - }, - { - "id": 3, - "opened": "2012-01-26T15:07:56.000Z", - "first_commented": "2012-01-26T15:09:28.000Z", - "pull_request": 0, - "minutes_to_comment": 1 - } - ] - """ - self.addTimeseries(ghtorrent.first_response_to_issue_duration, 'issues/response_time') - - """ - @api {get} /:owner/:repo/timeseries/forks?group_by=:group_by Forks - @apiName Forks - @apiGroup Growth-Maturity-Decline - @apiParam {String} group_by (Default to week) Allows for results to be grouped by day, week, month, or year - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/forks.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2017-08-20T00:00:00.000Z", - "projects": 48 - }, - { - "date": "2017-08-13T00:00:00.000Z", - "projects": 53 - } - ] - """ - self.addTimeseries(ghtorrent.forks, 'forks') - - """ - @api {get} /:owner/:repo/timeseries/lines_changed Lines of Code Changed - @apiName LinesOfCodeChanged - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/lines-of-code-changed.md">CHAOSS Metric Definition</a> - - @apiGroup Growth-Maturity-Decline - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - 'date': '2015-11-01T00:00:00Z', - 'lines_changed': 396137.0 - }, - { - 'date': '2015-11-08T00:00:00Z', - 'lines_changed': 3896.0 - } - ] - """ - self.addTimeseries(github.lines_of_code_changed, 'lines_changed') - - """ - @api {get} /:owner/:repo/pulls/maintainer_response_time Maintainer to Merge Request Duration - @apiName MaintainerToMergeRequestDuration - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/maintainer-response-to-merge-request-duration.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2011-05-10T00:00:00.000Z", - "days": 32 - }, - { - "date": "2011-05-21T00:00:00.000Z", - "days": 3 - } - ] - """ - self.addTimeseries(ghtorrent.maintainer_response_to_merge_request_duration, 'pulls/maintainer_response_time') - - """ - @api {get} /:owner/:repo/pulls/new_contributing_github_organizations New Contributing Github Organizations - @apiName NewContributingGithubOrganizations - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/OSSHealth/metrics/blob/master/activity-metrics/new-contributing-organizations.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-04-12T23:43:38.000Z", - "organizations": 1 - }, - { - "date": "2008-08-23T15:05:52.000Z", - "organizations": 2 - } - ] - """ - self.addTimeseries(ghtorrent.new_contributing_github_organizations, 'new_contributing_github_organizations') - - """ - @api {get} /:owner/:repo/timeseries/issues Open Issues - @apiName OpenIssues - @apiGroup Growth-Maturity-Decline - @apiDescription <a 
href="https://github.com/chaoss/metrics/blob/master/activity-metrics/open-issues.md">CHAOSS Metric Definition</a> - - @apiParam {string} group_by (default to week) allows for results to be grouped by day, week, month, or year - @apiParam {string} owner username of the owner of the github repository - @apiParam {string} repo name of the github repository - - @apiSucessExample {json} success-response: - [ - { - "date": "2017-08-27T00:00:00.000Z", - "issues": 67 - }, - { - "date": "2017-08-20T00:00:00.000Z", - "issues": 100 - } - ] - """ - self.addTimeseries(ghtorrent.open_issues, 'issues') - - """ - @api {get} /:owner/:repo/timeseries/pulls/comments?group_by=:group_by Pull Request Comments - @apiName PullRequestComments - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/pull-request-comments.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2011-11-15T00:00:00.000Z", - "counter": 3 - }, - { - "date": "2011-11-25T00:00:00.000Z", - "counter": 1 - } - ] - """ - self.addTimeseries(ghtorrent.pull_request_comments, 'pulls/comments') - - """ - @api {get} /:owner/:repo/timeseries/pulls Pull Requests Open - @apiName PullRequestsOpen - @apiGroup Growth-Maturity-Decline - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/pull-requests-open.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2013-01-09T00:00:00.000Z", - "pull_requests": 3 - }, - { - "date": "2016-01-14T00:00:00.000Z", - "pull_requests": 1 - } - ] + @api {post} /batch Batch Requests + @apiName Batch + @apiGroup Batch + @apiDescription Returns results of batch requests + POST JSON of api requests """ - self.addTimeseries(ghtorrent.pull_requests_open, 'pulls') - - - ##################################### - ### RISK ### - ##################################### + @app.route('/{}/batch'.format(self.api_version), methods=['GET', 'POST']) + def batch(): + """ + Execute multiple requests, submitted as a batch. + :statuscode 207: Multi status + """ + """ + to have on future batch request for each individual chart: - ##################################### - ### VALUE ### - ##################################### + - timeseries/metric + - props that are in current card files (title) + - do any of these things act like the vuex states? + - what would singular card(dashboard) look like now? 
- ##################################### - ### ACTIVITY ### - ##################################### - """ - @api {get} /:owner/:repo/timeseries/issue_comments Issue Comments - @apiName IssueComments - @apiGroup Activity - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/issue-comments.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2009-04-05T00:00:00.000Z", - "counter": 3 - }, - { - "date": "2009-04-16T00:00:00.000Z", - "counter": 5 - } - ] - """ - self.addTimeseries(ghtorrent.issue_comments, 'issue/comments') + """ - """ - @api {get} /:owner/:repo/watchers Watchers - @apiName Watchers - @apiGroup Activity - @apiDescription <a href="https://github.com/chaoss/metrics/blob/master/activity-metrics/activity-metrics-list.md">CHAOSS Metric Definition</a> - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2017-08-23T00:00:00.000Z", - "watchers": 86 - }, - { - "date": "2017-08-16T00:00:00.000Z", - "watchers": 113 - } - ] - """ - self.addMetric(ghtorrent.watchers, 'watchers') + self.show_metadata = False - ##################################### - ### EXPERIMENTAL ### - ##################################### + if request.method == 'GET': + """this will return sensible defaults in the future""" + return app.make_response('{"status": "501", "response": "Defaults for batch requests not implemented. Please POST a JSON array of requests to this endpoint for now."}') - ### COMMIT RELATED ### - """ - @api {get} /:owner/:repo/timeseries/commits100 Commits100 - @apiName Commits100 - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2017-08-13T00:00:00.000Z", - "commits": 114 - }, - { - "date": "2017-08-06T00:00:00.000Z", - "commits": 113 - } - ] - """ - self.addTimeseries(ghtorrent.commits100, 'commits100') + try: + requests = json.loads(request.data) + except ValueError as e: + request.abort(400) - """ - @api {get} /:owner/:repo/timeseries/commits/comments Commit Comments - @apiName CommitComments - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-07-10T00:00:00.000Z", - "counter": 2 - }, - { - "date": "2008-07-25T00:00:00.000Z", - "counter": 1 - } - ] + responses = [] - """ - self.addTimeseries(ghtorrent.commit_comments, 'commits/comments') + for index, req in enumerate(requests): - """ - @api {get} /:owner/:repo/committer_locations Committer Locations - @apiName CommitterLocations - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "login": "rafaelfranca", - "location": "São Paulo, Brazil", - "commits": 7171 - }, - { - "login": "tenderlove", - "location": "Seattle", - "commits": 4605 - } - ] - """ - self.addMetric(ghtorrent.committer_locations, 'committer_locations') - """ - @api {get} /:owner/:repo/timeseries/total_committers Total Committers - @apiName TotalCommitters - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2004-11-24T00:00:00.000Z", - "total_total_committers": 1 - }, - { - "date": "2005-02-18T00:00:00.000Z", - "total_total_committers": 2 - } - ] - """ - self.addTimeseries(ghtorrent.total_committers, 'total_committers') + method = req['method'] + path = req['path'] + body = req.get('body', None) - ### ISSUE RELATED ### - """ - @api {get} /:owner/:repo/timeseries/issues/activity Issue Activity - @apiName IssueActivity - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "0000-00-00", - "count": 2, - "action": "closed" - }, - { - "date": "0000-00-00", - "count": 70, - "action": "opened" - }, - { - "date": "0000-00-00", - "count": 0, - "action": "reopened" - }, - { - "date": "0000-00-00", - "count": 68, - "action": "open" - }, - { - "date": "2009-04-01T00:00:00.000Z", - "count": 0, - "action": "closed" - }, - { - "date": "2009-04-01T00:00:00.000Z", - "count": 29, - "action": "opened" - }, - { - "date": "2009-04-01T00:00:00.000Z", - "count": 0, - "action": "reopened" - }, - { - "date": "2009-04-01T00:00:00.000Z", - "count": 29, - "action": "open" - } - ] - """ - self.addTimeseries(ghtorrent.issue_activity, 'issues/activity') + try: - # PULL REQUEST RELATED - """ - @api {get} /:owner/:repo/timeseries/pulls/acceptance_rate Pull Request Acceptance Rate - @apiName PullRequestAcceptanceRate - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2010-09-11T00:00:00.000Z", - "rate": 0.3333 - }, - { - "date": "2010-09-13T00:00:00.000Z", - "rate": 0.3333 - } - ] - """ - self.addTimeseries(ghtorrent.pull_request_acceptance_rate, 'pulls/acceptance_rate') + logger.debug('batch-internal-loop: %s %s' % (method, path)) - # COMMUNITY / CONTRIBUTIONS - """ - @api {get} /:owner/:repo/timeseries/community_age Community Age - @apiName CommunityAge - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "login": "bonnie", - "location": "Rowena, TX", - "commits": 12 - }, - { - "login":"clyde", - "location":"Ellis County, TX", - "commits": 12 - } - ] - """ - self.addMetric(ghtorrent.community_age, 'community_age') + with app.app_context(): + with app.test_request_context(path, + method=method, + data=body): + try: + # Can modify flask.g here without affecting + # flask.g of the root request for the batch - """ - @api {get} /:owner/:repo/timeseries/community_engagement Community Engagement - @apiName CommunityEngagement - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2011-04-16T00:00:00.000Z", - "issues_opened": 0, - "issues_closed": 0, - "pull_requests_opened": 32, - "pull_requests_merged": 0, - "pull_requests_closed": 19, - "issues_opened_total": 4, - "issues_closed_total": 0, - "issues_closed_rate_this_window": null, - "issues_closed_rate_total": 0, - "issues_delta": 0, - "issues_open": 4, - "pull_requests_opened_total": 284, - "pull_requests_closed_total": 242, - "pull_requests_closed_rate_this_window": 0.59375, - "pull_requests_closed_rate_total": 0.8521126761, - "pull_requests_delta": 13, - "pull_requests_open": 42 - }, - { - "date": "2011-04-17T00:00:00.000Z", - "issues_opened": 0, - "issues_closed": 0, - "pull_requests_opened": 15, - "pull_requests_merged": 1, - "pull_requests_closed": 14, - "issues_opened_total": 4, - "issues_closed_total": 0, - "issues_closed_rate_this_window": null, - "issues_closed_rate_total": 0, - "issues_delta": 0, - "issues_open": 4, - "pull_requests_opened_total": 299, - "pull_requests_closed_total": 256, - "pull_requests_closed_rate_this_window": 0.9333333333, - "pull_requests_closed_rate_total": 0.856187291, - "pull_requests_delta": 1, - "pull_requests_open": 43 - } - ] - """ - self.addTimeseries(ghtorrent.community_engagement, 'community_engagement') + # Pre process Request + rv = app.preprocess_request() - """ - @api {get} /:owner/:repo/contributors Total Contributions by User - @apiName TotalContributions - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "user": 8153, - "commits": 6825, - "issues": 127, - "commit_comments": 313, - "issue_comments": 13152, - "pull_requests": 1, - "pull_request_comments": 0, - "total": 20418 - }, - { - "user": 45381, - "commits": 2192, - "issues": 202, - "commit_comments": 130, - "issue_comments": 4633, - "pull_requests": 0, - "pull_request_comments": 0, - "total": 7157 - } - ] - """ - self.addMetric(ghtorrent.contributors, 'contributors') + if rv is None: + # Main Dispatch + rv = app.dispatch_request() - """ - @api {get} /:owner/:repo/timeseries/contributions Contributions - @apiName ContributionsByWeek - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - @apiParam (String) user Limit results to the given user's contributions - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2004-11-24T00:00:00.000Z", - "commits": 3, - "pull_requests": null, - "issues": null, - "commit_comments": null, - "pull_request_comments": null, - "issue_comments": null, - "total": null - }, - { - "date": "2004-11-30T00:00:00.000Z", - "commits": 7, - "pull_requests": null, - "issues": null, - "commit_comments": null, - "pull_request_comments": null, - "issue_comments": null, - "total": null - } - ] - """ - @app.route('/{}/<owner>/<repo>/contributions'.format(AUGUR_API_VERSION)) - def contributions(owner, repo): - repoid = ghtorrent.repoid(owner, repo) - user = request.args.get('user') - contribs = ghtorrent.contributions(owner, repo) - transformed_contributors = self.transform(contribs, orient=request.args.get('orient')) - return Response(response=transformed_contributors, - status=200, - mimetype="application/json") + except Exception as e: + rv = app.handle_user_exception(e) - """ - @api {get} /:owner/:repo/timeseries/project_age Project Age - @apiName ProjectAge - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-04-11T00:00:00.000Z", - "{0}": 1 - } - ] - - """ - self.addMetric(ghtorrent.project_age, 'project_age') + response = app.make_response(rv) - ### DEPENDENCY RELATED ### - """ - @api {get} /:owner/:repo/dependencies Dependencies - @apiName Dependencies - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - "full_name": "rails/rails", - "description": "Ruby on Rails", - "fork": false, - "created_at": "2008-04-11T02:19:47.000Z", - "updated_at": "2018-05-08T14:18:07.000Z", - "pushed_at": "2018-05-08T11:38:30.000Z", - "homepage": "http://rubyonrails.org", - "size": 163747, - "stargazers_count": 39549, - "language": "Ruby", - "has_issues": true, - "has_wiki": false, - "has_pages": false, - "forks_count": 16008, - "mirror_url": null, - "open_issues_count": 1079, - "default_branch": "master", - "subscribers_count": 2618, - "uuid": "8514", - "source_name": null, - "license": "MIT", - "private": false, - "contributions_count": 2627, - "has_readme": "README.md", - "has_changelog": null, - "has_contributing": "CONTRIBUTING.md", - "has_license": "MIT-LICENSE", - "has_coc": "CODE_OF_CONDUCT.md", - "has_threat_model": null, - "has_audit": null, - "status": null, - "last_synced_at": "2018-03-31T12:40:28.163Z", - "rank": 28, - "host_type": "GitHub", - "host_domain": null, - "name": null, - "scm": "git", - "fork_policy": null, - "github_id": "8514", - "pull_requests_enabled": null, - "logo_url": null, - "github_contributions_count": 2627, - "keywords": [ - "activejob", - "activerecord", - "framework", - "html", - "mvc", - "rails", - "ruby" - ], - "dependencies": [ - { - "project_name": "blade-sauce_labs_plugin", - "name": "blade-sauce_labs_plugin", - "platform": "rubygems", - "requirements": "0.7.2", - "latest_stable": "0.7.3", - "latest": "0.7.3", - "deprecated": false, - "outdated": true, - "filepath": "Gemfile.lock", - "kind": "runtime" - }, - { - "project_name": "blade-qunit_adapter", - "name": "blade-qunit_adapter", - "platform": "rubygems", - "requirements": "2.0.1", - "latest_stable": "2.0.1", - "latest": "2.0.1", - "deprecated": false, - "outdated": false, - "filepath": "Gemfile.lock", - "kind": "runtime" - } - ] - """ - self.addMetric(librariesio.dependencies, 'dependencies') - - """ - @api {get} /:owner/:repo/dependency_stats Dependency Stats - @apiName DependencyStats - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "dependencies": "10", - "dependent_projects": "10.6K", - "dependent_repositories": "392K" - } - ] - """ - self.addMetric(librariesio.dependency_stats, 'dependency_stats') + # Post process Request + response = app.process_response(response) - """ - @api {get} /:owner/:repo/dependents Dependents - @apiName Dependents - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "name": "rspec-rails", - "platform": "Rubygems", - "description": "rspec-rails is a testing framework for Rails 3+.", - "homepage": "https://github.com/rspec/rspec-rails", - "repository_url": "https://github.com/rspec/rspec-rails", - "normalized_licenses": [ - "MIT" - ], - "rank": 26, - "latest_release_published_at": "2017-11-20T09:27:22.144Z", - "latest_release_number": "3.7.2", - "language": "Ruby", - "status": null, - "package_manager_url": "https://rubygems.org/gems/rspec-rails", - "stars": 3666, - "forks": 732, - "keywords": [], - "latest_stable_release": { - "id": 11315605, - "project_id": 245284, - "number": "3.7.2", - "published_at": "2017-11-20T09:27:22.144Z", - "created_at": "2017-11-20T09:31:11.532Z", - "updated_at": "2017-11-20T09:31:11.532Z", - "runtime_dependencies_count": 7 - }, - "latest_download_url": "https://rubygems.org/downloads/rspec-rails-3.7.2.gem", - "dependents_count": 4116, - "dependent_repos_count": 129847, - "versions": [ - { - "number": "2.12.2", - "published_at": "2013-01-12T18:56:40.027Z" - }, - { - "number": "2.12.1", - "published_at": "2013-01-07T23:04:53.104Z" - }, - { - "number": "2.12.0", - "published_at": "2012-11-13T03:37:01.354Z" - } - ] - """ - self.addMetric(librariesio.dependents, 'dependents') + # Response is a Flask response object. + # _read_response(response) reads response.response + # and returns a string. If your endpoints return JSON object, + # this string would be the response as a JSON string. + responses.append({ + "path": path, + "status": response.status_code, + "response": str(response.get_data(), 'utf8'), + }) - ### OTHER ### - """ - @api {get} /:owner/:repo/bus_factor Bus Factor - @apiName BusFactor - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "best": "5", - "worst": "1" - } - ] - """ - self.addMetric(github.bus_factor, "bus_factor") + except Exception as e: - """ - @api {get} /git/lines_changed/:git_repo_url Lines Changed by Author - @apiName ChangesByAuthor - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "additions":2, - "author_date":"2018-05-14 10:09:57 -0500", - "author_email":"[email protected]", - "author_name":"Sean P. Goggins", - "commit_date":"2018-05-16 10:12:22 -0500", - "committer_email":"[email protected]", - "committer_name":"Derek Howard", - "deletions":0,"hash":"77e603a", - "message":"merge dev", - "parents":"b8ec0ed" - } - ] - """ - self.addGitMetric(git.changes_by_author, 'changes_by_author') + responses.append({ + "path": path, + "status": 500, + "response": str(e) + }) - """ - @api {get} /:owner/:repo/timeseries/downloads Downloads - @apiName Downloads - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2018-06-14", - "downloads": 129148 - }, - { - "date": "2018-06-13", - "downloads": 131262 - } - ] - """ - self.addTimeseries(downloads.downloads, 'downloads') - @app.route('/{}/git/repos'.format(AUGUR_API_VERSION)) - def downloaded_repos(): - drs = self.transform(git.downloaded_repos()) - return Response(response=drs, - status=200, + return Response(response=json.dumps(responses), + status=207, mimetype="application/json") - """ - @api {get} /:owner/:repo/timeseries/fakes Fakes - @apiName Fakes - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2010-04-09T00:00:00.000Z", - "fakes": 1 - }, - { - "date": "2010-04-27T00:00:00.000Z", - "fakes": 2 - } - ] - """ - self.addTimeseries(ghtorrent.fakes, 'fakes') - - """ - @api {get} /git/lines_changed/:git_repo_url Lines Changed (minus whitespace) - @apiName LinesChanged - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "additions":2, - "author_date":"2018-05-14 10:09:57 -0500", - "author_email":"[email protected]", - "author_name":"Sean P. Goggins", - "commit_date":"2018-05-16 10:12:22 -0500", - "committer_email":"[email protected]", - "committer_name":"Derek Howard", - "deletions":0, - "hash":"77e603a", - "message":"merge dev", - "parents":"b8ec0ed" - } - ] - """ - self.addGitMetric(git.lines_changed_minus_whitespace, 'lines_changed') - - """ - @api {get} /:owner/:repo/linking_websites Linking Websites - @apiName LinkingWebsites - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "url": "missouri.edu", - "rank": "1" - }, - { - "url": "unomaha.edu", - "rank": "2" - } - ] - """ - self.addMetric(publicwww.linking_websites, 'linking_websites') - - """ - @api {get} /:owner/:repo/timeseries/tags/major Major Tags - @apiName MajorTags - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. - - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-04-10T17:25:14-07:00", - "release": "v1.0.0" - }, - { - "date": "2008-04-10T17:25:47-07:00", - "release": "v2.0.0" - } - ] - """ - self.addTimeseries(github.major_tags, 'tags/major') - - """ - @api {get} /:owner/:repo/timeseries/tags/major Tages - @apiName Tags - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
- - @apiParam {String} owner Username of the owner of the GitHub repository - @apiParam {String} repo Name of the GitHub repository - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-04-10T17:25:06-07:00", - "release": "v0.9.1" - }, - { - "date": "2008-04-10T17:25:07-07:00", - "release": "v0.9.2" - } - ] - """ - self.addTimeseries(github.tags, 'tags') """ - @api {get} /ghtorrent_range GHTorrent Date Range - @apiName GhtorrentRange - @apiGroup Utility - @apiDescription Utility endpoint to show the range of dates GHTorrent covers. - - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2008-04-10T17:25:06-07:00", - "release": "v0.9.1" - }, - { - "date": "2008-04-10T17:25:07-07:00", - "release": "v0.9.2" - } - ] + @api {post} /batch Batch Request Metadata + @apiName BatchMetadata + @apiGroup Batch + @apiDescription Returns metadata of batch requests + POST JSON of API requests metadata """ - @app.route('/{}/ghtorrent_range'.format(AUGUR_API_VERSION)) - - def ghtorrent_range(): - ghtorrent_range = self.transform(ghtorrent.ghtorrent_range()) - return Response(response=ghtorrent_range, - status=200, - mimetype="application/json") + @app.route('/{}/batch/metadata'.format(self.api_version), methods=['GET', 'POST']) + def batch_metadata(): - ####################### - # Batch Requests # - ####################### + self.show_metadata = True - """ - @api {post} /batch Batch Requests - @apiName Batch - @apiGroup Batch - @apiDescription Returns results of batch requests - POST JSON of api requests - """ - #TODO: documentation - @app.route('/{}/batch'.format(AUGUR_API_VERSION), methods=['GET', 'POST']) - def batch(): - """ - Execute multiple requests, submitted as a batch. - :statuscode 207: Multi status - """ if request.method == 'GET': """this will return sensible defaults in the future""" - return app.make_response('{"status": "501", "response": "Defaults for batch requests not implemented. Please POST a JSON array of requests to this endpoint for now."}') + return app.make_response(json.dumps(metric_metadata)) try: requests = json.loads(request.data) @@ -1266,13 +174,14 @@ def batch(): for index, req in enumerate(requests): - method = req['method'] path = req['path'] body = req.get('body', None) try: + augur.logger.info('batch endpoint: ' + path) + with app.app_context(): with app.test_request_context(path, method=method, @@ -1300,10 +209,11 @@ def batch(): # _read_response(response) reads response.response # and returns a string. If your endpoints return JSON object, # this string would be the response as a JSON string. 
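                    # A minimal sketch of the shapes involved, using the request body
                    # from the Postman collection added later in this patch: the client
                    # POSTs a JSON array such as
                    #   [{"method": "GET", "path": "/api/unstable/rails/rails/timeseries/commits"}]
                    # and each element appended to `responses` below takes the form
                    #   {"path": "...", "status": 200, "response": "<endpoint JSON as a string>"}
                    # with the whole list returned under HTTP 207 (Multi-Status).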
+ responses.append({ "path": path, "status": response.status_code, - "response": str(response.get_data(), 'utf8') + "response": str(response.get_data(), 'utf8'), }) except Exception as e: @@ -1314,15 +224,14 @@ def batch(): "response": str(e) }) + self.show_metadata = False return Response(response=json.dumps(responses), status=207, mimetype="application/json") - augur_app.finalize_config() - - def transform(self, data, orient='records', + def transform(self, func, args=None, kwargs=None, orient='records', group_by=None, on=None, aggregate='sum', resample=None, date_col='date'): if orient is None: @@ -1330,20 +239,31 @@ def transform(self, data, orient='records', result = '' - if hasattr(data, 'to_json'): - if group_by is not None: - data = data.group_by(group_by).aggregate(aggregate) - if resample is not None: - data['idx'] = pd.to_datetime(data[date_col]) - data = data.set_index('idx') - data = data.resample(resample).aggregate(aggregate) - data['date'] = data.index - result = data.to_json(orient=orient, date_format='iso', date_unit='ms') + if not self.show_metadata: + + if not args and not kwargs: + data = func() + elif args and not kwargs: + data = func(*args) + else: + data = func(*args, **kwargs) + + if hasattr(data, 'to_json'): + if group_by is not None: + data = data.group_by(group_by).aggregate(aggregate) + if resample is not None: + data['idx'] = pd.to_datetime(data[date_col]) + data = data.set_index('idx') + data = data.resample(resample).aggregate(aggregate) + data['date'] = data.index + result = data.to_json(orient=orient, date_format='iso', date_unit='ms') + else: + try: + result = json.dumps(data) + except: + result = data else: - try: - result = json.dumps(data) - except: - result = data + result = json.dumps(func.metadata) return result @@ -1355,33 +275,31 @@ def flaskify(self, func, cache=True): if cache: def generated_function(*args, **kwargs): def heavy_lifting(): - return self.transform(func(*args, **kwargs), **request.args.to_dict()) + return self.transform(func, args, kwargs, **request.args.to_dict()) body = self.cache.get(key=str(request.url), createfunc=heavy_lifting) return Response(response=body, status=200, mimetype="application/json") - generated_function.__name__ = func.__name__ + generated_function.__name__ = func.__self__.__class__.__name__ + " _" + func.__name__ return generated_function else: def generated_function(*args, **kwargs): kwargs.update(request.args.to_dict()) - return Response(response=self.transform(func(*args, **kwargs)), + return Response(response=self.transform(func, args, kwargs, **request.args.to_dict()), status=200, mimetype="application/json") - generated_function.__name__ = func.__name__ + generated_function.__name__ = func.__self__.__class__.__name__ + " _" + func.__name__ return generated_function def addMetric(self, function, endpoint, cache=True, **kwargs): """Simplifies adding routes that only accept owner/repo""" - endpoint = '/{}/<owner>/<repo>/{}'.format(AUGUR_API_VERSION, endpoint) + endpoint = '/{}/<owner>/<repo>/{}'.format(self.api_version, endpoint) self.app.route(endpoint)(self.flaskify(function, cache=cache)) self.updateMetricMetadata(function, endpoint, **kwargs) - - def addGitMetric(self, function, endpoint, cache=True): """Simplifies adding routes that accept""" - endpoint = '/{}/git/{}/<path:repo_url>/'.format(AUGUR_API_VERSION, endpoint) + endpoint = '/{}/git/{}/<path:repo_url>/'.format(self.api_version, endpoint) self.app.route(endpoint)(self.flaskify(function, cache=cache)) self.updateMetricMetadata(function, 
endpoint=endpoint, metric_type='git') @@ -1400,7 +318,7 @@ def updateMetricMetadata(self, function, endpoint, **kwargs): # Get the unbound function from the bound function's class so that we can modify metadata # across instances of that class. real_func = getattr(function.__self__.__class__, function.__name__) - annotate(endpoint=endpoint, source=function.__self__.__class__.__name__, **kwargs)(real_func) + annotate(endpoint=endpoint, **kwargs)(real_func) def run(): server = Server() diff --git a/augur/util.py b/augur/util.py --- a/augur/util.py +++ b/augur/util.py @@ -1,6 +1,11 @@ #SPDX-License-Identifier: MIT +""" +Provides shared functions that do not fit in a class of their own +""" import pandas as pd import os +import re +import json import logging import coloredlogs import beaker @@ -12,6 +17,8 @@ # end imports # (don't remove the above line, it's for a script) +def getFileID(path): + return os.path.splitext(os.path.basename(path))[0] __ROOT = os.path.abspath(os.path.dirname(__file__)) def get_data_path(path): @@ -31,7 +38,7 @@ def get_cache(namespace, cache_manager=None): cache_manager = __memory_cache return cache_manager.get_cache(namespace) -metrics = [] +metric_metadata = [] def annotate(metadata=None, **kwargs): """ Decorate a function as being a metric @@ -41,8 +48,13 @@ def annotate(metadata=None, **kwargs): def decorate(func): if not hasattr(func, 'metadata'): func.metadata = {} - metrics.append(func.metadata) + metric_metadata.append(func.metadata) func.metadata.update(metadata) func.metadata.update(dict(kwargs)) + + func.metadata['metric_name'] = re.sub('_', ' ', func.__name__).title() + func.metadata['source'] = re.sub(r'(.*\.)', '', func.__module__) + func.metadata['ID'] = "{}-{}".format(func.metadata['source'].lower(), func.metadata['tag']) + return func - return decorate \ No newline at end of file + return decorate diff --git a/docs/metrics/status.py b/docs/metrics/status.py deleted file mode 100644 --- a/docs/metrics/status.py +++ /dev/null @@ -1,85 +0,0 @@ -import os -import re -import json -import glob -import webbrowser -from flask import Flask, request, Response - -metric_files = ['upstream/1_Diversity-Inclusion.md', 'upstream/2_Growth-Maturity-Decline.md', 'upstream/3_Risk.md', 'upstream/4_Value.md'] - -metric_type_by_file = { - 'upstream/1_Diversity-Inclusion.md': 'Diversity and Inclusion', - 'upstream/2_Growth-Maturity-Decline.md': 'Growth, Maturity, and Decline', - 'upstream/3_Risk.md': 'Risk', - 'upstream/4_Value.md': 'Value', -} - -color_by_status = { - 'unimplemented': '<span style="color: #C00">unimplemented</span>', - 'in_progress': '<span style="color: #CC0">in progress</span>', - 'implemented': '<span style="color: #0C0">implemented</span>' -} - -statusMap = json.loads(open('status.json', 'r').read()) -statusHTML = """ -<html> -<head> - <title>Augur Metrics Status</title> - <style> - td { padding: 5px } - </style> -</head> -<body> - <h1>Augur Metrics Status</h1> - -""" - -def getFileID(path): - return os.path.splitext(os.path.basename(path))[0] - -def printMetric(title, path): - global statusHTML - status = 'unimplemented' - fileID = getFileID(path) - if fileID in statusMap: - status = statusMap[fileID] - if status != 'printed': - statusHTML += '<tr><td>{}</td><td><a href="https://github.com/chaoss/wg-gmd/tree/master/{}"> {} ({})</td></tr>'.format(color_by_status[status], path, title, fileID) - statusMap[fileID] = 'printed' - return fileID - -# Iterate through the category Markdown files to categorize links -for filename in metric_files: - file = 
open(filename, 'r') - matches = re.findall(r'\[(.*?)\]\((.*?\.md)\)', file.read()) - if len(matches) > 0: - statusHTML += '<h2>' + metric_type_by_file[filename] + '</h2><table><tr><td>status</td><td>metric</td></tr>' - for match in matches: - printMetric(match[0], match[1]) - statusHTML += '</table>' - - -# Iterate through the files in activity-metrics to find uncategorized metrics -statusHTML += '<h2>Uncategorized</h2><table><tr><td>status</td><td>metric</td></tr>' -for filename in glob.iglob('upstream/activity-metrics/*.md'): - printMetric(getFileID(filename).replace('-', ' ').title(), 'activity-metrics/' + getFileID(filename) + '.md') - - -statusHTML += """ - </table> -</body> -</html> -""" - -app = Flask(__name__) - [email protected]("/") -def root(): - return statusHTML - -def run(): - webbrowser.open_new_tab('http://localhost:5001/') - app.run(port=5001) - -if __name__ == "__main__": - run() diff --git a/docs/python/source/conf.py b/docs/python/source/conf.py old mode 100644 new mode 100755 --- a/docs/python/source/conf.py +++ b/docs/python/source/conf.py @@ -32,6 +32,7 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', + 'sphinx.ext.autosummary', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', @@ -85,7 +86,7 @@ # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True - +html_sidebars = { '**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'] } # -- Options for HTML output ---------------------------------------------- @@ -164,6 +165,8 @@ ] +autosummary_generate = True + # Example configuration for intersphinx: refer to the Python standard library. diff --git a/plugins/example-plugin/plugin.py b/plugins/example-plugin/plugin.py deleted file mode 100644 --- a/plugins/example-plugin/plugin.py +++ /dev/null @@ -1,26 +0,0 @@ -#SPDX-License-Identifier: MIT -from augur import register_plugin, logger -from augur.server import addMetric -# (don't remove the above line, it's for a script) - -class ExamplePlugin(object): - """ - This plugin serves as an example as to how to load plugins into Augur - """ - def __init__(self): - logger.info('example-plugin loaded') - return - - def example_metric(self, owner, repo): - return [] - - -def add_routes(app, instance): - """ - Responsible for adding this plugin's data sources to the API - """ - addMetric(app, instance.example_metric, 'example-metric') - - - -register_plugin(ExamplePlugin, 'example-plugin', routes='routes.py') \ No newline at end of file diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -26,8 +26,7 @@ url='https://github.com/OSSHealth/augur', author='Derek Howard', author_email='[email protected]', - packages=['augur'], - package_dir={'augur': 'augur'}, + packages=['augur', 'augur.plugins', 'augur.routes'], license='MIT', classifiers=[ 'Development Status :: 1 - Planning', @@ -40,8 +39,7 @@ install_requires=[ 'cython', 'protobuf', 'ipdb', 'setuptools-git', 'beautifulsoup4', 'flask', 'flask-cors', 'PyMySQL', 'requests', 'python-dateutil', 'sqlalchemy', 'pandas', 'pytest', 'PyGithub', 'GitPython', - 'pyevent', 'gunicorn', 'datetime', 'traitlets', 'coloredlogs', 'tldextract', 'python-daemon', 'beaker', - 'lockfile'], + 'gunicorn', 'traitlets', 'coloredlogs', 'tldextract', 'beaker', 'lockfile'], extras_require={ 'dev': ['check-manifest'], 'test': ['coverage'],
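For reference, a minimal sketch of a single metric_metadata entry as built by the reworked annotate() decorator in the augur/util.py hunk above; the values are illustrative and assume a metric registered with tag='contributors' under an api_version of api/unstable. This is the kind of record the new /batch/metadata route serializes:

    {
        "ID": "ghtorrent-contributors",
        "tag": "contributors",
        "metric_name": "Contributors",
        "source": "ghtorrent",
        "endpoint": "/api/unstable/<owner>/<repo>/contributors"
    }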
diff --git a/docs/testing.md b/docs/testing.md old mode 100644 new mode 100755 --- a/docs/testing.md +++ b/docs/testing.md @@ -10,3 +10,6 @@ If you don't have both Python 2 and 3, you can run the tests individually - Python 2: `python2 -m pytest` - Python 3: `python3 -m pytest` + +To test the Augur API, run `make test-api`. +- You will need to add a Postman API key to your `augur.config.json`. diff --git a/notebooks/Python Function Testing.ipynb b/notebooks/Python Function Testing.ipynb new file mode 100644 --- /dev/null +++ b/notebooks/Python Function Testing.ipynb @@ -0,0 +1,300 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Python Function Testing\n", + "\n", + "This notebook is for testing all the Python functions. Each cell is a data source class; feel free to experiment to your heart's content." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import augur\n", + "\n", + "# import everything that githubapi.py imports so we can just copy and paste our function later\n", + "augur_app = augur.Application('../augur.config.json')" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import sqlalchemy as s\n", + "import numpy as np\n", + "import re\n", + "from augur import logger\n", + "from augur.util import annotate\n", + "\n", + "ghtorrent = augur_app.ghtorrent()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "# ghtorrent.closed_issues(owner, repo)\n", + "# ghtorrent.code_commits(owner, repo)\n", + "# ghtorrent.code_review_iteration(owner, repo)\n", + "# ghtorrent.contribution_acceptance(owner, repo)\n", + "# ghtorrent.contributing_github_organizations(owner, repo)\n", + "# ghtorrent.first_response_to_issue_duration(owner, repo)\n", + "# ghtorrent.forks(owner, repo)\n", + "# ghtorrent.maintainer_response_to_merge_request_duration(owner, repo)\n", + "# ghtorrent.new_contributing_github_organizations(owner, repo)\n", + "# ghtorrent.open_issues(owner, repo)\n", + "# ghtorrent.pull_request_comments(owner, repo)\n", + "# ghtorrent.pull_requests_open(owner, repo)\n", + "# ghtorrent.issue_comments(owner, repo)\n", + "# ghtorrent.watchers(owner, repo)\n", + "# ghtorrent.commits100(owner, repo)\n", + "# ghtorrent.commit_comments(owner, repo)\n", + "# ghtorrent.committer_locations(owner, repo)\n", + "# ghtorrent.total_committers(owner, repo)\n", + "# ghtorrent.issue_activity(owner, repo)\n", + "# ghtorrent.pull_request_acceptance_rate(owner, repo)\n", + "# ghtorrent.community_age(owner, repo)\n", + "# ghtorrent.community_engagement(owner, repo)\n", + "# ghtorrent.contributors(owner, repo)\n", + "# ghtorrent.contributions(owner, repo)\n", + "# ghtorrent.classify_contributors(owner, repo)\n", + "# ghtorrent.project_age(owner, repo)\n", + "# ghtorrent.fakes(owner, repo)\n", + "# ghtorrent.ghtorrent_range(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import sqlalchemy as s\n", + "import numpy as np\n", + "import re\n", + "from augur import logger\n", + "\n", + "ghtorrentplus = augur_app.ghtorrentplus()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "# ghtorrentplus.closed_issue_resolution_duration(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "collapsed": true + }, + 
"outputs": [], + "source": [ + "import sys\n", + "import pandas as pd\n", + "if sys.version_info > (3, 0):\n", + " import urllib.parse as url\n", + "else:\n", + " import urllib as url\n", + "\n", + "publicwww = augur_app.publicwww()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "# publicwww.linking_websites(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "ename": "NoSuchPathError", + "evalue": "/Users/carterlandis/Documents/Code/augur/runtime/git_repos/repos/rails/repo", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNoSuchPathError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m<ipython-input-28-45c4dcdad33f>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;31m# git.update()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0;31m# git.downloaded_repos()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mgit\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlines_changed_minus_whitespace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"https://github.com/rails/rails\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 17\u001b[0m \u001b[0;31m# git.changes_by_author(\"https://github.com/rails/rails\")\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/Documents/Code/augur/augur/git.py\u001b[0m in \u001b[0;36mlines_changed_minus_whitespace\u001b[0;34m(self, repo_url, from_commit, df, rebuild_cache)\u001b[0m\n\u001b[1;32m 209\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'deletions'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdeletions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 210\u001b[0m \u001b[0mframes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindex\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'hash'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 211\u001b[0;31m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 212\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 213\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mframes\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/beaker/cache.py\u001b[0m in \u001b[0;36mget\u001b[0;34m(self, key, **kw)\u001b[0m\n\u001b[1;32m 320\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 321\u001b[0m \u001b[0;34m\"\"\"Retrieve a cached value from the container\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 322\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0;34m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 323\u001b[0m \u001b[0mget_value\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 324\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/beaker/container.py\u001b[0m in \u001b[0;36mget_value\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 378\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 379\u001b[0m \u001b[0mdebug\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"get_value creating new value\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 380\u001b[0;31m \u001b[0mv\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcreatefunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 381\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mv\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 382\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/Documents/Code/augur/augur/git.py\u001b[0m in \u001b[0;36mheavy_lifting\u001b[0;34m()\u001b[0m\n\u001b[1;32m 162\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mlines_changed_minus_whitespace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrepo_url\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfrom_commit\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdf\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrebuild_cache\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 164\u001b[0;31m \"\"\"\n\u001b[0m\u001b[1;32m 165\u001b[0m \u001b[0mMakes\u001b[0m \u001b[0msure\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mstorageFolder\u001b[0m \u001b[0mcontains\u001b[0m \u001b[0mupdated\u001b[0m \u001b[0mversions\u001b[0m \u001b[0mof\u001b[0m \u001b[0mall\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mrepos\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 166\u001b[0m \"\"\"\n", + "\u001b[0;32m~/Documents/Code/augur/augur/git.py\u001b[0m in \u001b[0;36mgit\u001b[0;34m(self, is_updater)\u001b[0m\n\u001b[1;32m 74\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 75\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__git\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mis_updater\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 76\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__git\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgit\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mRepo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 77\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__git\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 78\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + 
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/git/repo/base.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, path, odbt, search_parent_directories, expand_vars)\u001b[0m\n\u001b[1;32m 122\u001b[0m \u001b[0mepath\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mexpand_path\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexpand_vars\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 123\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexists\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 124\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mNoSuchPathError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 125\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 126\u001b[0m \u001b[0;31m## Walk up the path to find the `.git` dir.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mNoSuchPathError\u001b[0m: /Users/carterlandis/Documents/Code/augur/runtime/git_repos/repos/rails/repo" + ] + } + ], + "source": [ + "import os\n", + "import shutil\n", + "import re\n", + "import json\n", + "import datetime\n", + "import pandas as pd\n", + "import git\n", + "from lockfile import LockFile, AlreadyLocked\n", + "from augur.util import logger, get_cache\n", + "\n", + "git = augur_app.git()\n", + "\n", + "# git.get_repo(\"https://github.com/rails/rails\")\n", + "# git.update()\n", + "# git.downloaded_repos()\n", + "# git.lines_changed_minus_whitespace(\"https://github.com/rails/rails\")\n", + "# git.changes_by_author(\"https://github.com/rails/rails\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "'str' object has no attribute 'GITHUB_API_KEY'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m<ipython-input-4-e86e635d5d49>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;31m# return the dataframe\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 44\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mdf\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 45\u001b[0;31m \u001b[0mlines_of_code_changed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mowner\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrepo\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m<ipython-input-4-e86e635d5d49>\u001b[0m in \u001b[0;36mlines_of_code_changed\u001b[0;34m(self, owner, repo)\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;31m# see <project_root>/augur/githubapi.py for examples using the GraphQL API\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0murl\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"https://api.github.com/repos/{}/{}/stats/code_frequency\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mowner\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrepo\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 34\u001b[0;31m \u001b[0mjson\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mrequests\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mauth\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'user'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGITHUB_API_KEY\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjson\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 35\u001b[0m \u001b[0;31m# get our data into a dataframe\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0mdf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mjson\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'date'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'additions'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'deletions'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mAttributeError\u001b[0m: 'str' object has no attribute 'GITHUB_API_KEY'" + ] + } + ], + "source": [ + "from augur.localcsv import LocalCSV\n", + "import json\n", + "import re\n", + "from dateutil.parser import parse\n", + "import pandas as pd\n", + "import github\n", + "import numpy as np\n", + "import datetime\n", + "import requests\n", + "from augur import logger\n", + "\n", + "github = augur_app.githubapi()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "github.lines_of_code_changed(owner, repo)\n", + "# github.bus_factor(owner, repo)\n", + "# github.major_tags(owner, repo)\n", + "# github.tags(owner, repo)\n", + "# github.contributors_gender(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import requests\n", + "import pandas as pd\n", + "import numpy as np\n", + "from bs4 import BeautifulSoup\n", + "from augur import logger\n", + "\n", + "librariesio = augur_app.librariesio()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "# librariesio.dependencies(owner, repo)\n", + "# librariesio.dependency_stats(owner, repo)\n", + "# librariesio.dependents(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import json\n", + "import pandas as pd\n", + "import requests\n", + "import datetime\n", + "import base64\n", + "from augur import logger\n", + "\n", + "downloads = augur_app.downloads()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "# downloads.downloads(owner, repo)\n", + "# downloads.ruby_downloads(owner)\n", + "# downloads.npm_downloads(owner, repo)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import tldextract\n", + "from urllib.parse import urlparse\n", + "from .util import get_data_path\n", + "\n", + "localcsv = augur_app.localcsv()\n", + "owner='rails'\n", + "repo='rails'\n", + "\n", + "localcsv.classify_emails(self, email_series)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python (augur)", + "language": "python", + "name": "augur" + }, + "language_info": { + "codemirror_mode": { 
+ "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/test.ipynb b/notebooks/test.ipynb --- a/notebooks/test.ipynb +++ b/notebooks/test.ipynb @@ -2,10 +2,8 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": true - }, + "execution_count": 9, + "metadata": {}, "outputs": [], "source": [ "import augur\n", @@ -23,15 +21,13 @@ }, { "cell_type": "code", - "execution_count": 2, - "metadata": { - "collapsed": true - }, + "execution_count": 5, + "metadata": {}, "outputs": [], "source": [ - "augurApp = augur.Application('../augur.cfg')\n", + "augurApp = augur.Application('../augur.config.json')\n", "# we only need an instance of the GitHubAPI class\n", - "github = augurApp.github()" + "github = augurApp.githubapi()" ] }, { @@ -656,6 +652,20 @@ "bus_factor(stan, \"rails\", \"rails\")" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "code", "execution_count": null, @@ -668,9 +678,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python (augur)", "language": "python", - "name": "python3" + "name": "augur" }, "language_info": { "codemirror_mode": { diff --git a/test/Augur.postman_collection.json b/test/Augur.postman_collection.json new file mode 100644 --- /dev/null +++ b/test/Augur.postman_collection.json @@ -0,0 +1,1244 @@ +{ + "info": { + "_postman_id": "ec950b0b-a5e9-4fe3-b1a5-ad4f49c209f9", + "name": "Augur", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "item": [ + { + "name": "ghtorrent", + "item": [ + { + "name": "issues/closed", + "event": [ + { + "listen": "test", + "script": { + "id": "3596106b-7311-4099-b5fd-e5c1e40f5799", + "type": "text/javascript", + "exec": [ + "" + ] + } + } + ], + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/issues/closed", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "issues", + "closed" + ] + } + }, + "response": [] + }, + { + "name": "commits", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/commits", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "commits" + ] + } + }, + "response": [] + }, + { + "name": "code_review_iteration", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/code_review_iteration", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "code_review_iteration" + ] + } + }, + "response": [] + }, + { + "name": "contribution_acceptance", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/contribution_acceptance", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "contribution_acceptance" + ] + } + }, + "response": [] + }, + { + "name": "contributing_github_organizations", + "request": { + "method": "GET", + 
"header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/contributing_github_organizations", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "contributing_github_organizations" + ] + } + }, + "response": [] + }, + { + "name": "issues/response_time", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/issues/response_time", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "issues", + "response_time" + ] + } + }, + "response": [] + }, + { + "name": "forks", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/forks", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "forks" + ] + } + }, + "response": [] + }, + { + "name": "pulls/maintainer_response_time", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/pulls/maintainer_response_time", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "pulls", + "maintainer_response_time" + ] + } + }, + "response": [] + }, + { + "name": "new_contributing_github_organizations", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/new_contributing_github_organizations", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "new_contributing_github_organizations" + ] + } + }, + "response": [] + }, + { + "name": "issues", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/issues", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "issues" + ] + } + }, + "response": [] + }, + { + "name": "pulls/comments", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/pulls/comments", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "pulls", + "comments" + ] + } + }, + "response": [] + }, + { + "name": "pulls", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/pulls", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "pulls" + ] + } + }, + "response": [] + }, + { + "name": "issue_comments", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/issue_comments", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "issue_comments" + ] + } + }, + "response": [] + }, + { + "name": "pulls/made_closed", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/pulls/made_closed", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "pulls", + "made_closed" + ] + } + }, + "response": [] + }, + { + "name": "watchers", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/watchers", + "host": [ + "{{server}}" + ], + "path": [ + 
"{{api_version}}", + "{{repo}}", + "timeseries", + "watchers" + ] + } + }, + "response": [] + }, + { + "name": "commits100", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/commits100", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "commits100" + ] + } + }, + "response": [] + }, + { + "name": "commits/comments", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/commits/comments", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "commits", + "comments" + ] + } + }, + "response": [] + }, + { + "name": "committer_locations", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/committer_locations", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "committer_locations" + ] + } + }, + "response": [] + }, + { + "name": "total_committers", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/total_committers", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "total_committers" + ] + } + }, + "response": [] + }, + { + "name": "issues/activity", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/issues/activity", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "issues", + "activity" + ] + } + }, + "response": [] + }, + { + "name": "community_age", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/community_engagement", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "community_engagement" + ] + } + }, + "response": [] + }, + { + "name": "community_engagement", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/community_engagement", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "community_engagement" + ] + } + }, + "response": [] + }, + { + "name": "contributors", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/contributors", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "contributors" + ] + } + }, + "response": [] + }, + { + "name": "contributions", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/contributions", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "contributions" + ] + } + }, + "response": [] + }, + { + "name": "project_age", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/project_age", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "project_age" + ] + } + }, + "response": [] + }, + { + "name": "fakes", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/fakes", + "host": [ + "{{server}}" + ], + "path": [ + 
"{{api_version}}", + "{{repo}}", + "timeseries", + "fakes" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "8976e16c-77cc-4411-b1c1-fabf347058f0", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "83ad29a9-5409-455f-a669-6d327b29dba7", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "ghtorrentplus", + "item": [ + { + "name": "issues/time_to_close", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/issues/time_to_close", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "issues", + "time_to_close" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "5626f5d3-5538-456c-969d-97dee6349358", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "6d38b43a-4d2f-42d4-b2d2-20d29cf09767", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "facade", + "item": [ + { + "name": "downloaded_repos", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/git/repos", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "git", + "repos" + ] + } + }, + "response": [] + }, + { + "name": "lines_changed", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/git/changes_by_author", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "git", + "changes_by_author" + ] + } + }, + "response": [] + }, + { + "name": "lines_changed_minus_white_space", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/git/lines_changed", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "git", + "lines_changed" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "fc81708a-ca9f-40dc-9b00-3ca36b1fb66c", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "e2941d3b-33f6-4dea-811f-47e1534d4d78", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "githubapi", + "item": [ + { + "name": "lines_changed", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/lines_changed", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "lines_changed" + ] + } + }, + "response": [] + }, + { + "name": "tags", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/tags", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "tags" + ] + } + }, + "response": [] + }, + { + "name": "tags/major", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/tags/major", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + 
"timeseries", + "tags", + "major" + ] + } + }, + "response": [] + }, + { + "name": "bus_factor", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/bus_factor", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "bus_factor" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "04eb46cb-53f4-4450-90da-657fc8e87c40", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "ba64c471-d5e0-4963-b0a7-e50f3011b5c6", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "librariesio", + "item": [ + { + "name": "dependencies", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/dependencies", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "dependencies" + ] + } + }, + "response": [] + }, + { + "name": "dependency_stats", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/dependency_stats", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "dependency_stats" + ] + } + }, + "response": [] + }, + { + "name": "dependents", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/dependents", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "dependents" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "4e2f03ee-9040-47cf-b983-fb12dff72870", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "fec972a1-5955-413e-8e37-1a10a889ed8a", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "publicwww", + "item": [ + { + "name": "linking_websites", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/linking_websites", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "linking_websites" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "bce40f57-125b-4d9c-b66f-736e3e215b63", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "4c2489b5-4e5d-46e4-a75a-f31d0709155a", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "downloads", + "item": [ + { + "name": "downloads", + "request": { + "auth": { + "type": "noauth" + }, + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/{{repo}}/timeseries/downloads", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "{{repo}}", + "timeseries", + "downloads" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "4a6473a2-4606-41f0-b970-db4ef7fcbe9d", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "9d5c0c80-0211-4094-a953-ff37c17037bb", + "type": 
"text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + }, + { + "name": "batch", + "item": [ + { + "name": "batch", + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "[{\"method\": \"GET\", \"path\": \"/api/unstable/rails/rails/timeseries/commits\"}, {\"method\": \"GET\", \"path\": \"/api/unstable/rails/rails/timeseries/issues\"}, {\"method\": \"GET\", \"path\": \"/api/unstable/rails/rails/timeseries/issues/closed\"}]\n" + }, + "url": { + "raw": "{{server}}/{{api_version}}/batch", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "batch" + ] + } + }, + "response": [] + }, + { + "name": "batch w metadata", + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "[{\"method\": \"GET\", \"path\": \"/api/unstable/rails/rails/timeseries/commits\"}, {\"method\": \"GET\", \"path\": \"/api/unstable/rails/rails/timeseries/issues\"}]\n" + }, + "url": { + "raw": "{{server}}/{{api_version}}/batch/metadata", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "batch", + "metadata" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "c996d09b-20d8-4f96-bec8-71de0bb8ff11", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "0d42076e-056b-4e2e-b11d-13c4dbfc5439", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(207);", + "});" + ] + } + } + ] + }, + { + "name": "metrics status", + "item": [ + { + "name": "metrics/status", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/metrics/status", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "metrics", + "status" + ] + } + }, + "response": [] + }, + { + "name": "metrics/status/metadata", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/metrics/status/metadata", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "metrics", + "status", + "metadata" + ] + } + }, + "response": [] + }, + { + "name": "metrics/status/filter", + "request": { + "method": "GET", + "header": [], + "body": {}, + "url": { + "raw": "{{server}}/{{api_version}}/metrics/status/filter?source=ghtorrent", + "host": [ + "{{server}}" + ], + "path": [ + "{{api_version}}", + "metrics", + "status", + "filter" + ], + "query": [ + { + "key": "source", + "value": "ghtorrent" + } + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "b22f156c-866f-4be6-8acc-178388602ee4", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "3504c981-5c77-4d0c-a96d-1c20ec8608c2", + "type": "text/javascript", + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" + ] + } + } + ] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "22463b9e-8286-4bba-928c-6b26ed9e3bab", + "type": "text/javascript", + "exec": [ + "" + ] + } + }, + { + "listen": "test", + "script": { + "id": "b1db8fe4-1748-4bda-8632-5f7f2ddf0ec4", + "type": "text/javascript", + "exec": [ + "tests['JSON array is not empty'] = 
(JSON.parse(responseBody).length > 0);" + ] + } + } + ] +} \ No newline at end of file diff --git a/test/dev-unstable-rails.postman_environment.json b/test/dev-unstable-rails.postman_environment.json new file mode 100644 --- /dev/null +++ b/test/dev-unstable-rails.postman_environment.json @@ -0,0 +1,27 @@ +{ + "id": "392a724a-6e59-4dc9-b70f-50221adf4847", + "name": "dev - unstable - rails", + "values": [ + { + "key": "server", + "value": "dev.augurlabs.io", + "description": "", + "enabled": true + }, + { + "key": "api_version", + "value": "api/unstable", + "description": "", + "enabled": true + }, + { + "key": "repo", + "value": "rails/rails", + "description": "", + "enabled": true + } + ], + "_postman_variable_scope": "environment", + "_postman_exported_at": "2018-08-17T17:15:04.995Z", + "_postman_exported_using": "Postman/6.2.4" +} \ No newline at end of file diff --git a/test/test_api.py b/test/test_api.py new file mode 100644 --- /dev/null +++ b/test/test_api.py @@ -0,0 +1,7 @@ +import os +import augur + +augur_app = augur.Application(config_file="augur.config.json") +postman_api_key = augur_app.read_config("Postman", "apikey", "AUGUR_POSTMAN_API_KEY", "None") + +os.system("newman run https://api.getpostman.com/collections/4566755-ec950b0b-a5e9-4fe3-b1a5-ad4f49c209f9?apikey={} -e https://api.getpostman.com/environments/4566755-2eb8f02c-642f-4f12-892f-d75f4c5faa24?apikey={} --color off | tee test/api-test.log".format(postman_api_key, postman_api_key)) \ No newline at end of file diff --git a/test/test_ghtorrent.py b/test/test_ghtorrent.py --- a/test/test_ghtorrent.py +++ b/test/test_ghtorrent.py @@ -1,7 +1,7 @@ import os import pytest [email protected] [email protected](scope="module") def ghtorrent(): import augur augurApp = augur.Application() @@ -29,9 +29,6 @@ def test_userid(ghtorrent): def test_closed_issues(ghtorrent): assert ghtorrent.closed_issues('cashmusic', 'platform').isin(["2012-11-09T00:00:00.000Z"]).any -def test_closed_issue_resolution_duration(ghtorrent): - assert ghtorrent.closed_issue_resolution_duration('mopidy', 'mopidy').isin(["2012-11-10T09:51:19.000Z"]).any - def test_code_commits(ghtorrent): assert ghtorrent.code_commits('facebook', 'folly').isin(["2013-01-07"]).any @@ -115,10 +112,6 @@ def test_project_age(ghtorrent): def test_fakes(ghtorrent): assert ghtorrent.fakes('rails', 'rails').isin(["2008-09-24T00:00:00.000Z"]).any -def test_ghtorrent_range(ghtorrent): - assert ghtorrent.ghtorrent_range().isin(["0000-00-00"]).any - - diff --git a/test/test_ghtorrentplus.py b/test/test_ghtorrentplus.py --- a/test/test_ghtorrentplus.py +++ b/test/test_ghtorrentplus.py @@ -12,7 +12,7 @@ def ghtorrentplus(): # *** GROWTH, MATURITY, AND DECLINE *** # def test_closed_issue_resolution_duration(ghtorrentplus): - assert ghtorrentplus.closed_issue_resolution_duration('mopidy', 'mopidy').isin(["2012-11-10T09:51:19.000Z"]).any + assert ghtorrentplus.closed_issue_resolution_duration('TEST', 'TEST').isin(["DATE"]).any # *** RISK *** # diff --git a/test/test_publicwww.py b/test/test_publicwww.py deleted file mode 100644 --- a/test/test_publicwww.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import pytest -import pandas - [email protected] -def publicwww(): - import augur - augurApp = augur.Application() - return augurApp.publicwww() - -# *** DIVERSITY AND INCLUSION *** # - -# *** GROWTH, MATURITY, AND DECLINE *** # - -# *** RISK *** # - -# *** VALUE *** # - -# *** ACTIVITY *** # - -# *** EXPERIMENTAL *** # -def test_linking_websites(publicwww): - assert 
publicwww.linking_websites(owner='yihui', repo='knitr').isin(["sohu.com"]).any \ No newline at end of file
Batch API Logging update: the development logger for the Batch API needs to output the underlying endpoints being called, for debugging purposes.
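A minimal sketch of what the requested logging could look like, assuming Augur's Flask-based batch route receives a JSON list of sub-requests with `method` and `path` keys; the handler name and dispatch details here are illustrative assumptions, not the actual Augur implementation:

```python
import logging
from flask import Flask, request, jsonify

logger = logging.getLogger("augur")
app = Flask(__name__)

@app.route("/api/unstable/batch", methods=["POST"])
def batch():
    responses = []
    for sub_request in request.get_json():
        method = sub_request.get("method", "GET")
        path = sub_request.get("path")
        # Development-level logging of each underlying endpoint being called
        logger.debug("Batch API dispatching %s %s", method, path)
        # ... dispatch the sub-request internally and collect its result ...
        responses.append({"method": method, "path": path, "status_code": 200})
    # 207 Multi-Status, matching the batch test expectation in the Postman collection
    return jsonify(responses), 207
```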
2018-08-27T23:22:24Z
[]
[]
chaoss/augur
330
chaoss__augur-330
[ "327" ]
3d435a9a91086952b0704c021fb87a0d45bb4422
diff --git a/augur/datasources/augur_db/augur_db.py b/augur/datasources/augur_db/augur_db.py --- a/augur/datasources/augur_db/augur_db.py +++ b/augur/datasources/augur_db/augur_db.py @@ -1212,6 +1212,48 @@ def cii_best_practices_badge(self, repo_group_id, repo_id=None): results = pd.read_sql(cii_best_practices_badge_SQL, self.db, params={'repo_id': repo_id}) return results + @annotate(tag='average-issue-resolution-time') + def average_issue_resolution_time(self, repo_group_id, repo_id=None): + """ + Returns the average issue resolution time + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :return: Average issue resolution time + """ + if not repo_id: + avg_issue_resolution_SQL = s.sql.text(""" + SELECT + issues.repo_id, + repo.repo_name, + AVG(issues.closed_at - issues.created_at)::text AS avg_issue_resolution_time + FROM issues JOIN repo ON issues.repo_id = repo.repo_id + WHERE issues.repo_id IN + (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND closed_at IS NOT NULL + GROUP BY issues.repo_id, repo.repo_name + ORDER BY issues.repo_id + """) + + results = pd.read_sql(avg_issue_resolution_SQL, self.db, + params={'repo_group_id': repo_group_id}) + return results + + else: + avg_issue_resolution_SQL = s.sql.text(""" + SELECT + repo.repo_name, + AVG(issues.closed_at - issues.created_at)::text AS avg_issue_resolution_time + FROM issues JOIN repo ON issues.repo_id = repo.repo_id + WHERE issues.repo_id = :repo_id + AND closed_at IS NOT NULL + GROUP BY repo.repo_name + """) + + results = pd.read_sql(avg_issue_resolution_SQL, self.db, + params={'repo_id': repo_id}) + return results + @annotate(tag='forks') def forks(self, repo_group_id, repo_id=None): """ @@ -1938,7 +1980,7 @@ def annual_commit_count_ranked_by_new_repo_in_repo_group(self, repo_group_id, re """ if calendar_year == None: calendar_year = 2019 - + cdRgNewrepRankedCommitsSQL = None if not repo_id: @@ -1953,7 +1995,7 @@ def annual_commit_count_ranked_by_new_repo_in_repo_group(self, repo_group_id, re ORDER BY net desc LIMIT 10 """) - else: + else: cdRgNewrepRankedCommitsSQL = s.sql.text(""" SELECT repo.repo_id, sum(cast(added as INTEGER) - cast(removed as INTEGER) - cast(whitespace as INTEGER)) as net, patches, repo_name FROM dm_repo_annual, repo, repo_groups @@ -1972,15 +2014,15 @@ def annual_commit_count_ranked_by_new_repo_in_repo_group(self, repo_group_id, re @annotate(tag='annual-commit-count-ranked-by-repo-in-repo-group') def annual_commit_count_ranked_by_repo_in_repo_group(self, repo_group_id, repo_id=None, timeframe=None): """ - For each repository in a collection of repositories being managed, each REPO's total commits during the current Month, - Year or Week. Result ranked from highest number of commits to lowest by default. + For each repository in a collection of repositories being managed, each REPO's total commits during the current Month, + Year or Week. Result ranked from highest number of commits to lowest by default. 
:param repo_group_id: The repository's repo_group_id :param repo_id: The repository's repo_id, defaults to None :param calendar_year: the calendar year a repo is created in to be considered "new" - """ + """ if timeframe == None: timeframe = 'all' - + cdRgTpRankedCommitsSQL = None if repo_id: @@ -2020,7 +2062,7 @@ def annual_commit_count_ranked_by_repo_in_repo_group(self, repo_group_id, repo_i order by net desc LIMIT 10 """) - else: + else: if timeframe == 'all': cdRgTpRankedCommitsSQL = s.sql.text(""" SELECT repo.repo_id, repo_name as name, SUM(added - removed - whitespace) as net, patches @@ -2078,7 +2120,7 @@ def annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(self, repo_group """ if calendar_year == None: calendar_year = 2019 - + cdRgNewrepRankedCommitsSQL = None if not repo_id: @@ -2093,7 +2135,7 @@ def annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(self, repo_group ORDER BY net desc LIMIT 10 """) - else: + else: cdRgNewrepRankedCommitsSQL = s.sql.text(""" SELECT repo.repo_id, sum(cast(added as INTEGER) - cast(removed as INTEGER) - cast(whitespace as INTEGER)) as net, patches, repo_name FROM dm_repo_annual, repo, repo_groups @@ -2112,15 +2154,15 @@ def annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(self, repo_group @annotate(tag='annual-lines-of-code-count-ranked-by-repo-in-repo-group') def annual_lines_of_code_count_ranked_by_repo_in_repo_group(self, repo_group_id, repo_id=None, timeframe=None): """ - For each repository in a collection of repositories being managed, each REPO's total commits during the current Month, - Year or Week. Result ranked from highest number of commits to lowest by default. + For each repository in a collection of repositories being managed, each REPO's total commits during the current Month, + Year or Week. Result ranked from highest number of commits to lowest by default. :param repo_group_id: The repository's repo_group_id :param repo_id: The repository's repo_id, defaults to None :param calendar_year: the calendar year a repo is created in to be considered "new" - """ + """ if timeframe == None: timeframe = 'all' - + cdRgTpRankedCommitsSQL = None if repo_id: @@ -2160,7 +2202,7 @@ def annual_lines_of_code_count_ranked_by_repo_in_repo_group(self, repo_group_id, order by net desc LIMIT 10 """) - else: + else: if timeframe == 'all': cdRgTpRankedCommitsSQL = s.sql.text(""" SELECT repo.repo_id, repo_name as name, SUM(added - removed - whitespace) as net, patches @@ -2205,6 +2247,107 @@ def annual_lines_of_code_count_ranked_by_repo_in_repo_group(self, repo_group_id, "repo_id": repo_id}) return results + @annotate(tag='top-committers') + def top_committers(self, repo_group_id, repo_id=None, year=None, threshold=0.5): + """ + Returns a list of contributors contributing N% of all commits. + + :param repo_group_id: Repo group ID + :param repo_id: Repo ID. + :param year: Year. eg: 2018, 2107. Defaults to current year. + :param threshold: The threshold to specify N%. 
Defaults to 0.5 + """ + threshold = float(threshold) + if threshold < 0 or threshold > 1: + raise ValueError('threshold should be between 0 and 1') + + if year is None: + year = datetime.datetime.now().year + + if not repo_id: + total_commits_SQL = s.sql.text(""" + SELECT SUM(patches)::int + FROM + (SELECT repo_group_id, email, year, patches + FROM dm_repo_group_annual + WHERE year = :year AND repo_group_id = :repo_group_id + ORDER BY patches DESC) a + """) + + results = pd.read_sql(total_commits_SQL, self.db, + params={'year': year, 'repo_group_id': repo_group_id}) + else: + total_commits_SQL = s.sql.text(""" + SELECT SUM(patches)::int + FROM + (SELECT repo_id, email, year, patches + FROM dm_repo_annual + WHERE year = :year AND repo_id = :repo_id + ORDER BY patches DESC) a + """) + + results = pd.read_sql(total_commits_SQL, self.db, + params={'year': year, 'repo_id': repo_id}) + + total_commits = int(results.iloc[0]['sum']) + threshold_commits = round(threshold * total_commits) + + if not repo_id: + committers_SQL = s.sql.text(""" + SELECT + a.repo_group_id, + rg_name AS repo_group_name, + a.email, + SUM(a.patches)::int AS commits + FROM + (SELECT repo_group_id, email, year, patches + FROM dm_repo_group_annual + WHERE year = :year AND repo_group_id = :repo_group_id + ORDER BY patches DESC) a, repo_groups + WHERE a.repo_group_id = repo_groups.repo_group_id + GROUP BY a.repo_group_id, repo_group_name, a.email + ORDER BY commits DESC + """) + + results = pd.read_sql(committers_SQL, self.db, + params={'year': year, 'repo_group_id': repo_group_id}) + else: + committers_SQL = s.sql.text(""" + SELECT + a.repo_id, + repo.repo_name, + a.email, + SUM(a.patches)::int AS commits + FROM + (SELECT repo_id, email, year, patches + FROM dm_repo_annual + WHERE year = :year AND repo_id = :repo_id + ORDER BY patches DESC) a, repo + WHERE a.repo_id = repo.repo_id + GROUP BY a.repo_id, repo.repo_name, a.email + ORDER BY commits DESC + """) + + results = pd.read_sql(committers_SQL, self.db, + params={'year': year, 'repo_id': repo_id}) + + cumsum = 0 + for i, row in results.iterrows(): + cumsum += row['commits'] + if cumsum >= threshold_commits: + results = results[:i + 1] + break + + if not repo_id: + rg_name = results.iloc[0]['repo_group_name'] + results.loc[i+1] = [repo_group_id, rg_name, 'other_contributors', + int(total_commits - cumsum)] + else: + repo_name = results.iloc[0]['repo_name'] + results.loc[i+1] = [repo_id, repo_name, 'other_contributors', + int(total_commits - cumsum)] + + return results ##################################### ### UTILITIES ### diff --git a/augur/datasources/augur_db/routes.py b/augur/datasources/augur_db/routes.py --- a/augur/datasources/augur_db/routes.py +++ b/augur/datasources/augur_db/routes.py @@ -1392,6 +1392,47 @@ def get_repos_for_dosocs(): """ server.addRepoMetric(augur_db.cii_best_practices_badge, 'cii-best-practices-badge') + """ + @api {get} /repo-groups/:repo_group_id/avgerage-issue-resolution-time Average Issue Resolution Time (Repo Group) + @apiName average-issue-resolution-time-repo-group + @apiGroup Risk + @apiDescription The average issue resolution time. 
+ <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/business-risk.md">CHAOSS Metric Definition</a> + @apiParam {string} repo_group_id Repository Group ID + @apiSuccessExample {json} Success-Response: + [ + { + "repo_id": 21353, + "repo_name": "open_id_authentication", + "avg_issue_resolution_time": "1413 days 15:39:48" + }, + { + "repo_id": 21362, + "repo_name": "country_select", + "avg_issue_resolution_time": "140 days 09:37:58.2" + } + ] + """ + server.addRepoGroupMetric(augur_db.average_issue_resolution_time, 'average-issue-resolution-time') + + """ + @api {get} /repo-groups/:repo_group_id/repos/:repo_id/avgerage-issue-resolution-time Average Issue Resolution Time (Repo) + @apiName average-issue-resolution-time-repo + @apiGroup Risk + @apiDescription The average issue resolution time. + <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/business-risk.md">CHAOSS Metric Definition</a> + @apiParam {string} repo_group_id Repository Group ID. + @apiParam {string} repo_id Repository ID. + @apiSuccessExample {json} Success-Response: + [ + { + "repo_name": "maven-release", + "avg_issue_resolution_time": "276 days 13:54:13.2" + } + ] + """ + server.addRepoMetric(augur_db.average_issue_resolution_time, 'average-issue-resolution-time') + """ @api {get} /repo-groups/:repo_group_id/forks Forks (Repo Group) @apiName forks-repo-group @@ -2095,4 +2136,77 @@ def get_repos_for_dosocs(): } ] """ - server.addRepoMetric(augur_db.annual_lines_of_code_count_ranked_by_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-repo-in-repo-group') \ No newline at end of file + server.addRepoMetric(augur_db.annual_lines_of_code_count_ranked_by_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-repo-in-repo-group') + + """ + @api {get} /repo-groups/:repo_group_id/top-committers Top Committers (Repo Group) + @apiName top-committers-repo-group + @apiGroup Experimental + @apiDescription Returns a list of contributors contributing N% of all commits. + @apiParam {string} repo_group_id Repository Group ID + @apiParam {string} [year] Specify the year to return the results for. Default value: `current year` + @apiParam {string} [threshold=0.5] Specify N%. Accepts a value between `0` & `1` where `0` specifies + `0%` and `1` specifies `100%`. + @apiSuccessExample {json} Success-Response: + [ + { + "repo_group_id": 20, + "repo_group_name": "Rails", + "email": "[email protected]", + "commits": 502 + }, + { + "repo_group_id": 20, + "repo_group_name": "Rails", + "email": "[email protected]", + "commits": 246 + }, + { + "repo_group_id": 20, + "repo_group_name": "Rails", + "email": "[email protected]", + "commits": 119 + }, + { + "repo_group_id": "20", + "repo_group_name": "Rails", + "email": "other_contributors", + "commits": 1774 + } + ] + """ + server.addRepoGroupMetric(augur_db.top_committers, 'top-committers') + + """ + @api {get} /repo-groups/:repo_group_id/repos/:repo_id/top-committers Top Committers (Repo) + @apiName top-committers-repo + @apiGroup Experimental + @apiDescription Returns a list of contributors contributing N% of all commits. + @apiParam {string} repo_group_id Repository Group ID. + @apiParam {string} repo_id Repository ID. + @apiParam {string} [year] Specify the year to return the results for. Default value: `current year` + @apiParam {string} [threshold=0.5] Specify N%. Accepts a value between `0` & `1` where `0` specifies + `0%` and `1` specifies `100%`. 
+ @apiSuccessExample {json} Success-Response: + [ + { + "repo_id": 21334, + "repo_name": "graphql", + "email": "[email protected]", + "commits": 4 + }, + { + "repo_id": 21334, + "repo_name": "graphql", + "email": "[email protected]", + "commits": 3 + }, + { + "repo_id": "21334", + "repo_name": "graphql", + "email": "other_contributors", + "commits": 5 + } + ] + """ + server.addRepoMetric(augur_db.top_committers, 'top-committers')
diff --git a/augur/datasources/augur_db/test_augur_db.py b/augur/datasources/augur_db/test_augur_db.py --- a/augur/datasources/augur_db/test_augur_db.py +++ b/augur/datasources/augur_db/test_augur_db.py @@ -231,6 +231,15 @@ def test_cii_best_practices_badge(augur_db): # repo_group assert augur_db.cii_best_practices_badge(21).iloc[0]['tiered_percentage'] > 80 +def test_average_issue_resolution_time(augur_db): + #repo + assert augur_db.average_issue_resolution_time(24, 21464).isin( + ['maven-release', '276 days 13:54:13.2']).any().any() + + # repo_group + assert augur_db.average_issue_resolution_time(24).isin( + ['maven-release', '276 days 13:54:13.2']).any().any() + def test_languages(augur_db): # TODO pass @@ -253,8 +262,8 @@ def test_annual_lines_of_code_count_ranked_by_repo_in_repo_group(augur_db): assert augur_db.annual_lines_of_code_count_ranked_by_repo_in_repo_group(20, 21000,timeframe = 'year').iloc[0].net > 0 def test_annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(augur_db): - assert augur_db.annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(20).iloc[0].net > 0 - assert augur_db.annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(20, 21000).iloc[0].net > 0 + assert augur_db.annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(20).iloc[0].net > 0 + assert augur_db.annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(20, 21000).iloc[0].net > 0 def test_annual_commit_count_ranked_by_repo_in_repo_group(augur_db): assert augur_db.annual_commit_count_ranked_by_repo_in_repo_group(20).iloc[0].net > 0 @@ -263,8 +272,16 @@ def test_annual_commit_count_ranked_by_repo_in_repo_group(augur_db): assert augur_db.annual_commit_count_ranked_by_repo_in_repo_group(20, 21000,timeframe = 'year').iloc[0].net > 0 def test_annual_commit_count_ranked_by_new_repo_in_repo_group(augur_db): - assert augur_db.annual_commit_count_ranked_by_new_repo_in_repo_group(20).iloc[0].net > 0 - assert augur_db.annual_commit_count_ranked_by_new_repo_in_repo_group(20, 21000).iloc[0].net > 0 + assert augur_db.annual_commit_count_ranked_by_new_repo_in_repo_group(20).iloc[0].net > 0 + assert augur_db.annual_commit_count_ranked_by_new_repo_in_repo_group(20, 21000).iloc[0].net > 0 + +def test_top_committers(augur_db): + assert augur_db.top_committers(20).iloc[0]['commits'] > 0 + assert augur_db.top_committers(20, year=2017).iloc[0]['commits'] > 0 + assert augur_db.top_committers(20, year=2017, threshold=0.7).iloc[0]['commits'] > 0 + assert augur_db.top_committers(20, 21000).iloc[0]['commits'] > 0 + assert augur_db.top_committers(20, 21000, year=2017).iloc[0]['commits'] > 0 + assert augur_db.top_committers(20, 21000, year=2017, threshold=0.7).iloc[0]['commits'] > 0 def test_get_repos_for_dosocs(augur_db): assert augur_db.get_repos_for_dosocs().isin( diff --git a/augur/datasources/augur_db/test_augur_db_routes.py b/augur/datasources/augur_db/test_augur_db_routes.py --- a/augur/datasources/augur_db/test_augur_db_routes.py +++ b/augur/datasources/augur_db/test_augur_db_routes.py @@ -344,6 +344,18 @@ def test_cii_best_practices_badge_by_repo(augur_db_routes): assert response.status_code == 200 assert len(data) >= 1 +def test_average_issue_resolution_time_by_group(augur_db_routes): + response = requests.get('http://localhost:5000/api/unstable/repo-groups/24/average-issue-resolution-time') + data = response.json() + assert response.status_code == 200 + assert len(data) > 0 + +def test_average_issue_resolution_time_by_repo(augur_db_routes): + response = 
requests.get('http://localhost:5000/api/unstable/repo-groups/24/repos/21464/average-issue-resolution-time') + data = response.json() + assert response.status_code == 200 + assert len(data) > 0 + def test_languages_by_group(augur_db_routes): # TODO need data pass @@ -428,6 +440,20 @@ def test_annual_lines_of_code_count_ranked_by_repo_in_repo_group_by_group(augur_ assert len(data) >= 1 assert data[0]["net"] > 0 +def test_top_committers_by_repo(augur_db_routes): + response = requests.get('http://0.0.0.0:5000/api/unstable/repo-groups/22/repos/21334/top-committers') + data = response.json() + assert response.status_code == 200 + assert len(data) >= 1 + assert data[0]['commits'] > 0 + +def test_top_committers_by_group(augur_db_routes): + response = requests.get('http://0.0.0.0:5000/api/unstable/repo-groups/22/top-committers') + data = response.json() + assert response.status_code == 200 + assert len(data) >= 1 + assert data[0]['commits'] > 0 + def test_committer_by_repo(augur_db_routes): response = requests.get('http://localhost:5000/api/unstable/repo-groups/21/repos/21222/committers') data = response.json()
API Endpoint Request #### Calculate Contributions and Display Developers Contributing n % of all Commits (Repo and Repo Group) We want to build a pie chart that shows the commit share of the developers "inside the bus factor", and then provides a slice, as well, for "all other committers". 1. To orient ourselves, first, how many total commits are there? ```sql select sum(patches) from (select repo_group_id, email, year, patches from dm_repo_group_annual where year = 2017 order by patches desc)a; ``` For our example, there are 9,671 patches in a repository group of 112 repositories. 2. What is the distribution of committers and total commits? ```sql SELECT repo_group_id, email, SUM ( patches ) as commitsyear FROM ( SELECT repo_group_id, email, YEAR, patches FROM dm_repo_group_annual WHERE YEAR = 2017 ORDER BY patches DESC ) a GROUP BY repo_group_id, email order by commitsyear desc; ``` There are 783 committers in our sample. The top committer has 650 commits, the bottom committer has 1 commit, and there are 411 such individuals. 3. How many committers are in the set that contributes 50% of the 9,671 commits. - We know the total commits are 9,671, so we need to loop through committers in an ordered list, from the top down, until we arrive at the list of top contributors who contribute 50% of the total commits. In this case, `1/2*9,671 = 4,835.5`, and we round up to 4,836. - When we loop through top committers, keeping a cumulative total, we add each developer to the list until we exceed 4,836, at which point we have the list. There are 30 committers in our list (plus one row for everyone not in the "developers with the most contributions whose total commits add up to 50% (or other percent) of the total): ``` Developer Commits in a Year ``` 4. Our pie chart would then have 30 developers, and a 31st slice for "all other contributors". This is a large case, obviously. Perhaps a percentile that is lower would be appropriate. This is parameterizable. **Pseudo Code: ```python commits =1; for i in all_committers: commits = commits+developerCommits developerlist = add developer to list; if commits > totalcommits/2 break; ``` The end result we want is a list with the commits per developer until we reach 1/2 or some other share of the total commits PLUS, one slice for ALL the other commits made by people outside the group whose commit total, counted from the most commits down, adds up to that percentage (50% or otherwise). 5. The JSON we would want to return probably looks like: ```json [ { "user_id": 1, "email": "[email protected]", "commits": 333, "repo_group_name": "rails", "repo_group_id": 21000 }, { "user_id": 2, "email": "[email protected]", "commits": 331, "repo_group_name": "rails", "repo_group_id": 21000 }, { "user_id": 1, "email": "other_contributors", "commits": 133, "repo_group_name": "rails", "repo_group_id": 21000 } ] ```
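A runnable version of the loop sketched in the pseudo code above, assuming the per-committer counts have already been pulled into a pandas DataFrame with `email` and `commits` columns (column names follow the JSON example; the function name and sample data are illustrative, not the eventual endpoint):

```python
import pandas as pd

def committers_within_threshold(commits_by_author: pd.DataFrame, threshold: float = 0.5) -> pd.DataFrame:
    """Return the top committers whose commits add up to `threshold` of the total,
    plus one 'other_contributors' row for everyone else."""
    total = int(commits_by_author["commits"].sum())
    cutoff = round(threshold * total)

    ordered = commits_by_author.sort_values("commits", ascending=False).reset_index(drop=True)
    # Keep each row while the running total *before* it is still under the cutoff,
    # so the row that crosses the cutoff is included in the list.
    running_before = ordered["commits"].cumsum().shift(fill_value=0)
    top = ordered[running_before < cutoff].copy()

    # One extra slice for everyone outside the top group
    top.loc[len(top)] = ["other_contributors", total - int(top["commits"].sum())]
    return top

# Example usage with illustrative data
sample = pd.DataFrame({"email": ["[email protected]", "[email protected]", "[email protected]"],
                       "commits": [650, 400, 120]})
print(committers_within_threshold(sample, threshold=0.5))
```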
@sgoggins what do we name this?
2019-07-21T10:55:25Z
[]
[]
chaoss/augur
573
chaoss__augur-573
[ "572" ]
ab039945d047e29ea9f096f283508186d83a56b9
diff --git a/augur/metrics/pull_request/pull_request.py b/augur/metrics/pull_request/pull_request.py --- a/augur/metrics/pull_request/pull_request.py +++ b/augur/metrics/pull_request/pull_request.py @@ -78,9 +78,13 @@ def pull_requests_closed_no_merge(self, repo_group_id, repo_id=None, period='day if repo_id: closedNoMerge = s.sql.text(""" SELECT DATE_TRUNC(:period, pull_requests.pr_closed_at) AS closed_date, - COUNT(pull_request_id) - FROM pull_requests WHERE repo_id = :repo_id and pull_requests.pr_closed_at is NOT NULL and + COUNT(pull_request_id) as pr_count + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id = :repo_id + AND pull_requests.pr_closed_at is NOT NULL AND pull_requests.pr_merged_at is NULL + GROUP BY closed_date, pull_request_id + ORDER BY closed_date """) results = pd.read_sql(closedNoMerge, self.database, params={'repo_id': repo_id, 'period': period, 'begin_date': begin_date, @@ -89,9 +93,11 @@ def pull_requests_closed_no_merge(self, repo_group_id, repo_id=None, period='day else: closedNoMerge = s.sql.text(""" SELECT DATE_TRUNC(:period, pull_requests.pr_closed_at) AS closed_date, - COUNT(pull_request_id) - FROM pull_requests WHERE repo_id in (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + COUNT(pull_request_id) as pr_count + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id WHERE pull_requests.repo_id in (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) and pull_requests.pr_closed_at is NOT NULL and pull_requests.pr_merged_at is NULL + GROUP BY closed_date, pull_request_id + ORDER BY closed_date """) results = pd.read_sql(closedNoMerge, self.database, diff --git a/augur/metrics/pull_request/routes.py b/augur/metrics/pull_request/routes.py --- a/augur/metrics/pull_request/routes.py +++ b/augur/metrics/pull_request/routes.py @@ -384,7 +384,6 @@ def create_pull_request_routes(server): @apiName pull-request-closed-no-merge @apiGroup Experimental @apiDescription Timeseries of pull request which were closed but not merged - @apiParam {string} repo_group_id Repository Group ID. @apiParam {string} repo_id Repository ID. @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` @@ -392,8 +391,26 @@ def create_pull_request_routes(server): [ { "date": "2019-01-01T00:00:00.000Z", - "rate": 5.3333333333 + "pr_count": 3 } ] """ server.addRepoMetric(metrics.pull_requests_closed_no_merge, 'pull-requests-closed-no-merge') + + """ + @api {get} /repo-groups/:repo_group_id/pull-request-closed-no-merge Pull Request Closed but not merged(Repo) + @apiName pull-request-closed-no-merge + @apiGroup Experimental + @apiDescription Timeseries of pull request which were closed but not merged + @apiParam {string} repo_group_id Repository Group ID. + @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` + @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` + @apiSuccessExample {json} Success-Response: + [ + { + "date": "2019-01-01T00:00:00.000Z", + "pr_count": 3 + } + ] + """ + server.addRepoGroupMetric(metrics.pull_requests_closed_no_merge, 'pull-requests-closed-no-merge')
diff --git a/test/api/test_pull_request_routes.py b/test/api/test_pull_request_routes.py --- a/test/api/test_pull_request_routes.py +++ b/test/api/test_pull_request_routes.py @@ -20,8 +20,9 @@ def test_pull_requests_merge_contributor_new_by_repo(metrics): assert data[0]["count"] > 0 def test_pull_requests_closed_no_merge(metrics): - response = requests.get('http://localhost:5000/api/unstable/repo/20/pull-request-closed-no-merge') + response = requests.get('http://localhost:5000/api/unstable/repos/21000/pull-requests-closed-no-merge') data = response.json() assert response.status_code == 200 assert len(data) >= 1 - assert data[0]["count"] > 0 + assert data[0]["pr_count"] > 0 + diff --git a/test/metrics/test_pull_request_metrics.py b/test/metrics/test_pull_request_metrics.py --- a/test/metrics/test_pull_request_metrics.py +++ b/test/metrics/test_pull_request_metrics.py @@ -28,7 +28,7 @@ def test_pull_request_acceptance_rate(metrics): end_date='2019-12-31 23:59:59',group_by='year').iloc[0]['rate'] > 0 def test_pull_request_closed_no_merge(metrics): - assert metrics.pull_requests_closed_no_merge(24).iloc[0]['count'] > 0 + assert metrics.pull_requests_closed_no_merge(24).iloc[0]['pr_count'] > 0 assert metrics.pull_requests_closed_no_merge(24, 21000, begin_date='2018-1-1 00:00:00', - end_date='2019-12-31 23:59:59', group_by='year').iloc[0]['count'] > 0 + end_date='2019-12-31 23:59:59').iloc[0]['pr_count'] > 0
`pull_request_closed_no_merge` metric is broken **Describe the bug:** The `pull_request_closed_no_merge` metric returns a 500 internal server error. **To Reproduce:** Steps to reproduce the behavior: 1. Checkout `dev` 2. Run `augur run --disable-housekeeper` 3. Hit `http://localhost:5000/api/unstable/repo/21000/pull-request-closed-no-merge` 4. Observe error **Expected behavior:** I expected the metric to return a valid response object, even if it was empty. **Screenshots** If applicable, add screenshots to help explain your problem. **Desktop (please complete the following information):** - OS: macOS v 10.14.6 **Additional context** I noticed that there is a missing `GROUP_BY` clause in the SQL query, based on the usage of `COUNT`. This appears to be the main culprit of the issue. Broken Travis build: https://travis-ci.org/chaoss/augur/builds/657396384
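For reference, a minimal illustration of the fix described above: selecting `DATE_TRUNC(...)` alongside `COUNT(...)` requires grouping on the non-aggregated column in PostgreSQL, which is what the patch adds. This is a simplified query shape under that assumption, not the exact Augur SQL:

```python
import sqlalchemy as s

closed_no_merge_sql = s.sql.text("""
    SELECT DATE_TRUNC(:period, pr_closed_at) AS closed_date,
           COUNT(pull_request_id) AS pr_count
    FROM pull_requests
    WHERE repo_id = :repo_id
      AND pr_closed_at IS NOT NULL
      AND pr_merged_at IS NULL
    GROUP BY closed_date   -- the clause whose absence caused the 500 error
    ORDER BY closed_date
""")
```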
@ccarterlandis Has this been occurring only for the empty objects? I think It's a 404 error :sweat_smile: ``` > assert response.status_code == 200 E assert 404 == 200 E + where 404 = <Response [404]>.status_code ``` Check out this line of the metrics test: https://travis-ci.org/chaoss/augur/jobs/657396385#L581. You'll see the SQL error I'm referring to I didn't even notice this earlier, but the metrics API test is also failing because the route is incorrect. It should be `...unstable/repos/...` instead of `.../unstable/repo/` - this is why it's returning a 404. I'd also suggest using the same repo ID in both your metric and API test. > Check out this line of the metrics test: https://travis-ci.org/chaoss/augur/jobs/657396385#L581. You'll see the SQL error I'm referring to Gotcha, I see where the error is now. I'll send in a fix ASAP I'll fix both of them in a PR.
2020-03-02T20:44:54Z
[]
[]
chaoss/augur
776
chaoss__augur-776
[ "737" ]
1c299b232a46ce1d84090102fcefe4bc97c23c56
diff --git a/augur/__init__.py b/augur/__init__.py --- a/augur/__init__.py +++ b/augur/__init__.py @@ -1,10 +1 @@ #SPDX-License-Identifier: MIT - -import logging -import coloredlogs - -coloredlogs.install() -logger = logging.getLogger('augur') - -# Classes -from .application import Application, logger diff --git a/augur/application.py b/augur/application.py --- a/augur/application.py +++ b/augur/application.py @@ -4,72 +4,49 @@ """ import os -import time -import multiprocessing as mp +from pathlib import Path import logging +from logging import FileHandler, Formatter import coloredlogs import json -import pkgutil from beaker.cache import CacheManager from beaker.util import parse_cache_config_options import sqlalchemy as s import psycopg2 -from augur import logger from augur.metrics import Metrics -from augur.cli.configure import default_config +from augur.config import AugurConfig +from augur.logging import ROOT_AUGUR_DIRECTORY, initialize_logging, set_gunicorn_log_options -class Application(object): +logger = logging.getLogger(__name__) + +class Application(): """Initalizes all classes from Augur using a config file or environment variables""" - def __init__(self): + def __init__(self, offline_mode=False): """ Reads config, creates DB session, and initializes cache """ - self.config_file_name = 'augur.config.json' - self.__shell_config = None - self.__export_file = None - self.__env_file = None - self.config = default_config - self.env_config = {} - self.root_augur_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - default_config_path = self.root_augur_dir + '/' + self.config_file_name - using_config_file = False - - - config_locations = [self.config_file_name, default_config_path, f"/opt/augur/{self.config_file_name}"] - if os.getenv('AUGUR_CONFIG_FILE') is not None: - config_file_path = os.getenv('AUGUR_CONFIG_FILE') - using_config_file = True - else: - for index, location in enumerate(config_locations): - try: - f = open(location, "r+") - config_file_path = os.path.abspath(location) - using_config_file = True - f.close() - break - except FileNotFoundError: - pass - - if using_config_file: - try: - with open(config_file_path, 'r+') as config_file_handle: - self.config = json.loads(config_file_handle.read()) - except json.decoder.JSONDecodeError as e: - logger.warning('%s could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: %s', config_file_path, str(e)) - else: - logger.warning('%s could not be parsed, using defaults.') - - self.load_env_configuration() - - logger.setLevel(self.read_config("Development", "log_level")) + self.root_augur_dir = ROOT_AUGUR_DIRECTORY + self.config = AugurConfig(self.root_augur_dir) + + self.gunicorn_options = { + 'bind': '%s:%s' % (self.config.get_value("Server", "host"), self.config.get_value("Server", "port")), + 'workers': int(self.config.get_value('Server', 'workers')), + 'timeout': int(self.config.get_value('Server', 'timeout')) + } + + initialize_logging(self.config) + self.gunicorn_options.update(set_gunicorn_log_options()) + + self.logger = logger self.cache_config = { 'cache.type': 'file', 'cache.data_dir': 'runtime/cache/', 'cache.lock_dir': 'runtime/cache/' } + if not os.path.exists(self.cache_config['cache.data_dir']): os.makedirs(self.cache_config['cache.data_dir']) if not os.path.exists(self.cache_config['cache.lock_dir']): @@ -77,19 +54,19 @@ def __init__(self): cache_parsed = parse_cache_config_options(self.cache_config) self.cache = CacheManager(**cache_parsed) - self.database = self.__connect_to_database() - self.spdx_db = self.__connect_to_database(include_spdx=True) - - self.metrics = Metrics(self) + if offline_mode is False: + self.database = self._connect_to_database() + self.spdx_db = self._connect_to_database(include_spdx=True) + self.metrics = Metrics(self) - def __connect_to_database(self, include_spdx=False): - user = self.read_config('Database', 'user') - host = self.read_config('Database', 'host') - port = self.read_config('Database', 'port') - dbname = self.read_config('Database', 'name') + def _connect_to_database(self, include_spdx=False): + user = self.config.get_value('Database', 'user') + host = self.config.get_value('Database', 'host') + port = self.config.get_value('Database', 'port') + dbname = self.config.get_value('Database', 'name') database_connection_string = 'postgresql://{}:{}@{}:{}/{}'.format( - user, self.read_config('Database', 'password'), host, port, dbname + user, self.config.get_value('Database', 'password'), host, port, dbname ) csearch_path_options = 'augur_data' @@ -105,47 +82,5 @@ def __connect_to_database(self, include_spdx=False): return engine except s.exc.OperationalError as e: logger.fatal(f"Unable to connect to the database. 
Terminating...") - exit() - - def read_config(self, section, name=None): - """ - Read a variable in specified section of the config file, unless provided an environment variable - - :param section: location of given variable - :param name: name of variable - """ - if name is not None: - try: - value = self.config[section][name] - except KeyError as e: - value = default_config[section][name] - else: - try: - value = self.config[section] - except KeyError as e: - value = default_config[section] - - return value - - def load_env_configuration(self): - self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') - self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') - self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') - self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') - self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') - self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') - self.set_env_value(section='Development', name='log_level', environment_variable='AUGUR_LOG_LEVEL') - - def set_env_value(self, section, name, environment_variable, sub_config=None): - """ - Sets names and values of specified config section according to their environment variables. - """ - # using sub_config lets us grab values from nested config blocks - if sub_config is None: - sub_config = self.config - - env_value = os.getenv(environment_variable) + raise(e) - if env_value is not None: - self.env_config[environment_variable] = env_value - sub_config[section][name] = env_value diff --git a/augur/cli/__init__.py b/augur/cli/__init__.py --- a/augur/cli/__init__.py +++ b/augur/cli/__init__.py @@ -0,0 +1,18 @@ +from functools import update_wrapper + +import click +from augur.application import Application + +def pass_application(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application() + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def pass_config(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application(offline_mode=True).config + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) \ No newline at end of file diff --git a/augur/runtime.py b/augur/cli/_multicommand.py similarity index 63% rename from augur/runtime.py rename to augur/cli/_multicommand.py --- a/augur/runtime.py +++ b/augur/cli/_multicommand.py @@ -6,16 +6,14 @@ import os import sys import click +import importlib import augur.application CONTEXT_SETTINGS = dict(auto_envvar_prefix='AUGUR') class AugurMultiCommand(click.MultiCommand): - def __commands_folder(self): - return os.path.abspath( - os.path.join(os.path.dirname(__file__), 'cli') - ) + return os.path.abspath(os.path.dirname(__file__)) def list_commands(self, ctx): rv = [] @@ -26,13 +24,8 @@ def list_commands(self, ctx): return rv def get_command(self, ctx, name): - # try: - if sys.version_info[0] == 2: - name = name.encode('ascii', 'replace') - mod = __import__('augur.cli.' + name, - None, None, ['cli']) - - return mod.cli + module = importlib.import_module('.' 
+ name, 'augur.cli') + return module.cli @click.command(cls=AugurMultiCommand, context_settings=CONTEXT_SETTINGS) @click.pass_context @@ -40,11 +33,4 @@ def run(ctx): """ Augur is an application for open source community health analytics """ - - app = augur.application.Application() - ctx.obj = app - return ctx.obj - - -if __name__ == '__main__': - run() + return ctx diff --git a/augur/cli/configure.py b/augur/cli/configure.py --- a/augur/cli/configure.py +++ b/augur/cli/configure.py @@ -6,190 +6,13 @@ import os import click import json +import logging -from augur import logger +from augur.config import default_config, ENVVAR_PREFIX +from augur.cli import pass_config +from augur.logging import ROOT_AUGUR_DIRECTORY -ENVVAR_PREFIX = "AUGUR_" - -default_config = { - "Database": { - "name": "augur", - "host": "localhost", - "key": "key", - "password": "augur", - "port": 5432, - "user": "augur" - }, - "Housekeeper": { - "jobs": [ - { - "all_focused": 1, - "delay": 150000, - "given": [ - "github_url" - ], - "model": "issues", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "pull_request_commits", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "repo_info", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "repo_group" - ], - "model": "commits", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "github_url" - ], - "model": "pull_requests", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "contributors", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "insights", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "badges", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "value", - "repo_group_id": 0 - }, - { - "delay": 100000, - "given": [ - "github_url" - ], - "model": "pull_request_files", - "repo_group_id": 0 - } - ] - }, - "Workers": { - "facade_worker": { - "port": 50100, - "repo_directory": "repos/", - "switch": 1, - "workers": 1 - }, - "github_worker": { - "port": 50200, - "switch": 1, - "workers": 1 - }, - "insight_worker": { - "port": 50300, - "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"}, - "contamination": 0.041, - "switch": 0, - "workers": 1, - "training_days": 365, - "anomaly_days": 2 - }, - "linux_badge_worker": { - "port": 50400, - "switch": 1, - "workers": 1 - }, - "metric_status_worker": { - "port": 50500, - "switch": 0, - "workers": 1 - }, - "pull_request_worker": { - "port": 50600, - "switch": 1, - "workers": 1 - }, - "repo_info_worker": { - "port": 50700, - "switch": 1, - "workers": 1 - }, - "value_worker": { - "port": 50800, - "scc_bin": "scc", - "switch": 0, - "workers": 1 - }, - "contributor_worker": { - "port": 50900, - "switch": 1, - "workers": 1 - } - }, - "Facade": { - "check_updates": 1, - "clone_repos": 1, - "create_xlsx_summary_files": 1, - "delete_marked_repos": 0, - "fix_affiliations": 1, - "force_analysis": 1, - "force_invalidate_caches": 1, - "force_updates": 1, - "limited_run": 0, - "multithreaded": 0, - "nuke_stored_affiliations": 0, - "pull_repos": 1, - "rebuild_caches": 1, - "run_analysis": 1 - }, - "Server": { - "cache_expire": "3600", - "host": "0.0.0.0", - "port": "5000", - "workers": 4, - "timeout": 60 - }, - "Frontend": { - "host": "0.0.0.0", - "port": 
"5000" - }, - "Development": { - "log_level": "INFO" - } - } +logger = logging.getLogger("augur.cli") @click.group('configure', short_help='Generate an augur.config.json') def cli(): @@ -204,7 +27,9 @@ def cli(): @click.option('--github_api_key', help="GitHub API key for data collection from the GitHub API", envvar=ENVVAR_PREFIX + 'GITHUB_API_KEY') @click.option('--facade_repo_directory', help="Directory on the database server where Facade should clone repos", envvar=ENVVAR_PREFIX + 'FACADE_REPO_DIRECTORY') @click.option('--rc-config-file', help="File containing existing config whose values will be used as the defaults", type=click.Path(exists=True)) -def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file): [email protected]('--gitlab_api_key', help="GitLab API key for data collection from the GitLab API", envvar=ENVVAR_PREFIX + 'GITLAB_API_KEY') +@pass_config +def generate(augur_config, db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file, gitlab_api_key): """ Generate an augur.config.json """ @@ -250,11 +75,13 @@ def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, fa config['Database']['password'] = db_password if github_api_key is not None: config['Database']['key'] = github_api_key + if gitlab_api_key is not None: + config['Database']['gitlab_api_key'] = gitlab_api_key if facade_repo_directory is not None: config['Workers']['facade_worker']['repo_directory'] = facade_repo_directory try: - with open(os.path.abspath('augur.config.json'), 'w') as f: + with open(os.path.abspath(ROOT_AUGUR_DIRECTORY + '/augur.config.json'), 'w') as f: json.dump(config, f, indent=4) logger.info('augur.config.json successfully created') except Exception as e: diff --git a/augur/cli/db.py b/augur/cli/db.py --- a/augur/cli/db.py +++ b/augur/cli/db.py @@ -1,5 +1,6 @@ from os import walk, chdir, environ, chmod, path import os +import logging from sys import exit import stat from collections import OrderedDict @@ -12,7 +13,9 @@ import pandas as pd from sqlalchemy import exc -from augur import logger +from augur.cli import pass_config, pass_application + +logger = logging.getLogger("augur.cli") @click.group('db', short_help='Database utilities') def cli(): @@ -20,14 +23,12 @@ def cli(): @cli.command('add-repos') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repos(ctx, filename): +@pass_application +def add_repos(augur_app, filename): """ Add repositories to Augur's database """ - app = ctx.obj - - df = app.database.execute(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups")) + df = augur_app.database.execute(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups")) repo_group_IDs = [group[0] for group in df.fetchall()] insertSQL = s.sql.text(""" @@ -41,33 +42,29 @@ def add_repos(ctx, filename): for row in data: logger.info(f"Inserting repo with Git URL `{row[1]}` into repo group {row[0]}") if int(row[0]) in repo_group_IDs: - result = app.database.execute(insertSQL, repo_group_id=int(row[0]), repo_git=row[1]) + result = augur_app.database.execute(insertSQL, repo_group_id=int(row[0]), repo_git=row[1]) else: logger.warn(f"Invalid repo group id specified for {row[1]}, skipping.") @cli.command('get-repo-groups') [email protected]_context -def get_repo_groups(ctx): +@pass_application +def get_repo_groups(augur_app): """ List all repo groups and their associated IDs """ - app = ctx.obj - - df = pd.read_sql(s.sql.text("SELECT 
repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), app.database) + df = pd.read_sql(s.sql.text("SELECT repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), augur_app.database) print(df) return df @cli.command('add-repo-groups') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repo_groups(ctx, filename): +@pass_application +def add_repo_groups(augur_app, filename): """ Create new repo groups in Augur's database """ - app = ctx.obj - - df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), app.database) + df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), augur_app.database) repo_group_IDs = df['repo_group_id'].values.tolist() insert_repo_group_sql = s.sql.text(""" @@ -80,51 +77,48 @@ def add_repo_groups(ctx, filename): logger.info(f"Inserting repo group with name {row[1]} and ID {row[0]}...") if int(row[0]) not in repo_group_IDs: repo_group_IDs.append(int(row[0])) - app.database.execute(insert_repo_group_sql, repo_group_id=int(row[0]), repo_group_name=row[1]) + augur_app.database.execute(insert_repo_group_sql, repo_group_id=int(row[0]), repo_group_name=row[1]) else: logger.info(f"Repo group with ID {row[1]} for repo group {row[1]} already exists, skipping...") @cli.command('update-repo-directory') @click.argument('repo_directory') [email protected]_context -def update_repo_directory(ctx, repo_directory): +@pass_application +def update_repo_directory(augur_app, repo_directory): """ Update Facade worker repo cloning directory """ - app = ctx.obj - updateRepoDirectorySQL = s.sql.text(""" UPDATE augur_data.settings SET VALUE = :repo_directory WHERE setting='repo_directory'; """) - app.database.execute(updateRepoDirectorySQL, repo_directory=repo_directory) + augur_app.database.execute(updateRepoDirectorySQL, repo_directory=repo_directory) logger.info(f"Updated Facade repo directory to: {repo_directory}") # get_db_version is a helper function to print_db_version and upgrade_db_version -def get_db_version(app): +def get_db_version(augur_app): db_version_sql = s.sql.text(""" SELECT * FROM augur_operations.augur_settings WHERE setting = 'augur_data_version' """) - return int(app.database.execute(db_version_sql).fetchone()[2]) + return int(augur_app.database.execute(db_version_sql).fetchone()[2]) @cli.command('print-db-version') [email protected]_context -def print_db_version(ctx): +@pass_application +def print_db_version(augur_app): """ Get the version of the configured database """ - print(get_db_version(ctx.obj)) + print(get_db_version(augur_app)) @cli.command('upgrade-db-version') [email protected]_context -def upgrade_db_version(ctx): +@pass_application +def upgrade_db_version(augur_app): """ Upgrade the configured database to the latest version """ - app = ctx.obj - check_pgpass_credentials(app.config) - current_db_version = get_db_version(app) + check_pgpass_credentials(augur_app.config.get_raw_config()) + current_db_version = get_db_version(augur_app) update_scripts_filenames = [] for (_, _, filenames) in walk('schema/generate'): @@ -148,18 +142,17 @@ def upgrade_db_version(ctx): for target_version, script_location in target_version_script_map.items(): if target_version == current_db_version + 1: logger.info(f"Upgrading from {current_db_version} to {target_version}") - run_psql_command_in_database(app, '-f', f"schema/generate/{script_location}") + run_psql_command_in_database(augur_app, '-f', f"schema/generate/{script_location}") current_db_version += 1 
 @cli.command('check-for-upgrade')
[email protected]_context
-def check_for_upgrade(ctx):
+@pass_application
+def check_for_upgrade(augur_app):
     """
     Upgrade the configured database to the latest version
     """
-    app = ctx.obj
-    check_pgpass_credentials(app.config)
-    current_db_version = get_db_version(app)
+    check_pgpass_credentials(augur_app.config.get_raw_config())
+    current_db_version = get_db_version(augur_app)

     update_scripts_filenames = []
     for (_, _, filenames) in walk('schema/generate'):
@@ -184,14 +177,13 @@ def check_for_upgrade(ctx):


 @cli.command('create-schema')
[email protected]_context
-def create_schema(ctx):
+@pass_application
+def create_schema(augur_app):
     """
     Create schema in the configured database
     """
-    app = ctx.obj
-    check_pgpass_credentials(app.config)
-    run_psql_command_in_database(app, '-f', 'schema/create_schema.sql')
+    check_pgpass_credentials(augur_app.config.get_raw_config())
+    run_psql_command_in_database(augur_app, '-f', 'schema/create_schema.sql')

 def generate_key(length):
     return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
@@ -202,46 +194,40 @@ def generate_api_key(ctx):
     """
     Generate and set a new Augur API key
     """
-    app = ctx.obj
     key = generate_key(32)
     ctx.invoke(update_api_key, api_key=key)
     print(key)

 @cli.command('update-api-key')
 @click.argument("api_key")
[email protected]_context
-def update_api_key(ctx, api_key):
+@pass_application
+def update_api_key(augur_app, api_key):
     """
     Update the API key in the database to the given key
     """
-    app = ctx.obj
-
     update_api_key_sql = s.sql.text("""
         UPDATE augur_operations.augur_settings SET VALUE = :api_key WHERE setting='augur_api_key';
     """)

-    app.database.execute(update_api_key_sql, api_key=api_key)
+    augur_app.database.execute(update_api_key_sql, api_key=api_key)
     logger.info(f"Update Augur API key to: {api_key}")

 @cli.command('get-api-key')
[email protected]_context
-def get_api_key(ctx):
-    app = ctx.obj
-
+@pass_application
+def get_api_key(augur_app):
     get_api_key_sql = s.sql.text("""
         SELECT value FROM augur_operations.augur_settings WHERE setting='augur_api_key';
     """)

     try:
-        print(app.database.execute(get_api_key_sql).fetchone()[0])
+        print(augur_app.database.execute(get_api_key_sql).fetchone()[0])
     except TypeError:
         logger.warn("No Augur API key found.")

 @cli.command('check-pgpass', short_help="Check the ~/.pgpass file for Augur's database credentials")
[email protected]_context
-def check_pgpass(ctx):
-    app = ctx.obj
-    check_pgpass_credentials(app.config)
+@pass_config
+def check_pgpass(config):
+    check_pgpass_credentials(config.get_raw_config())

 @cli.command('init-database')
 @click.option('--default-db-name', default='postgres')
@@ -252,12 +238,10 @@ def check_pgpass(ctx):
 @click.option('--target-password', default='augur')
 @click.option('--host', default='localhost')
 @click.option('--port', default='5432')
[email protected]_context
-def init_database(ctx, default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port):
+def init_database(default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port):
     """
     Create database with the given credentials using the given maintenance database
     """
-    app = ctx.obj
     config = {
         'Database': {
             'name': default_db_name,
@@ -276,15 +260,15 @@ def init_database(ctx, default_db_name, default_user, default_password, target_d

 def run_db_creation_psql_command(host, port, user, name, command):
     call(['psql', '-h', host, '-p', port, '-U', user, '-d', name, '-a', '-w', '-c', command])

-def run_psql_command_in_database(app, target_type, target):
+def run_psql_command_in_database(augur_app, target_type, target):
     if target_type not in ['-f', '-c']:
         logger.fatal("Invalid target type. Exiting...")
         exit(1)

-    call(['psql', '-h', app.read_config('Database', 'host'),\
-        '-d', app.read_config('Database', 'name'),\
-        '-U', app.read_config('Database', 'user'),\
-        '-p', str(app.read_config('Database', 'port')),\
+    call(['psql', '-h', augur_app.config.get_value('Database', 'host'),\
+        '-d', augur_app.config.get_value('Database', 'name'),\
+        '-U', augur_app.config.get_value('Database', 'user'),\
+        '-p', str(augur_app.config.get_value('Database', 'port')),\
         '-a', '-w', target_type, target
     ])
diff --git a/augur/cli/logging.py b/augur/cli/logging.py
new file mode 100644
--- /dev/null
+++ b/augur/cli/logging.py
@@ -0,0 +1,84 @@
+import click
+import os
+from os import walk
+from augur.logging import AUGUR_LOG_DIR, WORKER_LOG_DIR
+
[email protected]("logging", short_help="View Augur's log files")
+def cli():
+    pass
+
[email protected]("directory")
+def directory():
+    """
+    Print the location of Augur's logs directory
+    """
+    print(AUGUR_LOG_DIR)
+
[email protected]("tail")
[email protected]("lines", default=20)
+def tail(lines):
+    """
+    Output the last n lines of the main Augur and worker logfiles
+    """
+    if lines is None:
+        lines = 20
+
+    files = []
+    directories = []
+    for (_, _, filenames) in walk(AUGUR_LOG_DIR):
+        for file in filenames:
+            result = tail(open(AUGUR_LOG_DIR + "/" + file), lines)
+            print("********** Logfile: " + file)
+            for log in result:
+                print(log.strip())
+            print()
+        break
+
+    files = []
+    directories = []
+    for (dirpath, dirnames, filenames) in walk(WORKER_LOG_DIR):
+        directories.extend(dirnames)
+        break
+
+    for directory in directories:
+        specific_worker_log_dir = WORKER_LOG_DIR + directory
+        for (_, _, filenames) in walk(specific_worker_log_dir):
+            files.extend(filenames)
+
+            for file in [file for file in filenames if "collection" in file]:
+                result = tail(open(specific_worker_log_dir + "/" + file), lines)
+                print("********** Logfile: " + file)
+                for log in result:
+                    print(log.strip())
+                print()
+            break
+
+def tail(f, lines=20, _buffer=4098):
+    lines_found = []
+
+    # block counter will be multiplied by buffer
+    # to get the block size from the end
+    block_counter = -1
+
+    # loop until we find X lines
+    while len(lines_found) < lines:
+        try:
+            f.seek(block_counter * _buffer, os.SEEK_END)
+        except IOError: # either file is too small, or too many lines requested
+            f.seek(0)
+            lines_found = f.readlines()
+            break
+
+        lines_found = f.readlines()
+
+        # we found enough lines, get out
+        # Removed this line because it was redundant the while will catch
+        # it, I left it for history
+        # if len(lines_found) > lines:
+        #   break
+
+        # decrement the block counter to get the
+        # next X bytes
+        block_counter -= 1
+
+    return lines_found[-lines:]
\ No newline at end of file
diff --git a/augur/cli/run.py b/augur/cli/run.py
--- a/augur/cli/run.py
+++ b/augur/cli/run.py
@@ -4,187 +4,148 @@
 """
 from copy import deepcopy
-import os, time, atexit, subprocess, click
+import os, time, atexit, subprocess, click, atexit, logging
 import multiprocessing as mp
 import gunicorn.app.base
-from gunicorn.six import iteritems
 from gunicorn.arbiter import Arbiter
-from augur.housekeeper.housekeeper import Housekeeper
-from augur import logger
+from augur.housekeeper.housekeeper import Housekeeper
+from augur.logging import reset_logfiles
 from augur.server import Server
-
 from
augur.cli.util import kill_processes -import time +from augur.cli import pass_config, pass_application + +logger = logging.getLogger("augur") @click.command("run") @click.option("--disable-housekeeper", is_flag=True, default=False, help="Turns off the housekeeper") @click.option("--skip-cleanup", is_flag=True, default=False, help="Disables the old process cleanup that runs before Augur starts") +@pass_application @click.pass_context -def cli(ctx, disable_housekeeper, skip_cleanup): +def cli(ctx, augur_app, disable_housekeeper, skip_cleanup): """ Start Augur's backend server """ + reset_logfiles() if not skip_cleanup: logger.info("Cleaning up old Augur processes. Just a moment please...") ctx.invoke(kill_processes) time.sleep(2) else: - logger.info("Skipping cleanup processes.") + logger.info("Skipping cleanup processes...") - def get_process_id(name): - """Return process ids found by name or command - """ - child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False) - response = child.communicate()[0] - return [int(pid) for pid in response.split()] + logger.info('Initializing...') + master = initialize_components(augur_app, disable_housekeeper) + logger.info('Starting Gunicorn server in the background...') + Arbiter(master).run() - app = ctx.obj - - mp.set_start_method('forkserver', force=True) +def initialize_components(augur_app, disable_housekeeper): master = None - manager = None broker = None housekeeper = None - - logger.info("Booting broker and its manager...") - manager = mp.Manager() - broker = manager.dict() - - controller = app.read_config('Workers') - worker_pids = [] worker_processes = [] + mp.set_start_method('forkserver', force=True) if not disable_housekeeper: - if not controller: - return - for worker in controller.keys(): - if not controller[worker]['switch']: - continue - logger.info("Your config has the option set to automatically boot {} instances of the {}".format(controller[worker]['workers'], worker)) - pids = get_process_id("/bin/sh -c cd workers/{} && {}_start".format(worker, worker)) - worker_pids += pids - if len(pids) > 0: - worker_pids.append(pids[0] + 1) - pids.append(pids[0] + 1) - logger.info("Found and preparing to kill previous {} worker pids: {}".format(worker,pids)) - for pid in pids: - try: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - - @atexit.register - def exit(): - try: - for pid in worker_pids: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - for process in worker_processes: - logger.info("Shutting down worker process with pid: {} ...".format(process.pid)) - process.terminate() + logger.info("Booting broker and its manager...") + manager = mp.Manager() + broker = manager.dict() - if master is not None: - master.halt() - logger.info("Shutting down housekeeper updates...") - if housekeeper is not None: - housekeeper.shutdown_updates() - - # if hasattr(manager, "shutdown"): - # wait for the spawner and the worker threads to go down - # - if manager is not None: - manager.shutdown() - # check if it is still alive and kill it if necessary - # if manager._process.is_alive(): - manager._process.terminate() - - # Prevent multiprocessing's atexit from conflicting with gunicorn - logger.info("Killing main augur process with PID: {}".format(os.getpid())) - os.kill(os.getpid(), 9) - os._exit(0) - - if not disable_housekeeper: logger.info("Booting housekeeper...") - jobs = deepcopy(app.read_config('Housekeeper', 'jobs')) - try: - housekeeper = 
Housekeeper( - jobs, - broker, - broker_host=app.read_config('Server', 'host'), - broker_port=app.read_config('Server', 'port'), - user=app.read_config('Database', 'user'), - password=app.read_config('Database', 'password'), - host=app.read_config('Database', 'host'), - port=app.read_config('Database', 'port'), - dbname=app.read_config('Database', 'name') - ) - except KeyboardInterrupt as e: - exit() - + jobs = deepcopy(augur_app.config.get_value('Housekeeper', 'jobs')) + housekeeper = Housekeeper( + jobs, + broker, + broker_host=augur_app.config.get_value('Server', 'host'), + broker_port=augur_app.config.get_value('Server', 'port'), + user=augur_app.config.get_value('Database', 'user'), + password=augur_app.config.get_value('Database', 'password'), + host=augur_app.config.get_value('Database', 'host'), + port=augur_app.config.get_value('Database', 'port'), + dbname=augur_app.config.get_value('Database', 'name') + ) logger.info("Housekeeper has finished booting.") - if controller: - for worker in controller.keys(): - if controller[worker]['switch']: - for i in range(controller[worker]['workers']): - logger.info("Booting {} #{}".format(worker, i + 1)) - worker_process = mp.Process(target=worker_start, kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) - worker_process.start() - worker_processes.append(worker_process) - - host = app.read_config('Server', 'host') - port = app.read_config('Server', 'port') - workers = int(app.read_config('Server', 'workers')) - timeout = int(app.read_config('Server', 'timeout')) - options = { - 'bind': '%s:%s' % (host, port), - 'workers': workers, - 'accesslog': '-', - 'access_log_format': '%(h)s - %(t)s - %(r)s', - 'timeout': timeout - } - logger.info('Starting server...') - master = Arbiter(AugurGunicornApp(options, manager=manager, broker=broker, housekeeper=housekeeper)).run() + controller = augur_app.config.get_section('Workers') + + for worker in controller.keys(): + if controller[worker]['switch']: + logger.debug("Your config has the option set to automatically boot {} instances of the {}".format(controller[worker]['workers'], worker)) + for i in range(controller[worker]['workers']): + logger.info("Booting {} #{}".format(worker, i + 1)) + worker_process = mp.Process(target=worker_start, kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) + worker_process.start() + worker_processes.append(worker_process) + + atexit.register(exit, worker_processes, master, housekeeper, manager) + + return AugurGunicornApp(augur_app.gunicorn_options, manager=manager, broker=broker, housekeeper=housekeeper, augur_app=augur_app) def worker_start(worker_name=None, instance_number=0, worker_port=None): - time.sleep(120 * instance_number) - destination = subprocess.DEVNULL try: - destination = open("workers/{}/worker_{}.log".format(worker_name, worker_port), "a+") - except IOError as e: - logger.error("Error opening log file for auto-started worker {}: {}".format(worker_name, e)) - process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) - logger.info("{} booted.".format(worker_name)) + time.sleep(30 * instance_number) + destination = subprocess.DEVNULL + process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) + logger.info("{} #{} booted.".format(worker_name,instance_number+1)) + except 
KeyboardInterrupt as e:
+        pass
+
+def exit(worker_processes, master, housekeeper, manager):
+    if worker_processes:
+        for process in worker_processes:
+            logger.info("Shutting down worker process with pid: {}...".format(process.pid))
+            process.terminate()
+
+    if master is not None:
+        logger.info("Shutting down Gunicorn server...")
+        master.halt()
+
+    if housekeeper is not None:
+        logger.info("Shutting down housekeeper updates...")
+        housekeeper.shutdown_updates()
+
+    if manager is not None:
+        logger.info("Shutting down manager...")
+        manager.shutdown()
+
+    logger.info("Killing main augur process with PID: {}".format(os.getpid()))
+    os._exit(0)

 class AugurGunicornApp(gunicorn.app.base.BaseApplication):
     """
     Loads configurations, initializes Gunicorn, loads server
     """
-    def __init__(self, options=None, manager=None, broker=None, housekeeper=None):
+    def __init__(self, options=None, manager=None, broker=None, housekeeper=None, augur_app=None):
         self.options = options or {}
         self.manager = manager
         self.broker = broker
         self.housekeeper = housekeeper
+        self.augur_app = augur_app
+        self.server = None
         super(AugurGunicornApp, self).__init__()
-        # self.cfg.pre_request.set(pre_request)

     def load_config(self):
         """
         Sets the values for configurations
         """
-        config = dict([(key, value) for key, value in iteritems(self.options)
-            if key in self.cfg.settings and value is not None])
-        for key, value in iteritems(config):
+        config = {key: value for key, value in self.options.items()
+            if key in self.cfg.settings and value is not None}
+        for key, value in config.items():
             self.cfg.set(key.lower(), value)

-    def load(self):
+    def get_augur_app(self):
         """
         Returns the loaded server
         """
-        server = Server(manager=self.manager, broker=self.broker, housekeeper=self.housekeeper)
-        return server.app
+        self.load()
+        return self.server.augur_app

+    def load(self):
+        """
+        Returns the loaded server
+        """
+        if self.server is None:
+            self.server = Server(manager=self.manager, broker=self.broker, housekeeper=self.housekeeper, augur_app=self.augur_app)
+        return self.server.app
diff --git a/augur/cli/util.py b/augur/cli/util.py
--- a/augur/cli/util.py
+++ b/augur/cli/util.py
@@ -5,6 +5,7 @@
 import os
 import signal
+import logging
 from subprocess import call, run

 import psutil
@@ -12,27 +13,27 @@
 import pandas as pd
 import sqlalchemy as s

-from augur import logger
-from augur.cli.configure import default_config
+from augur.cli import pass_config, pass_application
+
+logger = logging.getLogger("augur.cli")

 @click.group('util', short_help='Miscellaneous utilities')
 def cli():
     pass

 @cli.command('export-env')
[email protected]_context
-def export_env(ctx):
+@pass_config
+def export_env(config):
     """
     Exports your GitHub key and database credentials
     """
-    app = ctx.obj
     export_file = open(os.getenv('AUGUR_EXPORT_FILE', 'augur_export_env.sh'), 'w+')
     export_file.write('#!/bin/bash')
     export_file.write('\n')
     env_file = open(os.getenv('AUGUR_ENV_FILE', 'docker_env.txt'), 'w+')

-    for env_var in app.env_config.items():
+    for env_var in config.get_env_config().items():
         export_file.write('export ' + env_var[0] + '="' + str(env_var[1]) + '"\n')
         env_file.write(env_var[0] + '=' + str(env_var[1]) + '\n')
@@ -40,8 +41,8 @@ def export_env(ctx):
     env_file.close()

 @cli.command('kill')
[email protected]_context
-def kill_processes(ctx):
+@pass_config
+def kill_processes(config):
     """
     Terminates all currently running backend Augur processes, including any workers. Will only work in a virtual environment.
""" @@ -57,7 +58,8 @@ def kill_processes(ctx): pass @cli.command('list',) -def list_processes(): +@pass_config +def list_processes(config): """ Outputs the name and process ID (PID) of all currently running backend Augur processes, including any workers. Will only work in a virtual environment. """ @@ -78,13 +80,11 @@ def get_augur_processes(): return processes @cli.command('repo-reset') [email protected]_context -def repo_reset(ctx): +@pass_application +def repo_reset(augur_app): """ Refresh repo collection to force data collection """ - app = ctx.obj - - app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") + augur_app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") logger.info("Repos successfully reset") diff --git a/augur/config.py b/augur/config.py new file mode 100644 --- /dev/null +++ b/augur/config.py @@ -0,0 +1,331 @@ +import os +import json +import logging + +ENVVAR_PREFIX = "AUGUR_" + +default_config = { + "Database": { + "name": "augur", + "host": "localhost", + "key": "key", + "password": "augur", + "port": 5432, + "user": "augur", + "gitlab_api_key":"gitlab_api_key" + }, + "Housekeeper": { + "jobs": [ + { + "all_focused": 1, + "delay": 150000, + "given": [ + "github_url" + ], + "model": "issues", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "pull_request_commits", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "repo_info", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "repo_group" + ], + "model": "commits", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "github_url" + ], + "model": "pull_requests", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "contributors", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "insights", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "badges", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "value", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "pull_request_files", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "releases", + "repo_group_id": 0 + } + ] + }, + "Workers": { + "facade_worker": { + "port": 50100, + "repo_directory": "repos/", + "switch": 1, + "workers": 1 + }, + "github_worker": { + "port": 50200, + "switch": 1, + "workers": 1 + }, + "insight_worker": { + "port": 50300, + "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", + "reviews": "pull_requests", "contributors-new": "new_contributors"}, + "confidence_interval": 95, + "contamination": 0.041, + "switch": 0, + "workers": 1, + "training_days": 365, + "anomaly_days": 2 + }, + "linux_badge_worker": { + "port": 50400, + "switch": 1, + "workers": 1 + }, + "metric_status_worker": { + "port": 50500, + "switch": 0, + "workers": 1 + }, + "pull_request_worker": { + "port": 50600, + "switch": 1, + "workers": 1 + }, + "repo_info_worker": { + "port": 50700, + "switch": 1, + "workers": 1 + }, + "value_worker": { + "port": 50800, + "scc_bin": "scc", + "switch": 0, + "workers": 1 + }, + "contributor_worker": { + "port": 50900, + "switch": 1, + "workers": 1 + }, + 
"gitlab_issues_worker": { + "port": 51000, + "switch": 1, + "workers": 1 + }, + "release_worker": { + "port": 51100, + "switch": 1, + "workers": 1 + } + }, + "Facade": { + "check_updates": 1, + "clone_repos": 1, + "create_xlsx_summary_files": 1, + "delete_marked_repos": 0, + "fix_affiliations": 1, + "force_analysis": 1, + "force_invalidate_caches": 1, + "force_updates": 1, + "limited_run": 0, + "multithreaded": 0, + "nuke_stored_affiliations": 0, + "pull_repos": 1, + "rebuild_caches": 1, + "run_analysis": 1 + }, + "Server": { + "cache_expire": "3600", + "host": "0.0.0.0", + "port": "5000", + "workers": 4, + "timeout": 60 + }, + "Frontend": { + "host": "0.0.0.0", + "port": "5000" + }, + "Development": { + "log_level": "INFO", + "verbose": 0, + "quiet": 0, + "debug": 0 + } + } + +logger = logging.getLogger(__name__) + +class AugurConfig(): + """docstring for AugurConfig""" + def __init__(self, root_augur_dir): + self._default_config_file_name = 'augur.config.json' + self._root_augur_dir = root_augur_dir + self._default_config = default_config + self._env_config = {} + self.load_config() + + def get_section(self, section_name): + try: + return self._config[section_name] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name} not found in loaded config. Checking default config") + try: + return self._default_config[section_name] + except KeyError as e: + logger.error(f"No defaults found for {section_name}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}") + + def get_value(self, section_name, value): + try: + return self._config[section_name][value] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name}:{value} not found in loaded config. Checking default config") + try: + return self._default_config[section_name][value] + except KeyError as e: + logger.error(f"No defaults found for {section_name}:{value}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}:{value}") + + def load_config(self): + self._config = None + self.using_default_config = False + + try: + config_file_path = self.discover_config_file() + try: + with open(config_file_path, 'r+') as config_file_handle: + self._config = json.loads(config_file_handle.read()) + except json.decoder.JSONDecodeError as e: + logger.warning("Unable to parse config. Using default configuration") + self.using_default_config = True + self._config = default_config + except AugurConfigFileNotFoundException as e: + logger.warning("Config file not found. 
Using default configuration") + self.using_default_config = True + self._config = default_config + + self.load_env_configuration() + + def discover_config_file(self): + default_config_path = self._root_augur_dir + '/' + self._default_config_file_name + config_file_path = None + + config_locations = [self._default_config_file_name, default_config_path + , f"/opt/augur/{self._default_config_file_name}"] + if os.getenv('AUGUR_CONFIG_FILE', None) is not None: + config_file_path = os.getenv('AUGUR_CONFIG_FILE') + else: + for location in config_locations: + try: + f = open(location, "r+") + config_file_path = os.path.abspath(location) + f.close() + break + except FileNotFoundError: + logger.debug(f"No config file found at {config_file_path}") + pass + if config_file_path: + return config_file_path + else: + raise(AugurConfigFileNotFoundException(message=f"{self._default_config_file_name} not found", errors=None)) + + def load_env_configuration(self): + self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') + self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') + self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') + self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') + self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') + self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') + self.set_env_value(section='Development', name='log_level', environment_variable='AUGUR_LOG_LEVEL') + self.set_env_value(section='Development', name='verbose', environment_variable='AUGUR_LOG_VERBOSE') + self.set_env_value(section='Development', name='quiet', environment_variable='AUGUR_LOG_QUIET') + + def set_env_value(self, section, name, environment_variable, sub_config=None): + """ + Sets names and values of specified config section according to their environment variables. 
+ """ + # using sub_config lets us grab values from nested config blocks + if sub_config is None: + sub_config = self._config + + env_value = os.getenv(environment_variable) + + if env_value is not None: + self._env_config[environment_variable] = env_value + sub_config[section][name] = env_value + logger.debug(f"{section}:{name} set to {env_value} from envvar: {environment_variable}") + else: + self._env_config[environment_variable] = self.get_value(section, name) + + def get_raw_config(self): + return self._config + + def get_default_config(self): + return self._default_config + + def get_env_config(self): + return self._env_config + +class AugurConfigFileNotFoundException(Exception): + def __init__(self, message, errors): + super().__init__(message) diff --git a/augur/housekeeper/housekeeper.py b/augur/housekeeper/housekeeper.py --- a/augur/housekeeper/housekeeper.py +++ b/augur/housekeeper/housekeeper.py @@ -1,16 +1,18 @@ """ Keeps data up to date """ -import logging, os, time, requests +import logging, os, time, requests, logging from multiprocessing import Process from sqlalchemy.ext.automap import automap_base import sqlalchemy as s import pandas as pd from sqlalchemy import MetaData -logging.basicConfig(filename='housekeeper.log') +from augur.logging import create_job_logger import warnings warnings.filterwarnings('ignore') +logger = logging.getLogger("augur") + class Housekeeper: def __init__(self, jobs, broker, broker_host, broker_port, user, password, host, port, dbname): @@ -22,12 +24,12 @@ def __init__(self, jobs, broker, broker_host, broker_port, user, password, host, user, password, host, port, dbname ) - dbschema='augur_data' + dbschema = 'augur_data' self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(dbschema)}) helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, + self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(helper_schema)}) helper_metadata = MetaData() @@ -59,13 +61,13 @@ def updater_process(broker_host, broker_port, broker, job): :param delay: time needed to update :param shared: shared object that is to also be updated """ - + logger = create_job_logger(job["model"]) if 'repo_group_id' in job: repo_group_id = job['repo_group_id'] - logging.info('Housekeeper spawned {} model updater process for repo group id {} with PID {}\n'.format(job['model'], repo_group_id, os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo group id {} with PID {}\n'.format(job['model'], repo_group_id, os.getpid())) else: repo_group_id = None - logging.info('Housekeeper spawned {} model updater process for repo ids {} with PID {}\n'.format(job['model'], job['repo_ids'], os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo ids {} with PID {}\n'.format(job['model'], job['repo_ids'], os.getpid())) try: compatible_worker_found = False @@ -78,10 +80,10 @@ def updater_process(broker_host, broker_port, broker, job): time.sleep(3) continue - logging.info("Housekeeper recognized that the broker has a worker that " + + logger.info("Housekeeper recognized that the broker has a worker that " + "can handle the {} model... 
beginning to distribute maintained tasks\n".format(job['model'])) while True: - logging.info('Housekeeper updating {} model with given {}...\n'.format( + logger.info('Housekeeper updating {} model with given {}...\n'.format( job['model'], job['given'][0])) if job['given'][0] == 'git_url' or job['given'][0] == 'github_url': @@ -102,9 +104,9 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.info("Error encountered: {}\n".format(e)) - logging.info(task) + logger.info(task) time.sleep(15) @@ -121,9 +123,9 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.info("Error encountered: {}\n".format(e)) - logging.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)\n".format(len(job['repos']))) + logger.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)\n".format(len(job['repos']))) time.sleep(job['delay']) except KeyboardInterrupt: @@ -136,7 +138,7 @@ def __updater(self, jobs=None): """ Starts update processes """ - logging.info("Starting update processes...") + logger.info("Starting update processes...") if jobs is None: jobs = self.__updatable for job in jobs: @@ -156,7 +158,7 @@ def schedule_updates(self): Schedules updates """ # don't use this, - logging.debug('Scheduling updates...') + logger.debug('Scheduling updates...') self.__updater() def join_updates(self): @@ -271,7 +273,7 @@ def prep_jobs(self, jobs): reorganized_repos = pd.read_sql(repo_url_sql, self.db, params={}) if len(reorganized_repos) == 0: - logging.info("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) + logger.info("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) job['repos'] = [] continue @@ -292,7 +294,7 @@ def prep_jobs(self, jobs): 'oauth_id': 0 } result = self.helper_db.execute(self.job_table.insert().values(job_tuple)) - logging.info("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) + logger.info("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) # If a last id is not recorded, start from beginning of repos # (first id is not necessarily 0) diff --git a/augur/logging.py b/augur/logging.py new file mode 100644 --- /dev/null +++ b/augur/logging.py @@ -0,0 +1,94 @@ +import logging +from logging import FileHandler, StreamHandler, Formatter +import os +from pathlib import Path +import coloredlogs + +verbose_formatter = Formatter(fmt='%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s') +generic_formatter = Formatter(fmt='%(asctime)s [%(process)d] [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S') + +FORMATTER = generic_formatter +LOG_LEVEL = "INFO" +VERBOSE = False +QUIET = False + +ROOT_AUGUR_DIRECTORY = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +AUGUR_LOG_DIR = ROOT_AUGUR_DIRECTORY + "/logs/" +HOUSEKEEPER_LOG_DIR = AUGUR_LOG_DIR + "/housekeeper/" +WORKER_LOG_DIR = AUGUR_LOG_DIR + "/workers/" + +Path(AUGUR_LOG_DIR).mkdir(exist_ok=True) 
+Path(HOUSEKEEPER_LOG_DIR).mkdir(exist_ok=True) +Path(WORKER_LOG_DIR).mkdir(exist_ok=True) + +housekeeper_file_handler = FileHandler(HOUSEKEEPER_LOG_DIR + "all_jobs.log", mode="a") +housekeeper_file_handler.setFormatter(FORMATTER) + +console_handler = StreamHandler() +console_handler.setLevel(LOG_LEVEL) +console_handler.setFormatter(FORMATTER) + +log_file_handler = FileHandler(AUGUR_LOG_DIR + "augur.log", mode="a") +log_file_handler.setLevel(LOG_LEVEL) +log_file_handler.setFormatter(FORMATTER) + +gunicorn_log_file_handler = FileHandler(AUGUR_LOG_DIR + "gunicorn.log", mode="a") +gunicorn_log_file_handler.setLevel(LOG_LEVEL) +gunicorn_log_file_handler.setFormatter(FORMATTER) + +def initialize_logging(augur_config): + LOG_LEVEL = augur_config.get_value("Development", "log_level") + VERBOSE = augur_config.get_value("Development", "verbose") + QUIET = augur_config.get_value("Development", "quiet") + + if VERBOSE is True: + FORMATTER = verbose_formatter + else: + FORMATTER = generic_formatter + + augur_logger = create_logger("augur", [log_file_handler,console_handler]) + cli_logger = create_logger("augur.cli", [console_handler]) + cli_logger.propagate = False + + if QUIET is True: + augur_logger.disabled = True + +def create_logger(name, handlers): + logger = logging.getLogger(name) + logger.handlers = [] + for handler in handlers: + handler.setFormatter(FORMATTER) + handler.setLevel(LOG_LEVEL) + logger.addHandler(handler) + logger.setLevel(LOG_LEVEL) + coloredlogs.install(logger=logger) + return logger + +def create_job_logger(model): + job_log_file = HOUSEKEEPER_LOG_DIR + f"{model}_jobs.log" + + open(job_log_file, "w").close() + job_handler = FileHandler(job_log_file) + + job_logger = create_logger(f"augur.housekeeper.{model}", [housekeeper_file_handler, job_handler, console_handler]) + job_logger.propagate = False + + if QUIET is True: + job_logger.disabled = True + + return job_logger + +def reset_logfiles(): + open(AUGUR_LOG_DIR + "augur.log", "w").close() + open(AUGUR_LOG_DIR + "gunicorn.log", "w").close() + open(HOUSEKEEPER_LOG_DIR + "all_jobs.log", "w").close() + +def set_gunicorn_log_options(): + gunicorn_log_file = AUGUR_LOG_DIR + "gunicorn.log" + options = { + 'errorlog': gunicorn_log_file, + 'accesslog': gunicorn_log_file, + 'loglevel': LOG_LEVEL, + 'capture_output': True if not QUIET else False + } + return options \ No newline at end of file diff --git a/augur/metrics/metrics.py b/augur/metrics/metrics.py --- a/augur/metrics/metrics.py +++ b/augur/metrics/metrics.py @@ -4,7 +4,9 @@ import inspect import types import importlib -from augur import logger +import logging + +logger = logging.getLogger(__name__) class Metrics(): def __init__(self, app): diff --git a/augur/metrics/repo_meta.py b/augur/metrics/repo_meta.py --- a/augur/metrics/repo_meta.py +++ b/augur/metrics/repo_meta.py @@ -5,9 +5,12 @@ import datetime import sqlalchemy as s import pandas as pd -from augur import logger -from augur.util import register_metric import math +import logging + +from augur.util import register_metric + +logger = logging.getLogger("augur") @register_metric() def code_changes(self, repo_group_id, repo_id=None, period='week', begin_date=None, end_date=None): diff --git a/augur/models/__init__.py b/augur/models/__init__.py deleted file mode 100644 --- a/augur/models/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from sqlalchemy.orm import sessionmaker -from .user import User -from .repo import Repo, RepoGroup - - -__all__ = ['User', 'RepoGroup', 'Repo'] \ No newline at end of file diff --git 
a/augur/models/common.py b/augur/models/common.py deleted file mode 100644 --- a/augur/models/common.py +++ /dev/null @@ -1,2 +0,0 @@ -from sqlalchemy.ext.declarative import declarative_base -Base = declarative_base() \ No newline at end of file diff --git a/augur/models/repo.py b/augur/models/repo.py deleted file mode 100644 --- a/augur/models/repo.py +++ /dev/null @@ -1,48 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime -from sqlalchemy.orm import relationship -from .common import Base -from .user import user_has_repo_group - -repo_group_has_project = Table('repo_group_has_project', - Base.metadata, - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), - Column('repo_id', ForeignKey('repo.url'), primary_key=True), -) - -class Repo(Base): - """ - The Repo object models a VCS repository - """ - __tablename__ = 'repo' - - # Keys - url = Column(String(1024), primary_key=True) - vcs = Column(String(64), default='git') - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - repo_groups_member_of = relationship('RepoGroup', secondary=repo_group_has_project, back_populates='projects') - - def __repr__(self): - return f"<Repo(giturl='{self.password}')>" - - -class RepoGroup(Base): - """ - The RepoGroup class models lists of projects that a user wants to keep track of - """ - __tablename__ = 'repo_group' - - # Keys - id = Column(Integer, primary_key=True) - name = Column(String(128)) - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - projects = relationship('Repo', secondary=repo_group_has_project, back_populates='repo_groups_member_of') - users_of = relationship('User', secondary=user_has_repo_group, back_populates='repo_groups') \ No newline at end of file diff --git a/augur/models/user.py b/augur/models/user.py deleted file mode 100644 --- a/augur/models/user.py +++ /dev/null @@ -1,61 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime, Boolean -from sqlalchemy.orm import relationship -from sqlalchemy.ext.hybrid import hybrid_property -from .common import Base -from werkzeug.security import generate_password_hash, check_password_hash -from flask_login import UserMixin - -user_has_repo_group = Table('user_has_repo_group', - Base.metadata, - Column('user_id', ForeignKey('user.id'), primary_key=True), - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), -) - -class User(Base): - """ - The User object models users in the database. 
- """ - __tablename__ = 'user' - - # Keys - id = Column(Integer, primary_key=True) - username = Column(String(64), unique=True, nullable=False) - email = Column(String(64), unique=True, nullable=False) - - # Fields - password_hash = Column(String(128)) - email_confirmation_token = Column(String(128), nullable=True) - created_at = Column(DateTime, default=datetime.datetime.utcnow) - password_updated_at = Column(DateTime, default=datetime.datetime.utcnow) - last_login_at = Column(DateTime, nullable=True) - authenticated = Column(Boolean, default=False) - active = Column(Boolean, default=True) - administrator = Column(Boolean, default=False) - - # Foreign Keys - repo_groups = relationship('RepoGroup', secondary=user_has_repo_group, back_populates='users_of') - - def get_id(self): - return self.id - - def __repr__(self): - return f"<User(username='{self.username}', email='{self.email}')>" - - @hybrid_property - def password(self): - return self.password_hash - - @password.setter - def password(self, password): - self.password_hash = generate_password_hash(password) - - def check_password(self, password): - return check_password_hash(self.password_hash, password) - - def is_authenticated(self): - return self.authenticated - - def is_active(self): - # False as we do not support annonymity - return self.active diff --git a/augur/routes/__init__.py b/augur/routes/__init__.py --- a/augur/routes/__init__.py +++ b/augur/routes/__init__.py @@ -5,19 +5,17 @@ import sys import inspect -from augur import logger - def get_route_files(): route_files = [] def get_file_id(path): return os.path.splitext(os.path.basename(path))[0] - for filename in glob.iglob("**/routes/*"): + for filename in glob.iglob("augur/routes/*"): file_id = get_file_id(filename) if not file_id.startswith('__') and filename.endswith('.py'): route_files.append(file_id) - + return route_files route_files = get_route_files() diff --git a/augur/routes/batch.py b/augur/routes/batch.py --- a/augur/routes/batch.py +++ b/augur/routes/batch.py @@ -10,9 +10,10 @@ from sqlalchemy import exc from flask import request, Response from augur.util import metric_metadata -from augur import logger import json +logger = logging.getLogger("augur") + def create_routes(server): @server.app.route('/{}/batch'.format(server.api_version), methods=['GET', 'POST']) diff --git a/augur/routes/manager.py b/augur/routes/manager.py --- a/augur/routes/manager.py +++ b/augur/routes/manager.py @@ -285,15 +285,15 @@ def get_repo_name(self): repo = self.name return repo[repo.find('/')+1:] -def authenticate_request(app, request): +def authenticate_request(augur_app, request): # do I like doing it like this? not at all # do I have the time to implement a better solution right now? 
not at all - user = app.read_config('Database', 'user') - password = app.read_config('Database', 'password') - host = app.read_config('Database', 'host') - port = app.read_config('Database', 'port') - dbname = app.read_config('Database', 'name') + user = augur_app.config.get_value('Database', 'user') + password = augur_app.config.get_value('Database', 'password') + host = augur_app.config.get_value('Database', 'host') + port = augur_app.config.get_value('Database', 'port') + dbname = augur_app.config.get_value('Database', 'name') DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( user, password, host, port, dbname diff --git a/augur/routes/util.py b/augur/routes/util.py --- a/augur/routes/util.py +++ b/augur/routes/util.py @@ -200,7 +200,7 @@ def get_issues(repo_group_id, repo_id=None): @server.app.route('/{}/api-port'.format(server.api_version)) def api_port(): - response = {'port': server.augur_app.read_config('Server', 'port')} + response = {'port': server.augur_app.config.get_value('Server', 'port')} return Response(response=json.dumps(response), status=200, mimetype="application/json") diff --git a/augur/server.py b/augur/server.py --- a/augur/server.py +++ b/augur/server.py @@ -10,50 +10,40 @@ import json import os import base64 +import logging from flask import Flask, request, Response, redirect from flask_cors import CORS import pandas as pd import augur -from augur.util import logger from augur.routes import create_routes AUGUR_API_VERSION = 'api/unstable' -class VueCompatibleFlask(Flask): - jinja_options = Flask.jinja_options.copy() - jinja_options.update(dict( - block_start_string='(%', - block_end_string='%)', - variable_start_string='%%', - variable_end_string='%%', - comment_start_string='(#', - comment_end_string='#)', - )) - +logger = logging.getLogger(__name__) class Server(object): """ Defines Augur's server's behavior """ - def __init__(self, frontend_folder='../frontend/public', manager=None, broker=None, housekeeper=None): + def __init__(self, manager=None, broker=None, housekeeper=None, augur_app=None): """ Initializes the server, creating both the Flask application and Augur application """ # Create Flask application - self.app = VueCompatibleFlask(__name__, static_folder=frontend_folder, template_folder=frontend_folder) + self.app = Flask(__name__) self.api_version = AUGUR_API_VERSION app = self.app CORS(app) app.url_map.strict_slashes = False # Create Augur application - self.augur_app = augur.Application() + self.augur_app = augur_app # Initialize cache - expire = int(self.augur_app.read_config('Server', 'cache_expire')) + expire = int(self.augur_app.config.get_value('Server', 'cache_expire')) self.cache = self.augur_app.cache.get_cache('server', expire=expire) self.cache.clear() @@ -196,8 +186,8 @@ def run(): Runs server with configured hosts/ports """ server = Server() - host = server.augur_app.read_config('Server', 'host') - port = server.augur_app.read_config('Server', 'port') + host = server.augur_app.config.get_value('Server', 'host') + port = server.augur_app.config.get_value('Server', 'port') Server().app.run(host=host, port=int(port), debug=True) wsgi_app = None diff --git a/augur/util.py b/augur/util.py --- a/augur/util.py +++ b/augur/util.py @@ -8,8 +8,9 @@ import types import sys import beaker +import logging -from augur import logger +logger = logging.getLogger(__name__) __ROOT = os.path.abspath(os.path.dirname(__file__)) def get_data_path(path): diff --git a/conftest.py b/conftest.py new file mode 100644 --- /dev/null +++ b/conftest.py @@ -0,0 +1,32 @@ 
+import pytest
+import re
+
+from augur.application import Application
+from augur.cli.run import initialize_components
+
+default_repo_id = "25430"
+default_repo_group_id = "10"
+
+def create_full_routes(routes):
+    full_routes = []
+    for route in routes:
+        route = re.sub("<default_repo_id>", default_repo_id, route)
+        route = re.sub("<default_repo_group_id>", default_repo_group_id, route)
+        route = "http://localhost:5000/api/unstable/" + route
+        full_routes.append(route)
+    return full_routes
+
[email protected](scope="session")
+def augur_app():
+    augur_app = Application()
+    return augur_app
+
[email protected](scope="session")
+def metrics(augur_app):
+    return augur_app.metrics
+
[email protected](scope="session")
+def client(augur_app):
+    augur_app.logger.disabled = True
+    flask_client = initialize_components(augur_app, disable_housekeeper=True).load()
+    return flask_client.test_client()
diff --git a/metadata.py b/metadata.py
--- a/metadata.py
+++ b/metadata.py
@@ -6,8 +6,8 @@
 __short_description__ = "Python 3 package for free/libre and open-source software community metrics & data collection"

-__version__ = "0.12.0"
-__release__ = "0.12.0"
+__version__ = "0.12.1"
+__release__ = "0.12.1"

 __license__ = "MIT"
 __copyright__ = "CHAOSS & Augurlabs 2020"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,7 @@
         "psycopg2-binary",
         "click",
         "psutil",
-        "gunicorn==19.9.0",
+        "gunicorn",
         "six>=1.14.0"
     ],
     extras_require={
@@ -61,7 +61,7 @@
     },
     entry_points={
         "console_scripts": [
-            "augur=augur.runtime:run"
+            "augur=augur.cli._multicommand:run"
         ],
     }
 )
diff --git a/util/alembic/env.py b/util/alembic/env.py
deleted file mode 100644
--- a/util/alembic/env.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from __future__ import with_statement
-from alembic import context
-from sqlalchemy import engine_from_config, pool
-from logging.config import fileConfig
-
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-config = context.config
-
-# Interpret the config file for Python logging.
-# This line sets up loggers basically.
-fileConfig(config.config_file_name)
-
-# add your model's MetaData object here
-# for 'autogenerate' support
-from augur.models.common import Base
-target_metadata = Base.metadata
-
-# other values from the config, defined by the needs of env.py,
-# can be acquired:
-# my_important_option = config.get_main_option("my_important_option")
-# ... etc.
-
-
-def run_migrations_offline():
-    """Run migrations in 'offline' mode.
-
-    This configures the context with just a URL
-    and not an Engine, though an Engine is acceptable
-    here as well. By skipping the Engine creation
-    we don't even need a DBAPI to be available.
-
-    Calls to context.execute() here emit the given string to the
-    script output.
-
-    """
-    url = config.get_main_option("sqlalchemy.url")
-    context.configure(
-        url=url, target_metadata=target_metadata, literal_binds=True)
-
-    with context.begin_transaction():
-        context.run_migrations()
-
-
-def run_migrations_online():
-    """Run migrations in 'online' mode.
-
-    In this scenario we need to create an Engine
-    and associate a connection with the context.
- - """ - import augur.application - app = augur.application.Application() - - context.configure( - connection=app.db.connect(), - target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py b/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py deleted file mode 100644 --- a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Create basic tables - -Revision ID: 2eaa930b1f5a -Revises: -Create Date: 2019-02-09 16:10:24.251828 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = '2eaa930b1f5a' -down_revision = None -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.create_table('repo', - sa.Column('url', sa.String(length=1024), nullable=False), - sa.Column('vcs', sa.String(length=64), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('url') - ) - op.create_table('repo_group', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('id') - ) - op.create_table('user', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('username', sa.String(length=64), nullable=False), - sa.Column('email', sa.String(length=64), nullable=False), - sa.Column('password_hash', sa.String(length=128), nullable=True), - sa.Column('email_confirmation_token', sa.String(length=128), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('password_updated_at', sa.DateTime(), nullable=True), - sa.Column('last_login_at', sa.DateTime(), nullable=True), - sa.Column('authenticated', sa.Boolean(), nullable=True), - sa.Column('active', sa.Boolean(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('email'), - sa.UniqueConstraint('username') - ) - op.create_table('repo_group_has_project', - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.Column('repo_id', sa.String(length=1024), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['repo_id'], ['repo.url'], ), - sa.PrimaryKeyConstraint('repo_group_id', 'repo_id') - ) - op.create_table('user_has_repo_group', - sa.Column('user_id', sa.Integer(), nullable=False), - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['user_id'], ['user.id'], ), - sa.PrimaryKeyConstraint('user_id', 'repo_group_id') - ) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - op.drop_table('user_has_repo_group') - op.drop_table('repo_group_has_project') - op.drop_table('user') - op.drop_table('repo_group') - op.drop_table('repo') - # ### end Alembic commands ### diff --git a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py b/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py deleted file mode 100644 --- a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Add admin to User, name to RepoGroup - -Revision ID: a051167419fa -Revises: 2eaa930b1f5a -Create Date: 2019-02-17 13:09:42.138936 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = 'a051167419fa' -down_revision = '2eaa930b1f5a' -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.add_column('repo_group', sa.Column('name', sa.String(length=128), nullable=True)) - op.add_column('user', sa.Column('administrator', sa.Boolean(), nullable=True)) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.drop_column('user', 'administrator') - op.drop_column('repo_group', 'name') - # ### end Alembic commands ### diff --git a/workers/contributor_worker/__init__.py b/workers/contributor_worker/__init__.py new file mode 100644 diff --git a/workers/contributor_worker/contributor_worker/worker.py b/workers/contributor_worker/contributor_worker.py similarity index 83% rename from workers/contributor_worker/contributor_worker/worker.py rename to workers/contributor_worker/contributor_worker.py --- a/workers/contributor_worker/contributor_worker/worker.py +++ b/workers/contributor_worker/contributor_worker.py @@ -8,10 +8,8 @@ import statistics, logging, os, json, time import numpy as np import datetime -from workers.standard_methods import * + from workers.worker_base import Worker -import warnings -warnings.filterwarnings('ignore') class ContributorWorker(Worker): """ Worker that detects anomalies on a select few of our metrics @@ -20,7 +18,9 @@ class ContributorWorker(Worker): queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): + def __init__(self, config={}): + + worker_type = "contributor_worker" given = [['git_url']] models = ['contributors'] @@ -31,27 +31,27 @@ def __init__(self, config, task=None): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) # These 3 are included in every tuple the worker inserts (data collection info) self.tool_source = 'Contributor Worker' - self.tool_version = '0.0.1' # See __init__.py + self.tool_version = '0.0.1' self.data_source = 'Augur Commit Data' + def contributors_model(self, entry_info, repo_id): + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's - logging.info("Querying starting ids info...\n") + self.logger.info("Querying starting ids info...\n") self.cntrb_id_inc = self.get_max_id('contributors', 'cntrb_id') - def contributors_model(self, entry_info, repo_id): - # Get and insert all users (emails) found by the facade worker self.insert_facade_contributors(entry_info, repo_id) # Get and insert all users github 
considers to be contributors for this repo self.query_github_contributors(entry_info, repo_id) - logging.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) + self.logger.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -103,7 +103,7 @@ def contributors_model(self, entry_info, repo_id): commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, \ params={'repo_id': repo_id}).to_json(orient="records")) - logging.info("We found {} distinct emails to search for in this repo (repo_id = {})\n".format( + self.logger.info("We found {} distinct emails to search for in this repo (repo_id = {})\n".format( len(commit_cntrbs), repo_id)) # For every unique commit contributor info combination... @@ -144,7 +144,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(times_used_tuple)) self.results_counter += 1 - logging.info("Updated cntrb_created_at and cntrb_last_used columns for existing " + self.logger.info("Updated cntrb_created_at and cntrb_last_used columns for existing " "tuple in the contributors table with email: {}\n".format(contributor['commit_email'])) # If cntrb_full_name column is not filled, go ahead and fill it bc we have that info @@ -158,7 +158,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(name_col)) - logging.info("Inserted cntrb_full_name column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_full_name column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) # If cntrb_canonical column is not filled, go ahead and fill it w main email bc @@ -173,7 +173,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(canonical_col)) - logging.info("Inserted cntrb_canonical column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_canonical column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) @@ -208,20 +208,20 @@ def contributors_model(self, entry_info, repo_id): url = 'https://api.github.com/search/users?q={}+in:email'.format( cmt_cntrb['email']) - logging.info("Hitting endpoint: " + url + " ...\n") + self.logger.info("Hitting endpoint: " + url + " ...\n") r = requests.get(url=url, headers=self.headers) self.update_gh_rate_limit(r) results = r.json() # If no matches or bad response, continue with other contributors if 'total_count' not in results: - logging.info("Search query returned an empty response, moving on...\n") + self.logger.info("Search query returned an empty response, moving on...\n") continue if results['total_count'] == 0: - logging.info("Search query did not return any results, moving on...\n") + self.logger.info("Search query did not return any results, moving on...\n") continue - logging.info("When searching for a contributor with info {}, we found the following users: {}\n".format( + self.logger.info("When searching for a contributor with info {}, we found the following users: {}\n".format( 
cmt_cntrb, results)) # Grab first result and make sure it has the highest match score @@ -231,7 +231,7 @@ def contributors_model(self, entry_info, repo_id): match = item cntrb_url = ("https://api.github.com/users/" + match['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) self.update_gh_rate_limit(r) contributor = r.json() @@ -268,12 +268,12 @@ def contributors_model(self, entry_info, repo_id): } result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==self.cntrb_id_inc).values(cntrb_gh_info)) - logging.info("Updated existing tuple in the contributors table with github info after " + self.logger.info("Updated existing tuple in the contributors table with github info after " "a successful search query on a facade commit's author : {} {}\n".format(contributor, cntrb_gh_info)) # Dupe check - logging.info('Checking dupes.\n') + self.logger.info('Checking dupes.\n') dupe_cntrb_sql = s.sql.text(""" SELECT contributors.* FROM contributors inner join ( @@ -286,19 +286,21 @@ def contributors_model(self, entry_info, repo_id): dupe_cntrbs = pd.read_sql(dupe_cntrb_sql, self.db, params={}) - logging.info(f'There are {len(dupe_cntrbs)} duplicates.\n') + self.logger.info(f'There are {len(dupe_cntrbs)} duplicates.\n') - # Turn this column from nan to None + # Turn these columns from nan/nat to None dupe_cntrbs['gh_user_id'] = dupe_cntrbs['gh_user_id'].where( pd.notnull(dupe_cntrbs['gh_user_id']), None) dupe_cntrbs['cntrb_created_at'] = dupe_cntrbs['cntrb_created_at'].where( pd.notnull(dupe_cntrbs['cntrb_created_at']), None) + dupe_cntrbs['cntrb_last_used'] = dupe_cntrbs['cntrb_last_used'].astype( + object).where(dupe_cntrbs['cntrb_last_used'].notnull(), None) for i, cntrb_existing in dupe_cntrbs.iterrows(): - logging.info(f'Processing dupe: {cntrb_existing}.\n') + self.logger.info(f'Processing dupe: {cntrb_existing}.\n') if i == 0: - logging.info('skipping first\n') + self.logger.info('skipping first\n') continue cntrb_new = cntrb_existing.copy() @@ -329,20 +331,20 @@ def contributors_model(self, entry_info, repo_id): AND cntrb_email = '{}'; """.format(pk, cntrb_new['cntrb_email'])) - logging.info(f'Trying to delete dupes with sql: {delete_dupe_ids_sql}') + self.logger.info(f'Trying to delete dupes with sql: {delete_dupe_ids_sql}') try: result = self.db.execute(delete_dupe_ids_sql) except Exception as e: - logging.info(f'Deleting dupes failed with error: {e}') + self.logger.info(f'Deleting dupes failed with error: {e}') - logging.info('Deleted duplicates.\n') + self.logger.info('Deleted duplicates.\n') # Register this task as completed self.register_task_completion(entry_info, repo_id, "contributors") def insert_facade_contributors(self, entry_info, repo_id): - logging.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) + self.logger.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -378,7 +380,7 @@ def insert_facade_contributors(self, entry_info, repo_id): """) commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, params={'repo_id': repo_id}).to_json(orient="records")) - logging.info("We found {} distinct contributors needing insertion (repo_id = {})".format( + self.logger.info("We 
found {} distinct contributors needing insertion (repo_id = {})".format( len(commit_cntrbs), repo_id)) for cntrb in commit_cntrbs: @@ -391,10 +393,10 @@ def insert_facade_contributors(self, entry_info, repo_id): 'cntrb_full_name': cntrb['name'] } result = self.db.execute(self.contributors_table.insert().values(cntrb_tuple)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: {}\n".format(cntrb['email'])) + self.logger.info("Inserted contributor: {}\n".format(cntrb['email'])) def handle_alias(self, tuple): cntrb_email = tuple['cntrb_email'] # canonical @@ -423,15 +425,15 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc elif len(existing_tuples) > 1: # fix all dupe references to dupe cntrb ids before we delete them - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") - logging.info("For cntrb_email: {}".format(tuple['commit_email'])) + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") + self.logger.info("For cntrb_email: {}".format(tuple['commit_email'])) """ Insert alias tuple into the contributor table """ @@ -456,7 +458,7 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc @@ -490,14 +492,14 @@ def handle_alias(self, tuple): try: # Delete all dupes result = self.db.execute(deleteSQL) - logging.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) + self.logger.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) except Exception as e: - logging.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) + self.logger.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) else: #then there would be exactly 1 existing tuple, so that id is the one we want alias_id = existing_tuples[0]['cntrb_id'] - logging.info('Checking canonicals match.\n') + self.logger.info('Checking canonicals match.\n') alias_sql = s.sql.text(""" SELECT * FROM contributors @@ -516,14 +518,14 @@ def handle_alias(self, tuple): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_canonical==canonical_id_result.iloc[0]['cntrb_canonical'] ).values(canonical_col)) - logging.info("Updated cntrb_canonical column for existing tuple in the contributors " + self.logger.info("Updated cntrb_canonical column for 
existing tuple in the contributors " "table with email: {}\n".format(tuple['cntrb_email'])) # Now check existing alias table tuple existing_tuples = self.retrieve_tuple({'alias_email': commit_email}, ['contributors_aliases']) if len(existing_tuples) == 0: - logging.info("Finding cntrb_id for canonical email: {}".format(cntrb_email)) + self.logger.info("Finding cntrb_id for canonical email: {}".format(cntrb_email)) canonical_id_sql = s.sql.text(""" SELECT cntrb_id as canonical_id from contributors @@ -532,7 +534,7 @@ def handle_alias(self, tuple): canonical_id_result = json.loads(pd.read_sql(canonical_id_sql, self.db, params={'email': cntrb_email}).to_json( orient="records")) if len(canonical_id_result) > 1: - logging.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) + self.logger.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) alias_tuple = { 'cntrb_id': canonical_id_result[0]['canonical_id'], 'cntrb_a_id': alias_id, @@ -545,9 +547,9 @@ def handle_alias(self, tuple): } result = self.db.execute(self.contributors_aliases_table.insert().values(alias_tuple)) self.results_counter += 1 - logging.info("Inserted alias with email: {}\n".format(commit_email)) + self.logger.info("Inserted alias with email: {}\n".format(commit_email)) if len(existing_tuples) > 1: - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " "table AND NEED TO ADD DELETION LOGIC: {}\n".format(existing_tuples)) def map_new_id(self, dupe_ids, new_id): @@ -573,49 +575,49 @@ def map_new_id(self, dupe_ids, new_id): alias_result = self.db.execute(self.contributors_aliases_table.update().where( self.contributors_aliases_table.c.cntrb_a_id.in_(dupe_ids)).values(alias_update_col)) - logging.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) except Exception as e: - logging.info(f'Alias re-map already done... error: {e}') + self.logger.info(f'Alias re-map already done... 
error: {e}') issue_events_result = self.db.execute(self.issue_events_table.update().where( self.issue_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_events_result = self.db.execute(self.pull_request_events_table.update().where( self.pull_request_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issues_cntrb_result = self.db.execute(self.issues_table.update().where( self.issues_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issues_reporter_result = self.db.execute(self.issues_table.update().where( self.issues_table.c.reporter_id.in_(dupe_ids)).values(reporter_col)) - logging.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issue_assignee_result = self.db.execute(self.issue_assignees_table.update().where( self.issue_assignees_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_assignee_result = self.db.execute(self.pull_request_assignees_table.update().where( self.pull_request_assignees_table.c.contrib_id.in_(dupe_ids)).values(pr_assignee_col)) - logging.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) message_result = self.db.execute(self.message_table.update().where( self.message_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_reviewers_result = self.db.execute(self.pull_request_reviewers_table.update().where( self.pull_request_reviewers_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_reviewers 
table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_reviewers table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_meta_result = self.db.execute(self.pull_request_meta_table.update().where( self.pull_request_meta_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_repo_result = self.db.execute(self.pull_request_repo_table.update().where( self.pull_request_repo_table.c.pr_cntrb_id.in_(dupe_ids)).values(pr_repo_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_repo table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_repo table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) - logging.info('Done mapping new id.\n') + self.logger.info('Done mapping new id.\n') diff --git a/workers/contributor_worker/contributor_worker/__init__.py b/workers/contributor_worker/contributor_worker/__init__.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.0.1' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/contributor_worker/contributor_worker/runtime.py b/workers/contributor_worker/contributor_worker/runtime.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/runtime.py +++ /dev/null @@ -1,53 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from contributor_worker.worker import ContributorWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51236, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - worker_info = read_config('Workers', 'contributor_worker', None, None) - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: # for multiple instances of workers - try: # trying each port for an already-alive worker until a free port is found - print("New github worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.contributor_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - 'gh_api_key': read_config('Database', 
'key', 'AUGUR_GITHUB_API_KEY', 'key') - } - - #create instance of the worker - app.worker = ContributorWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.worker._child is not None: - app.worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/contributor_worker/runtime.py b/workers/contributor_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/contributor_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.contributor_worker.contributor_worker import ContributorWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ContributorWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/contributor_worker/setup.py b/workers/contributor_worker/setup.py --- a/workers/contributor_worker/setup.py +++ b/workers/contributor_worker/setup.py @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'contributor_worker_start=contributor_worker.runtime:main', + 'contributor_worker_start=workers.contributor_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/facade_worker/facade_worker/facade00mainprogram.py b/workers/facade_worker/facade_worker/facade00mainprogram.py --- a/workers/facade_worker/facade_worker/facade00mainprogram.py +++ b/workers/facade_worker/facade_worker/facade00mainprogram.py @@ -26,20 +26,8 @@ # repos. It also rebuilds analysis data, checks any changed affiliations and # aliases, and caches data for display. 
-import pymysql -import sys -import platform -import imp -import time -import datetime -import html.parser -import subprocess -import os -import getopt -import xlsxwriter -import configparser +import pymysql, sys, platform, imp, time, datetime, html.parser, subprocess, os, getopt, xlsxwriter, configparser, logging from multiprocessing import Process, Queue - from facade_worker.facade01config import Config#increment_db, update_db, migrate_database_config, database_connection, get_setting, update_status, log_activity from facade_worker.facade02utilitymethods import update_repo_log, trim_commit, store_working_author, trim_author from facade_worker.facade03analyzecommit import analyze_commit @@ -48,55 +36,45 @@ from facade_worker.facade06analyze import analysis from facade_worker.facade07rebuildcache import nuke_affiliations, fill_empty_affiliations, invalidate_caches, rebuild_unknown_affiliation_and_web_caches -from workers.standard_methods import read_config +from workers.util import read_config +from workers.worker_base import Worker + +html = html.parser.HTMLParser() -import logging +class FacadeWorker(Worker): + def __init__(self, config={}, task=None): + worker_type = "facade_worker" -# if platform.python_implementation() == 'PyPy': -# import pymysql -# else: -# import MySQLdb -# ## End Imports + # Define what this worker can be given and know how to interpret + given = [['repo_group']] + models = ['commits'] -html = html.parser.HTMLParser() + # Define the tables needed to insert, update, or delete on + data_tables = [] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Facade-specific config + self.cfg = Config(self.logger) + + # Define data collection info + self.tool_source = 'Facade Worker' + self.tool_version = '0.0.1' + self.data_source = 'Git Log' -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class FacadeWorker: - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(os.getpid())) - - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.cfg = Config() - - ### The real program starts here ### + def initialize_database_connections(self): # Set up the database - db_user = self.config['user'] - db_pass = self.config['password'] - db_name = self.config['database'] - db_host = self.config['host'] - db_port = self.config['port'] - db_user_people = self.config['user'] - db_pass_people = self.config['password'] - db_name_people = self.config['database'] - db_host_people = self.config['host'] - db_port_people = self.config['port'] + db_user = self.config['user_database'] + db_pass = self.config['password_database'] + db_name = self.config['name_database'] + db_host = self.config['host_database'] + db_port = self.config['port_database'] # Open a general-purpose connection - db,cursor = self.cfg.database_connection( + self.db, self.cursor = self.cfg.database_connection( db_host, 
db_user, db_pass, @@ -104,157 +82,68 @@ def __init__(self, config, task=None): db_port, False, False) # Open a connection for the people database - db_people,cursor_people = self.cfg.database_connection( - db_host_people, - db_user_people, - db_pass_people, - db_name_people, - db_port_people, True, False) + self.db_people,self.cursor_people = self.cfg.database_connection( + db_host, + db_user, + db_pass, + db_name, + db_port, True, False) # Check if the database is current and update it if necessary try: - current_db = int(self.cfg.get_setting('database_version')) + self.current_db = int(self.cfg.get_setting('database_version')) except: # Catch databases which existed before database versioning - current_db = -1 - - #WHAT IS THE UPSTREAM_DB??? - # if current_db < upstream_db: - - # print(("Current database version: %s\nUpstream database version %s\n" % - # (current_db, upstream_db))) - - # self.cfg.update_db(current_db); + self.current_db = -1 - self.commit_model() - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - rg_id = value['given']['repo_group_id'] - - """ Query all repos """ - # repoUrlSQL = s.sql.text(""" - # SELECT repo_id,repo_group_id,repo_git FROM repo WHERE repo_group_id = '{}' - # """.format(rg_id)) - # rs = pd.read_sql(repoUrlSQL, self.db, params={}) - try: - if value['job_type'] == "UPDATE": - self._queue.put(CollectorTask(message_type='TASK', entry_info=value)) - elif value['job_type'] == "MAINTAIN": - self._maintain_queue.put(CollectorTask(message_type='TASK', entry_info=value)) - - except Exception as e: - logging.info("error: {}".format(e)) - - self._task = CollectorTask(message_type='TASK', entry_info={"task": value, "repo_id": repo_id}) - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...") - if self._child is None: - self._child = Process(target=self.collect, args=()) - self._child.start() - def collect(self): """ Function to process each entry in the worker's task queue Determines what action to take based off the message type """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() while True: - time.sleep(0.5) if not self._queue.empty(): - message = self._queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "UPDATE" + message = self._queue.get() # Get the task off our MP queue else: - if not self._maintain_queue.empty(): - message = self._maintain_queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "MAINTAIN" - else: - break - - if message.type == 'EXIT': + break + self.logger.info("Popped off message: {}\n".format(str(message))) + + if message['job_type'] == 'STOP': break - if message.type != 'TASK': - raise ValueError(f'{message.type} is not a recognized task type') - - if message.type == 'TASK': - try: - git_url = message.entry_info['task']['given']['git_url'] - self.query_issues({'git_url': git_url, 'repo_id': message.entry_info['repo_id']}) - except 
Exception as e: - logging.info("Worker ran into an error for task: {}\n".format(message.entry_info['task'])) - logging.info("Error encountered: " + repr(e) + "\n") - logging.info("Notifying broker and logging task failure in database...\n") - message.entry_info['task']['worker_id'] = self.config['id'] - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=message.entry_info['task']) - # Add to history table - task_history = { - "repo_id": message.entry_info['repo_id'], - "worker": self.config['id'], - "job_model": message.entry_info['task']['models'][0], - "oauth_id": self.config['zombie_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error for: " + str(message.entry_info['task']) + "\n") - - # Update job process table - updated_job = { - "since_id_str": message.entry_info['repo_id'], - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==message.entry_info['task']['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + message.entry_info['task']['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - pass - - def commit_model(self): + # If task is not a valid job type + if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': + raise ValueError('{} is not a recognized task type'.format(message['job_type'])) + pass + try: + self.commits_model(message) + except Exception as e: + self.logger.error(e) + raise(e) + break + + def commits_model(self, message): # Figure out what we need to do - limited_run = read_config("Facade", name="limited_run", default=0) - delete_marked_repos = read_config("Facade", name="delete_marked_repos", default=0) - pull_repos = read_config("Facade", name="pull_repos", default=0) - clone_repos = read_config("Facade", name="clone_repos", default=1) - check_updates = read_config("Facade", name="check_updates", default=0) - force_updates = read_config("Facade", name="force_updates", default=0) - run_analysis = read_config("Facade", name="run_analysis", default=0) - force_analysis = read_config("Facade", name="force_analysis", default=0) - nuke_stored_affiliations = read_config("Facade", name="nuke_stored_affiliations", default=0) - fix_affiliations = read_config("Facade", name="fix_affiliations", default=1) - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - rebuild_caches = read_config("Facade", name="rebuild_caches", default=1) #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], + limited_run = self.augur_config.get_value("Facade", "limited_run") + delete_marked_repos = self.augur_config.get_value("Facade", "delete_marked_repos") + pull_repos = self.augur_config.get_value("Facade", "pull_repos") + clone_repos = self.augur_config.get_value("Facade", "clone_repos") + check_updates = self.augur_config.get_value("Facade", "check_updates") + force_updates = self.augur_config.get_value("Facade", "force_updates") + run_analysis = self.augur_config.get_value("Facade", "run_analysis") + force_analysis = self.augur_config.get_value("Facade", "force_analysis") + nuke_stored_affiliations = 
self.augur_config.get_value("Facade", "nuke_stored_affiliations") + fix_affiliations = self.augur_config.get_value("Facade", "fix_affiliations") + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + rebuild_caches = self.augur_config.get_value("Facade", "rebuild_caches") #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], # '%Y-%m-%d %I:%M:%S.%f') - datetime.datetime.now()).total_seconds()) // 3600 > int(self.cfg.get_setting( # 'update_frequency')) else 0 - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - create_xlsx_summary_files = read_config("Facade", name="create_xlsx_summary_files", default=0) - multithreaded = read_config("Facade", name="multithreaded", default=1) + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + create_xlsx_summary_files = self.augur_config.get_value("Facade", "create_xlsx_summary_files") + multithreaded = self.augur_config.get_value("Facade", "multithreaded") opts,args = getopt.getopt(sys.argv[1:],'hdpcuUaAmnfIrx') for opt in opts: @@ -355,9 +244,9 @@ def commit_model(self): if len(repo_base_directory) == 0: self.cfg.log_activity('Error','No base directory. It is unsafe to continue.') - update_status('Failed: No base directory') + self.cfg.update_status('Failed: No base directory') sys.exit(1) - + # Begin working start_time = time.time() diff --git a/workers/facade_worker/facade_worker/facade01config.py b/workers/facade_worker/facade_worker/facade01config.py --- a/workers/facade_worker/facade_worker/facade01config.py +++ b/workers/facade_worker/facade_worker/facade01config.py @@ -39,15 +39,15 @@ import json import logging -from workers.standard_methods import read_config - +from workers.util import read_config class Config: - def __init__(self): + def __init__(self, logger=None): self.upstream_db = 7 self.cursor = None self.cursor_people = None + self.logger = logger self.db = None self.db_people = None @@ -199,7 +199,7 @@ def log_activity(self, level, status): # "Debug", then just print it and don't save it in the database. 
log_options = ('Error','Quiet','Info','Verbose','Debug') - logging.info("* %s\n" % status) + self.logger.info("* %s\n" % status) if self.log_level == 'Debug' and level == 'Debug': return @@ -209,7 +209,7 @@ def log_activity(self, level, status): self.cursor.execute(query, (level, status)) self.db.commit() except Exception as e: - logging.info('Error encountered: {}\n'.format(e)) + self.logger.info('Error encountered: {}\n'.format(e)) # Set up the database db_user = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur') diff --git a/workers/facade_worker/facade_worker/facade03analyzecommit.py b/workers/facade_worker/facade_worker/facade03analyzecommit.py --- a/workers/facade_worker/facade_worker/facade03analyzecommit.py +++ b/workers/facade_worker/facade_worker/facade03analyzecommit.py @@ -38,7 +38,7 @@ import configparser import traceback -from workers.standard_methods import read_config +from workers.util import read_config def analyze_commit(cfg, repo_id, repo_loc, commit, multithreaded): diff --git a/workers/facade_worker/facade_worker/runtime.py b/workers/facade_worker/facade_worker/runtime.py --- a/workers/facade_worker/facade_worker/runtime.py +++ b/workers/facade_worker/facade_worker/runtime.py @@ -1,57 +1,23 @@ from flask import Flask, jsonify, request, Response import click, os, json, requests, logging -from facade_worker.worker import FacadeWorker -from workers.util import read_config, create_server +from workers.facade_worker.facade_worker.facade00mainprogram import FacadeWorker +from workers.util import create_server, WorkerGunicornApplication [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51236, help='Port') -def main(augur_url, host, port): +def main(): """ Declares singular worker and creates the server and flask app that it will be running on """ - app = Flask(__name__) - - #load credentials - worker_info = read_config('Workers', 'github_worker', None, None) - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: # for multiple instances of workers - try: # trying each port for an already-alive worker until a free port is found - print("New github worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.facade_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - 'password': read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - 'port': read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - 'user': read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - 'database': read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - 'host': read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - } + app = Flask(__name__) + app.worker = FacadeWorker() - #create instance of the worker - app.worker = ContributorWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App 
with pid: " + str(os.getpid()) + "...") + create_server(app) + WorkerGunicornApplication(app).run() - app.run(debug=app.debug, host=host, port=worker_port) if app.worker._child is not None: app.worker._child.terminate() try: requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) except: pass - - logging.info("Killing Flask App: " + str(os.getpid())) + os.kill(os.getpid(), 9) diff --git a/workers/facade_worker/setup.py b/workers/facade_worker/setup.py --- a/workers/facade_worker/setup.py +++ b/workers/facade_worker/setup.py @@ -30,7 +30,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'facade_worker_start=facade_worker.runtime:main', + 'facade_worker_start=workers.facade_worker.facade_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/github_worker/__init__.py b/workers/github_worker/__init__.py new file mode 100644 diff --git a/workers/github_worker/github_worker/worker.py b/workers/github_worker/github_worker.py similarity index 82% rename from workers/github_worker/github_worker/worker.py rename to workers/github_worker/github_worker.py --- a/workers/github_worker/github_worker/worker.py +++ b/workers/github_worker/github_worker.py @@ -13,7 +13,9 @@ class GitHubWorker(Worker): queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config): + def __init__(self, config={}): + + worker_type = "github_worker" given = [['github_url']] models = ['issues'] @@ -24,9 +26,6 @@ def __init__(self, config): 'pull_request_repo'] operations_tables = ['worker_history', 'worker_job'] - # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) - # These 3 are included in every tuple the worker inserts (data collection info) self.tool_source = 'GitHub API Worker' self.tool_version = '0.0.3' # See __init__.py @@ -35,21 +34,25 @@ def __init__(self, config): self.finishing_task = True # if we are finishing a previous task, pagination works differenty self.platform_id = 25150 # GitHub - # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's - logging.info("Querying starting ids info...\n") - - self.issue_id_inc = self.get_max_id('issues', 'issue_id') + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) - self.msg_id_inc = self.get_max_id('message', 'msg_id') def issues_model(self, entry_info, repo_id): """ Data collection function Query the GitHub API for issues """ + + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's + self.logger.info("Querying starting ids info...\n") + + self.issue_id_inc = self.get_max_id('issues', 'issue_id') + + self.msg_id_inc = self.get_max_id('message', 'msg_id') github_url = entry_info['given']['github_url'] - logging.info("Beginning filling the issues model for repo: " + github_url + "\n") + self.logger.info("Beginning filling the issues model for repo: " + github_url + "\n") # Contributors are part of this model, and finding all for the repo saves us # from having to add them as we discover committers in the issue process @@ -83,10 +86,10 @@ def issues_model(self, entry_info, repo_id): 'WHERE repo_id = {}'.format(repo_id)) # Discover and remove duplicates before we start inserting - logging.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") + 
self.logger.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") for issue_dict in issues: - logging.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") + self.logger.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") # Add the FK repo_id to the dict being inserted issue_dict['repo_id'] = repo_id @@ -95,11 +98,11 @@ def issues_model(self, entry_info, repo_id): # still unsure about this key value pair/what it means pr_id = None if "pull_request" in issue_dict: - logging.info("Issue is a PR\n") + self.logger.info("Issue is a PR\n") # Right now we are just storing our issue id as the PR id if it is one pr_id = self.issue_id_inc else: - logging.info("Issue is not a PR\n") + self.logger.info("Issue is not a PR\n") # Begin on the actual issue... issue = { @@ -133,20 +136,20 @@ def issues_model(self, entry_info, repo_id): if issue_dict['flag'] == 'need_update': result = self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( + self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( issue_dict['id'])) self.issue_id_inc = issue_dict['pkey'] elif issue_dict['flag'] == 'need_insertion': try: result = self.db.execute(self.issues_table.insert().values(issue)) - logging.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) self.results_counter += 1 self.issue_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + + self.logger.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + " and title of: {} and gh_issue_num of: {}\n".format(issue_dict['title'],issue_dict['number'])) except Exception as e: - logging.info("When inserting an issue, ran into the following error: {}\n".format(e)) - logging.info(issue) + self.logger.info("When inserting an issue, ran into the following error: {}\n".format(e)) + self.logger.info(issue) continue # Check if the assignee key's value is already recorded in the assignees key's value @@ -157,7 +160,7 @@ def issues_model(self, entry_info, repo_id): # Handles case if there are no assignees if collected_assignees[0] is not None: - logging.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") + self.logger.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") for assignee_dict in collected_assignees: if type(assignee_dict) != dict: continue @@ -172,13 +175,13 @@ def issues_model(self, entry_info, repo_id): } # Commit insertion to the assignee table result = self.db.execute(self.issue_assignees_table.insert().values(assignee)) - logging.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + + self.logger.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + " with login/cntrb_id: " + assignee_dict['login'] + " " + str(assignee['cntrb_id']) + "\n") else: - logging.info("Issue does not have any assignees\n") + self.logger.info("Issue does not have any 
assignees\n") # Insert the issue labels to the issue_labels table for label_dict in issue_dict['labels']: @@ -198,10 +201,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_labels_table.insert().values(label)) - logging.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue label with text: " + label_dict['name'] + "\n") + self.logger.info("Inserted issue label with text: " + label_dict['name'] + "\n") #### Messages/comments and events insertion @@ -220,7 +223,7 @@ def issues_model(self, entry_info, repo_id): where_clause="WHERE msg_id IN (SELECT msg_id FROM issue_message_ref WHERE issue_id = {})".format( self.issue_id_inc)) - logging.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) + self.logger.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) for comment in issue_comments: try: @@ -238,13 +241,13 @@ def issues_model(self, entry_info, repo_id): } try: result = self.db.execute(self.message_table.insert().values(issue_comment)) - logging.info("Primary key inserted into the message table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the message table: {}".format(result.inserted_primary_key)) self.results_counter += 1 self.msg_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue comment with id: {}\n".format(self.msg_id_inc)) + self.logger.info("Inserted issue comment with id: {}\n".format(self.msg_id_inc)) except Exception as e: - logging.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) + self.logger.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) ### ISSUE MESSAGE REF TABLE ### @@ -259,7 +262,7 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_message_ref_table.insert().values(issue_message_ref)) - logging.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) self.results_counter += 1 # Base of the url for event endpoints @@ -283,7 +286,7 @@ def issues_model(self, entry_info, repo_id): multiple_pages = False while True: - logging.info("Hitting endpoint: " + events_url.format(i) + " ...\n") + self.logger.info("Hitting endpoint: " + events_url.format(i) + " ...\n") r = requests.get(url=events_url.format(i), headers=self.headers) self.update_gh_rate_limit(r) @@ -291,21 +294,21 @@ def issues_model(self, entry_info, repo_id): if 'last' in r.links and not multiple_pages and not self.finishing_task: param = r.links['last']['url'][-6:] i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") multiple_pages = True elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") + self.logger.info("Only 1 page of request\n") elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ... " + self.logger.info("Finishing a previous task, paginating forwards ... 
" "excess rate limit requests will be made\n") j = r.json() # Checking contents of requests with what we already have in the db - new_events = check_duplicates(j, event_table_values, pseudo_key_gh) + new_events = self.check_duplicates(j, event_table_values, pseudo_key_gh) if len(new_events) == 0 and multiple_pages and 'last' in r.links: if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown events, breaking from pagination.\n") + self.logger.info("No more pages with unknown events, breaking from pagination.\n") break elif len(new_events) != 0: to_add = [obj for obj in new_events if obj not in issue_events] @@ -315,17 +318,17 @@ def issues_model(self, entry_info, repo_id): # Since we already wouldve checked the first page... break if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") + self.logger.info("No more pages to check, breaking from pagination.\n") break - logging.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") + self.logger.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") # If the issue is closed, then we search for the closing event and store the user's id cntrb_id = None if 'closed_at' in issue_dict: for event in issue_events: if str(event['event']) != "closed": - logging.info("not closed, continuing") + self.logger.info("not closed, continuing") continue if not event['actor']: continue @@ -335,7 +338,7 @@ def issues_model(self, entry_info, repo_id): # Need to hit this single contributor endpoint to get extra created at data... cntrb_url = ("https://api.github.com/users/" + event['actor']['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) self.update_gh_rate_limit(r) contributor = r.json() @@ -384,17 +387,17 @@ def issues_model(self, entry_info, repo_id): # Commit insertion to table result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format( + self.logger.info("Primary key inserted into the contributors table: {}".format( result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: " + contributor['login'] + "\n") + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") for event in issue_events: if event['actor'] is not None: event['cntrb_id'] = self.find_id_from_login(event['actor']['login']) if event['cntrb_id'] is None: - logging.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") + self.logger.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") continue # event['cntrb_id'] = None else: @@ -416,10 +419,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_events_table.insert().values(issue_event)) - logging.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) + self.logger.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) if cntrb_id is not None: update_closing_cntrb = { @@ -427,7 +430,7 @@ def issues_model(self, entry_info, repo_id): } result 
= self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( + self.logger.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( issue_dict['id'])) self.issue_id_inc += 1 diff --git a/workers/github_worker/github_worker/__init__.py b/workers/github_worker/github_worker/__init__.py deleted file mode 100644 --- a/workers/github_worker/github_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/github_worker/github_worker/runtime.py b/workers/github_worker/github_worker/runtime.py deleted file mode 100644 --- a/workers/github_worker/github_worker/runtime.py +++ /dev/null @@ -1,53 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from github_worker.worker import GitHubWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51236, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - worker_info = read_config('Workers', 'github_worker', None, None) - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: # for multiple instances of workers - try: # trying each port for an already-alive worker until a free port is found - print("New github worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.github_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - 'gh_api_key': read_config('Database', 'key', 'AUGUR_GITHUB_API_KEY', 'key') - } - - #create instance of the worker - app.worker = GitHubWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.worker._child is not None: - app.worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/github_worker/runtime.py b/workers/github_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/github_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.github_worker.github_worker import GitHubWorker +from workers.util import create_server, 
WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/github_worker/setup.py b/workers/github_worker/setup.py --- a/workers/github_worker/setup.py +++ b/workers/github_worker/setup.py @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'github_worker_start=github_worker.runtime:main', + 'github_worker_start=workers.github_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/insight_worker/insight_worker/__init__.py b/workers/insight_worker/__init__.py similarity index 100% rename from workers/insight_worker/insight_worker/__init__.py rename to workers/insight_worker/__init__.py diff --git a/workers/insight_worker/insight_worker/worker.py b/workers/insight_worker/insight_worker.py similarity index 99% rename from workers/insight_worker/insight_worker/worker.py rename to workers/insight_worker/insight_worker.py --- a/workers/insight_worker/insight_worker/worker.py +++ b/workers/insight_worker/insight_worker.py @@ -21,8 +21,10 @@ class InsightWorker(Worker): queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - + def __init__(self, config={}): + + worker_type = "insight_worker" + given = [['git_url']] models = ['insights'] @@ -30,7 +32,12 @@ def __init__(self, config, task=None): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({ + 'api_host': self.augur_config.get_value('Server', 'host'), + 'api_port': self.augur_config.get_value('Server', 'port') + }) # These 3 are included in every tuple the worker inserts (data collection info) self.tool_source = 'Insight Worker' diff --git a/workers/insight_worker/insight_worker/runtime.py b/workers/insight_worker/insight_worker/runtime.py deleted file mode 100644 --- a/workers/insight_worker/insight_worker/runtime.py +++ /dev/null @@ -1,61 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from insight_worker.worker import InsightWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51236, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - worker_info = read_config('Workers', 'insight_worker', None, None) - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: # for multiple instances of workers - try: # trying each port for an already-alive worker until a free port is found - print("New worker trying port: {}\n".format(worker_port)) - r = 
requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.insight_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'), worker_port), - 'anomaly_days': worker_info['anomaly_days'] if 'anomaly_days' in worker_info else 2, - 'training_days': worker_info['training_days'] if 'training_days' in worker_info else 365, - 'confidence_interval': worker_info['confidence_interval'] if 'confidence_interval' in worker_info else .95, - 'contamination': worker_info['contamination'] if 'contamination' in worker_info else 0.041, - 'metrics': worker_info['metrics'] if 'metrics' in worker_info else {"issues-new": "issues", - "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"}, - 'api_host': read_config('Server', 'host', 'AUGUR_HOST', 'localhost'), - 'api_port': read_config('Server', 'port', 'AUGUR_PORT', '5000') - } - - #create instance of the worker - app.worker = InsightWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.worker._child is not None: - app.worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/insight_worker/runtime.py b/workers/insight_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/insight_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.insight_worker.insight_worker import InsightWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = InsightWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/insight_worker/setup.py b/workers/insight_worker/setup.py --- a/workers/insight_worker/setup.py +++ b/workers/insight_worker/setup.py @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'insight_worker_start=insight_worker.runtime:main', + 'insight_worker_start=workers.insight_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/linux_badge_worker/__init__.py b/workers/linux_badge_worker/__init__.py new file mode 100644 diff --git a/workers/linux_badge_worker/linux_badge_worker/worker.py b/workers/linux_badge_worker/linux_badge_worker.py similarity index 59% rename from workers/linux_badge_worker/linux_badge_worker/worker.py rename to 
workers/linux_badge_worker/linux_badge_worker.py --- a/workers/linux_badge_worker/linux_badge_worker/worker.py +++ b/workers/linux_badge_worker/linux_badge_worker.py @@ -6,18 +6,20 @@ from urllib.parse import quote from multiprocessing import Process, Queue -from linux_badge_worker import __data_source__, __tool_source__, __tool_version__ import pandas as pd import sqlalchemy as s from sqlalchemy.ext.automap import automap_base from sqlalchemy import MetaData from workers.worker_base import Worker -class BadgeWorker(Worker): +class LinuxBadgeWorker(Worker): """ Worker that collects repo badging data from CII config: database credentials, broker information, and ID """ - def __init__(self, config, task=None): + def __init__(self, config={}): + + worker_type = "linux_badge_worker" + given = [['git_url']] models = ['badges'] @@ -25,31 +27,37 @@ def __init__(self, config, task=None): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({"endpoint": "https://bestpractices.coreinfrastructure.org/projects.json?pq="}) + self.tool_source = 'Linux Badge Worker' + self.tool_version = '0.1.0' + self.data_source = 'CII Badging API' + def badges_model(self, entry_info, repo_id): """ Data collection and storage method Query the CII API and store the result in the DB for the badges model """ git_url = entry_info['given']['git_url'] - logging.info("Collecting data for {}".format(git_url)) + self.logger.info("Collecting data for {}".format(git_url)) extension = quote(git_url[0:-4]) url = self.config['endpoint'] + extension - logging.info("Hitting CII endpoint: " + url + " ...") + self.logger.info("Hitting CII endpoint: " + url + " ...") data = requests.get(url=url).json() if data != []: - logging.info("Inserting badging data for " + git_url) + self.logger.info("Inserting badging data for " + git_url) self.db.execute(self.repo_badging_table.insert()\ .values(repo_id=repo_id, data=data, - tool_source=__tool_source__, - tool_version=__tool_version__, - data_source=__data_source__)) + tool_source=self.tool_source, + tool_version=self.tool_version, + data_source=self.data_source)) self.results_counter += 1 else: - logging.info("No CII data found for {}\n".format(git_url)) + self.logger.info("No CII data found for {}\n".format(git_url)) self.register_task_completion(entry_info, repo_id, "badges") diff --git a/workers/linux_badge_worker/linux_badge_worker/__init__.py b/workers/linux_badge_worker/linux_badge_worker/__init__.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""linux_badge_worker - Augur worker that collects CII badging data""" - -__tool_source__ = 'Linux Badge Worker' -__tool_version__ = '0.1.0' -__data_source__ = 'CII Badging API' - -__author__ = 'Augur Team <[email protected]>' -__all__ = [] - diff --git a/workers/linux_badge_worker/linux_badge_worker/runtime.py b/workers/linux_badge_worker/linux_badge_worker/runtime.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/runtime.py +++ /dev/null @@ -1,54 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from linux_badge_worker.worker import BadgeWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', 
default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - worker_info = read_config('Workers', 'linux_badge_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(broker_host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.badge_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'), worker_port) - } - - #create instance of the worker - app.worker = BadgeWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - - if app.worker._child is not None: - app.worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/runtime.py b/workers/linux_badge_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/linux_badge_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.linux_badge_worker.linux_badge_worker import LinuxBadgeWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = LinuxBadgeWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/setup.py b/workers/linux_badge_worker/setup.py --- a/workers/linux_badge_worker/setup.py +++ b/workers/linux_badge_worker/setup.py @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'linux_badge_worker_start=linux_badge_worker.runtime:main', + 'linux_badge_worker_start=workers.linux_badge_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/metric_status_worker/metric_status_worker/__init__.py b/workers/metric_status_worker/metric_status_worker/__init__.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/metric_status_worker/metric_status_worker/runtime.py 
b/workers/metric_status_worker/metric_status_worker/runtime.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/runtime.py +++ /dev/null @@ -1,54 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from metric_status_worker.worker import MetricStatusWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - worker_info = read_config('Workers', 'metric_status_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(broker_host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.metric_status_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'), worker_port) - } - - #create instance of the worker - app.worker = MetricStatusWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - - if app.worker._child is not None: - app.worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/metric_status_worker/metric_status_worker/worker.py b/workers/metric_status_worker/metric_status_worker/worker.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/worker.py +++ /dev/null @@ -1,549 +0,0 @@ -import base64 -import logging -import os -import re -import sys -import json -import time -from abc import ABC -from datetime import datetime -from multiprocessing import Process, Queue -from urllib.parse import urlparse - -import pandas as pd -import requests -import sqlalchemy as s -from github import Github -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base -from workers.worker_base import Worker - -class MetricStatusWorker(Worker): - def __init__(self, config, task=None): - given = [['git_url']] - models = ['chaoss_metric_status'] - - data_tables = ['chaoss_metric_status'] - operations_tables = ['worker_history', 'worker_job'] - - # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) - - # These 3 are included in every tuple the worker inserts (data collection info) - self.tool_source = 'Metric Status Worker' - self.tool_version = '0.0.1' - self.data_source = 'GitHub API' - - def chaoss_metric_status_model(self, entry_info, repo_id): - """ Data colletction function - 
Query the github api for metric status - """ - status = MetricsStatus(self.API_KEY) - status.create_metrics_status() - metrics = status.metrics_status - - # convert to dict - dict_metrics = [] - for metric in metrics: - metric_info = { - 'cm_group': metric['group'], - 'cm_source': metric['data_source'], - 'cm_type': metric['metric_type'], - 'cm_backend_status': metric['backend_status'], - 'cm_frontend_status': metric['frontend_status'], - 'cm_api_endpoint_repo': metric['endpoint_repo'], - 'cm_api_endpoint_rg': metric['endpoint_group'], - 'cm_defined': metric['is_defined'], - 'cm_name': metric['display_name'], - 'cm_working_group': metric['group'], - 'cm_info': metric['tag'], - 'cm_working_group_focus_area': metric['focus_area'], - 'tool_source': self.tool_source, - 'tool_version': self.tool_version, - 'data_source': self.data_source, - } - dict_metrics.append(metric_info) - - need_insertion = self.filter_duplicates({'cm_api_endpoint_repo': "cm_api_endpoint_repo", 'cm_backend_status':'cm_api_endpoint_rg'}, ['chaoss_metric_status'], - dict_metrics) - logging.info("Count of contributors needing insertion: " + str(len(need_insertion)) + "\n") - for metric in need_insertion: - result = self.db.execute(self.chaoss_metric_status_table.insert().values(metric)) - logging.info("Primary key inserted into the metrics table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - - register_task_completion(self, entry_info, repo_id, 'chaoss_metric_status') - - - def update_exist_metrics(self, metrics): - need_update = [] - need_insert = [] - - for metric in metrics: - result = self.db.execute(self.chaoss_metric_status_table.update().where((self.chaoss_metric_status_table.c.cm_name == metric['cm_name'])&(self.chaoss_metric_status_table.c.cm_group == metric['cm_group']) & ((self.chaoss_metric_status_table.c.cm_api_endpoint_repo != metric['cm_api_endpoint_repo']) | (self.chaoss_metric_status_table.c.cm_api_endpoint_rg != metric['cm_api_endpoint_rg'])|(self.chaoss_metric_status_table.c.cm_source != metric['cm_source'])) - ).values(metric)) - - if result.rowcount: - logging.info("Update Metric {}-{}".format(metric['cm_group'], metric['cm_name'])) - - -class FrontendStatusExtractor(object): - - def __init__(self): - pass - self.api_text = open(os.path.abspath(os.path.dirname(os.path.dirname(os.getcwd()))) + - "/frontend/src/AugurAPI.ts", 'r').read() - self.attributes = re.findall( - r'(?:(GitEndpoint|Endpoint|Timeseries|addRepoMetric|addRepoGroupMetric)\()\'(.*)\', \'(.*)\'', - self.api_text) - self.timeseries = [ - attribute for attribute in self.attributes if attribute[0] == "Timeseries"] - self.endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "Endpoint"] - self.git_endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "GitEndpoint"] - self.repo_metrics = [ - attribute for attribute in self.attributes if attribute[0] == 'addRepoMetric'] - self.group_metric = [ - attribute for attribute in self.attributes if attribute[0] == 'addRepoMetric'] - - def determine_frontend_status(self, metric): - metric.frontend_status = 'unimplemented' - attribute = None - - if metric.metric_type == "timeseries": - attribute = next((attribute for attribute in self.timeseries if - "/api/unstable/<owner>/<repo>/timeseries/{}".format(attribute[2]) == metric.endpoint_repo), - None) - - elif metric.metric_type == "metric": - attribute = next((attribute for attribute in self.endpoints if - "/api/unstable/<owner>/<repo>/{}".format(attribute[2]) == metric.endpoint_repo), 
None) - if not attribute: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/repos/<repo_id>/{}".format( - attribute[2]) == metric.endpoint_repo), None) - if not attribute and metric.endpoint_group: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/{}".format(attribute[2]) == metric.endpoint_group), None) - - elif metric.metric_type == "git": - attribute = next((attribute for attribute in self.git_endpoints if - "/api/unstable/git/{}".format(attribute[2]) == metric.endpoint_repo), None) - - if attribute is not None: - metric.frontend_status = 'implemented' - metric.chart_mapping = attribute[1] - else: - metric.frontend_status = 'unimplemented' - - -class Metric(ABC): - - def __init__(self): - self.ID = None - self.tag = None - self.display_name = None - self.group = None - self.backend_status = 'unimplemented' - self.frontend_status = 'unimplemented' - self.chart_mapping = None - self.data_source = None - self.metric_type = None - self.documentation_url = None - self.is_defined = False - self.focus_area = None - self.endpoint_group = None - self.endpoint_repo = None - - -class GroupedMetric(Metric): - - def __init__(self, display_name, group, tag, focus_area): - Metric.__init__(self) - self.display_name = display_name - self.tag = tag - self.ID = re.sub(r'-$|\*', '', 'none' + '-' + self.tag) - self.group = group - self.focus_area = focus_area - - -class ImplementedMetric(Metric): - - def __init__(self, metadata, frontend_status_extractor): - Metric.__init__(self) - - self.ID = metadata['ID'] - self.tag = metadata['tag'] - self.display_name = metadata['metric_name'] - self.backend_status = 'implemented' - self.data_source = metadata['source'] - self.group = "experimental" - self.endpoint_group = None - self.endpoint_repo = None - - - if 'metric_type' in metadata: - self.metric_type = metadata['metric_type'] - else: - self.metric_type = 'metric' - - if 'endpoint' in metadata: - if 'group_endpoint' in metadata: - self.endpoint_group = metadata['group_endpoint'] - if 'repo_endpoint' in metadata: - self.endpoint_repo = metadata['repo_endpoint'] - else: - self.endpoint_repo = metadata['endpoint'] - frontend_status_extractor.determine_frontend_status(self) - - -class MetricsStatus(object): - wg_evolution = { - "repo": "chaoss/wg-evolution", - "focus_area": "focus_areas", - "name": 'evolution' - } - - wg_diversity_inclusion = { - "repo": "chaoss/wg-diversity-inclusion", - "focus_area": "focus-areas", - "name": "diversity-inclusion" - } - - wg_value = { - "repo": "chaoss/wg-value", - "focus_area": 'focus-areas', - "name": "value" - } - - wg_common = { - "repo": "chaoss/wg-common", - "focus_area": "focus-areas", - "name": "common" - } - - wg_risk = { - "repo": "chaoss/wg-risk", - "focus_area": "focus-areas", - "name": "risk" - } - - def __init__(self, githubapi): - self.__githubapi = githubapi - self.github = Github(self.__githubapi) - - # TODO: don't hardcode this - self.groups = { - "evolution": "Evolution", - "diversity-inclusion": "Diversity and Inclusion metrics", - "value": "Value", - "risk": "Risk", - "common": "Common", - "experimental": "Experimental", - "all": "All" - } - - self.implemented_metrics = [] - - self.evo_metrics = [] - self.di_metrics = [] - self.risk_metrics = [] - self.value_metrics = [] - self.common_metrics = [] - self.experimental_metrics = [] - - self.metrics_by_group = [] - - self.metrics_status = [] - - self.data_sources = [] - self.metric_types = 
[] - self.tags = {} - self.metadata = [] - - def create_metrics_status(self): - - self.build_implemented_metrics() - - self.evo_metrics = self.create_grouped_metrics( - self.wg_evolution, "evolution") - self.risk_metrics = self.create_grouped_metrics(self.wg_risk, "risk") - self.common_metrics = self.create_grouped_metrics( - self.wg_common, 'common') - self.di_metrics = self.create_grouped_metrics( - self.wg_diversity_inclusion, 'diversity-inclusion') - self.value_metrics = self.create_grouped_metrics( - self.wg_value, 'value') - - self.metrics_by_group = [self.evo_metrics, self.risk_metrics, - self.common_metrics, self.di_metrics, self.value_metrics] - - self.create_experimental_metrics() - self.metrics_by_group.append(self.experimental_metrics) - # - self.copy_implemented_metrics() - - self.find_defined_metrics() - - self.build_metrics_status() - - # self.build_metadata() - - def build_implemented_metrics(self): - frontend_status_extractor = FrontendStatusExtractor() - - r = requests.get( - url='http://{}:{}/api/unstable/batch/metadata'.format( - self.config['broker_host'],self.config['broker_port'])) - data = json.loads(r.text) - - for metric in data: - if "ID" in metric.keys(): - self.implemented_metrics.append( - ImplementedMetric(metric, frontend_status_extractor)) - - def create_grouped_metrics(self, group, group_name): - metrics = self.find_metrics_from_focus_area( - group['repo'], group['focus_area']) - - remote_metrics = [] - for metric in metrics: - remote_metrics.append(GroupedMetric(metric.display_name, group['name'], metric.tag, - metric.focus_area)) - - return remote_metrics - - def find_metrics_from_focus_area(self, repo_name, focus_area_path): - focus_areas = self.github.get_repo( - repo_name).get_dir_contents(focus_area_path) - metrics = [] - for area in focus_areas: - # get focus area name from filename - # focus_area_name = re.sub('.md','',re.sub('-', ' ',area.name)) - focus_area_name = None - focus_area_name_splited = [a.capitalize() for a in re.sub( - '.md', '', re.sub('[_]|[-]', ' ', area.name)).split()] - focus_area_name = ' '.join(focus_area_name_splited) - - # extract structure :focus_area_name/readme.md - if area.type == 'dir': - tmp = self.github.get_repo( - repo_name).get_dir_contents(area.path) - readme = [a for a in tmp if 'readme' in a.name.lower()] - if len(readme) == 0: - continue - else: - area = readme[0] - elif 'readme' in area.name.lower() or 'changelog' in area.name.lower(): - continue - - # decode content; github api return encoded content - decoded_content = base64.b64decode(area.content).decode('utf-8') - metric_name_tag = self.parse_table( - decoded_content) or self.parse_list(decoded_content) - - for name, tag in metric_name_tag.items(): - add_metric = Metric() - add_metric.display_name = name - add_metric.tag = tag - add_metric.focus_area = focus_area_name - - metrics.append(add_metric) - - if metric_name_tag is None: - continue - - return metrics - - def parse_table(self, md_content): - # group 0 is header, group 2 is |---|--|, and group 3 is table content - tables = re.findall( - r'^(\|?[^\n]+\|[^\n]+\|?\r?\n)((?:\|?\s*:?[-]+\s*:?)+\|?)(\n(?:\|?[^\n]+\|[^\n]+\|?\r?\n?)*)?$', md_content, - re.MULTILINE) - - if not tables: - return None - - box = [] - metrics_name_tag = {} - for table in tables: - # get metric name by 'metric_name' index in column - metric_index, length_in_row = self.get_metric_index_in_table_row( - table[0]) - table_content = [x.strip() - for x in table[2].replace('\n', '|').split('|')] - # remove two empty str - 
table_content.pop(0) - table_content.pop() - - raw_metrics = [table_content[a] for a in range( - metric_index, len(table_content), length_in_row)] - - for raw_metric in raw_metrics: - metric_name, metric_link = self.is_has_link( - raw_metric, md_content) - metric_name = re.sub('[\[]|[\]]', '', metric_name) - if not metric_link: - metric_link = re.sub(' ', '-', metric_name).lower() - metrics_name_tag[metric_name] = self.link_to_tag( - metric_name, str(metric_link)) - - return metrics_name_tag - - def get_metric_index_in_table_row(self, row): - header_names = [x.strip().lower() for x in row.split('|')] - # print(header_names) - index = None - if 'metric' in header_names: - index = header_names.index('metric') - elif 'name' in header_names: - index = header_names.index('name') - - return index, len(header_names) - - def parse_list(self, md_content): - matched_lists = re.findall(r'[-]\s+(.+)\n', md_content) - metric_names = {} - # print(matched_lists) - for matched in matched_lists: - # print(matched) - metirc_name = re.sub(r'.+:\s', '', matched) - metirc_name, metric_link = self.is_has_link( - metirc_name, md_content) - metirc_name = re.sub('[\[]|[\]]', '', metirc_name) - metric_names[metirc_name] = self.link_to_tag( - metirc_name, metric_link) - return metric_names - - def is_has_link(self, s, md_content): - # remove leading whitespace if exist - s = s.strip() - pattern_inline = re.compile(r'\[([^\[\]]+)\]\(([^)]+)') - match = pattern_inline.match(s) - - if match: - return match.group(1), match.group(2) - - pattern_ref = re.compile(r'\[([^\[\]]+)\]\[([^]]+)') - match2 = pattern_ref.match(s) - - if match2: - link = match2.group(2) - p = re.compile(r'\n\[' + link + r'\]:\s+(.+)\n') - res = p.search(md_content, re.DOTALL) - if res: - return match2.group(1), res.group(1) - else: - return s, None - - def link_to_tag(self, name, s): - - # generate tag if undefined metric - if not s: - return re.sub(' ', '-', name.lower()) - - pattern = re.compile(r'\/?([a-zA-Z_-]+)(\.md)?$') - m = pattern.search(s) - if m: - return re.sub('_', '-', re.sub('.md', '', m.group(1).lower())) - else: - return re.sub(' ', '-', re.sub('\(s\)', 's', name)) - - def create_experimental_metrics(self): - tags = [] - for group in self.metrics_by_group: - for metric in group: - tags.append(metric.tag) - - self.experimental_metrics = [ - metric for metric in self.implemented_metrics if metric.tag not in tags] - - def copy_implemented_metrics(self): - # takes implemented metrics and copies their data to the appropriate metric object - # I am so very sorry - # TODO: burn this into the ground - for group in enumerate(self.metrics_by_group): - if group[1] is not self.experimental_metrics: - for grouped_metric in group[1]: - defined_implemented_metrics = [ - metric for metric in self.implemented_metrics if grouped_metric.tag == metric.tag] - if defined_implemented_metrics != []: - for metric in defined_implemented_metrics: - metric.group = group[1][0].group - metric.focus_area = grouped_metric.focus_area - group[1].append(metric) - self.implemented_metrics.remove(metric) - grouped_metric.ID = 'n/a' - self.metrics_by_group[group[0]] = [ - metric for metric in group[1] if metric.ID != 'n/a'] - - def find_defined_metrics(self): - # return map {tag: html_url} - repo_names = [self.wg_common['repo'], self.wg_evolution['repo'], - self.wg_diversity_inclusion['repo'], self.wg_risk['repo'], self.wg_value['repo']] - - md_files = {} - - for repo_name in repo_names: - repo = self.github.get_repo(repo_name) - contents = repo.get_contents("") - - 
while len(contents) > 1: - file_content = contents.pop(0) - if file_content.type == "dir": - contents.extend(repo.get_contents(file_content.path)) - elif '.md' in file_content.name: - name = re.sub( - '_', '-', re.sub('.md', '', file_content.name)) - md_files[name.lower()] = file_content.html_url - - for group in self.metrics_by_group: - for metric in group: - if metric.tag in md_files.keys(): - metric.is_defined = True - metric.documentation_url = md_files[metric.tag] - - def build_metrics_status(self): - for group in self.metrics_by_group: - for metric in group: - self.metrics_status.append(metric.__dict__) - - def build_metadata(self): - self.get_metric_sources() - self.get_metric_types() - self.get_metric_tags() - - self.metadata = { - "remotes": { - "diversity_inclusion_urls": self.diversity_inclusion_urls, - "growth_maturity_decline_urls": self.growth_maturity_decline_urls, - "risk_urls": self.risk_urls, - "value_urls": self.value_urls, - "activity_repo_urls": self.activity_urls - }, - "groups": self.groups, - "data_sources": self.data_sources, - "metric_types": self.metric_types, - "tags": self.tags - } - - def get_metric_sources(self): - for data_source in [metric['data_source'] for metric in self.metrics_status]: - data_source = data_source.lower() - if data_source not in self.data_sources and data_source != "none": - self.data_sources.append(data_source) - self.data_sources.append("all") - - def get_metric_types(self): - for metric_type in [metric['metric_type'] for metric in self.metrics_status]: - metric_type = metric_type.lower() - if metric_type not in self.metric_types and metric_type != "none": - self.metric_types.append(metric_type) - self.metric_types.append("all") - - def get_metric_tags(self): - for tag in [(metric['tag'], metric['group']) for metric in self.metrics_status]: - # tag[0] = tag[0].lower() - if tag[0] not in [tag[0] for tag in self.tags] and tag[0] != "none": - self.tags[tag[0]] = tag[1] \ No newline at end of file diff --git a/workers/metric_status_worker/setup.py b/workers/metric_status_worker/setup.py deleted file mode 100644 --- a/workers/metric_status_worker/setup.py +++ /dev/null @@ -1,43 +0,0 @@ -import io -import os -import re - -from setuptools import find_packages -from setuptools import setup - - -def read(filename): - filename = os.path.join(os.path.dirname(__file__), filename) - text_type = type(u"") - with io.open(filename, mode="r", encoding='utf-8') as fd: - return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) - - -setup( - name="metric_status_worker", - version="0.1.0", - url="https://github.com/chaoss/augur", - license='MIT', - author="Augurlabs", - author_email="[email protected]", - description="Augur Worker that collects GitHub data", - packages=find_packages(exclude=('tests',)), - install_requires=[ - 'flask', - 'requests', - 'psycopg2-binary', - 'click' - ], - entry_points={ - 'console_scripts': [ - 'metric_status_worker_start=metric_status_worker.runtime:main', - ], - }, - classifiers=[ - 'Development Status :: 2 - Pre-Alpha', - 'License :: OSI Approved :: MIT License', - 'Programming Language :: Python', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', - ] -) diff --git a/workers/pull_request_worker/pull_request_worker/__init__.py b/workers/pull_request_worker/__init__.py similarity index 100% rename from workers/pull_request_worker/pull_request_worker/__init__.py rename to workers/pull_request_worker/__init__.py diff --git 
a/workers/pull_request_worker/pull_request_worker/worker.py b/workers/pull_request_worker/pull_request_worker.py similarity index 85% rename from workers/pull_request_worker/pull_request_worker/worker.py rename to workers/pull_request_worker/pull_request_worker.py --- a/workers/pull_request_worker/pull_request_worker/worker.py +++ b/workers/pull_request_worker/pull_request_worker.py @@ -6,14 +6,16 @@ from sqlalchemy.sql.expression import bindparam from workers.worker_base import Worker -class GithubPullRequestWorker(Worker): +class GitHubPullRequestWorker(Worker): """ Worker that collects Pull Request related data from the Github API and stores it in our database. :param task: most recent task the broker added to the worker's queue :param config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): + def __init__(self, config={}): + + worker_type = "pull_request_worker" # Define what this worker can be given and know how to interpret given = [['github_url']] @@ -28,28 +30,21 @@ def __init__(self, config, task=None): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) # Define data collection info self.tool_source = 'GitHub Pull Request Worker' self.tool_version = '0.0.1' # See __init__.py self.data_source = 'GitHub API' - logging.info("Querying starting ids info...\n") - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 - self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') - self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') - - def graphql_paginate(query, data_subjects, before_parameters=None): + def graphql_paginate(self, query, data_subjects, before_parameters=None): """ Paginate a GitHub GraphQL query backwards :param query: A string, holds the GraphQL query :rtype: A Pandas DataFrame, contains all data contained in the pages """ - logging.info(f'Start paginate with params: \n{data_subjects} ' + self.logger.info(f'Start paginate with params: \n{data_subjects} ' f'\n{before_parameters}') def all_items(dictionary): @@ -84,7 +79,7 @@ def find_root_of_subject(data, key_subject): for data_subject, nest in data_subjects.items(): - logging.info(f'Beginning paginate process for field {data_subject} ' + self.logger.info(f'Beginning paginate process for field {data_subject} ' f'for query: {query}') page_count = 0 @@ -96,7 +91,7 @@ def find_root_of_subject(data, key_subject): success = False for attempt in range(num_attempts): - logging.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' + self.logger.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' f'page number {page_count}\n') response = requests.post(base_url, json={'query': query.format( @@ -110,7 +105,7 @@ def find_root_of_subject(data, key_subject): data = json.loads(json.dumps(response.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) + self.logger.info("Error!: {}".format(data['errors'])) if data['errors'][0]['type'] == 'RATE_LIMITED': self.update_gh_rate_limit(response) num_attempts -= 1 @@ -124,9 +119,9 @@ def find_root_of_subject(data, key_subject): data = root['edges'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) 
+ self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url)) break if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': num_attempts -= 1 @@ -135,7 +130,7 @@ def find_root_of_subject(data, key_subject): self.update_gh_rate_limit(response, bad_credentials=True) if not success: - logging.info('GraphQL query failed: {}'.format(query)) + self.logger.info('GraphQL query failed: {}'.format(query)) continue before_parameters.update({ @@ -145,7 +140,7 @@ def find_root_of_subject(data, key_subject): tuples += data - logging.info(f'Paged through {page_count} pages and ' + self.logger.info(f'Paged through {page_count} pages and ' f'collected {len(tuples)} data points\n') if not nest: @@ -155,9 +150,9 @@ def find_root_of_subject(data, key_subject): before_parameters=before_parameters) - def pull_requests_graphql(self, task_info, repo_id): + def pull_request_files_model(self, task_info, repo_id): - owner, repo = get_owner_repo(task_info['given']['github_url']) + owner, repo = self.get_owner_repo(task_info['given']['github_url']) # query existing PRs and the respective url we will append the commits url to pr_number_sql = s.sql.text(""" @@ -171,7 +166,7 @@ def pull_requests_graphql(self, task_info, repo_id): for index, pull_request in enumerate(pr_numbers.itertuples()): - logging.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') + self.logger.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') query = """ {{ @@ -216,14 +211,14 @@ def pull_requests_graphql(self, task_info, repo_id): WHERE pull_request_files.pull_request_id = pull_requests.pull_request_id AND repo_id = :repo_id """) - logging.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') + self.logger.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') table_values = pd.read_sql(table_values_sql, self.db, params={'repo_id': repo_id}) # Compare queried values against table values for dupes/updates if len(pr_file_rows) > 0: table_columns = pr_file_rows[0].keys() else: - logging.info(f'No rows need insertion for repo {repo_id}\n') + self.logger.info(f'No rows need insertion for repo {repo_id}\n') self.register_task_completion(task_info, repo_id, 'pull_request_files') return @@ -250,7 +245,7 @@ def pull_requests_graphql(self, task_info, repo_id): pr_file_insert_rows = need_insertion.to_dict('records') pr_file_update_rows = need_updates.to_dict('records') - logging.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' + self.logger.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' f'{len(need_updates)} updates.\n') if len(pr_file_update_rows) > 0: @@ -267,7 +262,7 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) if len(pr_file_insert_rows) > 0: @@ -280,7 +275,7 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) self.register_task_completion(task_info, repo_id, 'pull_request_files') @@ -288,6 +283,14 @@ def 
pull_requests_graphql(self, task_info, repo_id): def pull_request_commits_model(self, task_info, repo_id): """ Queries the commits related to each pull request already inserted in the db """ + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + + # query existing PRs and the respective url we will append the commits url to pr_url_sql = s.sql.text(""" SELECT DISTINCT pr_url, pull_requests.pull_request_id @@ -320,7 +323,7 @@ def pull_request_commits_model(self, task_info, repo_id): 'data_source': 'GitHub API', } result = self.db.execute(self.pull_request_commits_table.insert().values(pr_commit_row)) - logging.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") self.register_task_completion(task_info, repo_id, 'pull_request_commits') @@ -330,10 +333,18 @@ def pull_requests_model(self, entry_info, repo_id): :param entry_info: A dictionary consisiting of 'git_url' and 'repo_id' :type entry_info: dict """ + + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + github_url = entry_info['given']['github_url'] - logging.info('Beginning collection of Pull Requests...\n') - logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') + self.logger.info('Beginning collection of Pull Requests...\n') + self.logger.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') owner, repo = self.get_owner_repo(github_url) @@ -351,11 +362,10 @@ def pull_requests_model(self, entry_info, repo_id): #list to hold pull requests needing insertion prs = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey, where_clause='WHERE repo_id = {}'.format(repo_id), - value_update_col_map={'pr_augur_contributor_id': float('nan')}, - include_all=True) + value_update_col_map={'pr_augur_contributor_id': float('nan')}) # Discover and remove duplicates before we start inserting - logging.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") + self.logger.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") for pr_dict in prs: @@ -401,21 +411,21 @@ def pull_requests_model(self, entry_info, repo_id): } if pr_dict['flag'] == 'need_insertion': - logging.info(f'PR {pr_dict["id"]} needs to be inserted\n') + self.logger.info(f'PR {pr_dict["id"]} needs to be inserted\n') result = self.db.execute(self.pull_requests_table.insert().values(pr)) - logging.info(f"Added Pull Request: {result.inserted_primary_key}") + self.logger.info(f"Added Pull Request: {result.inserted_primary_key}") self.pr_id_inc = int(result.inserted_primary_key[0]) elif pr_dict['flag'] == 'need_update': result = self.db.execute(self.pull_requests_table.update().where( self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr)) - logging.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( + 
self.logger.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( pr_dict['id'])) self.pr_id_inc = pr_dict['pkey'] else: - logging.info("PR does not need to be inserted. Fetching its id from DB") + self.logger.info("PR does not need to be inserted. Fetching its id from DB") pr_id_sql = s.sql.text(""" SELECT pull_request_id FROM pull_requests WHERE pr_src_id={} @@ -429,16 +439,16 @@ def pull_requests_model(self, entry_info, repo_id): self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc) self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc) - logging.info(f"Inserted PR data for {owner}/{repo}") + self.logger.info(f"Inserted PR data for {owner}/{repo}") self.results_counter += 1 self.register_task_completion(entry_info, repo_id, 'pull_requests') def query_labels(self, labels, pr_id): - logging.info('Querying PR Labels\n') + self.logger.info('Querying PR Labels\n') if len(labels) == 0: - logging.info('No new labels to add\n') + self.logger.info('No new labels to add\n') return table = 'pull_request_labels' @@ -454,7 +464,7 @@ def query_labels(self, labels, pr_id): new_labels = self.assign_tuple_action(labels, pr_labels_table_values, update_col_map, duplicate_col_map, table_pkey) - logging.info(f'Found {len(new_labels)} labels\n') + self.logger.info(f'Found {len(new_labels)} labels\n') for label_dict in new_labels: @@ -473,13 +483,13 @@ def query_labels(self, labels, pr_id): if label_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_labels_table.insert().values(label)) - logging.info(f"Added PR Label: {result.inserted_primary_key}\n") - logging.info(f"Inserted PR Labels data for PR with id {pr_id}\n") + self.logger.info(f"Added PR Label: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted PR Labels data for PR with id {pr_id}\n") self.results_counter += 1 def query_pr_events(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Events\n') + self.logger.info('Querying PR Events\n') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/events?per_page=100&page={}') @@ -495,7 +505,7 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): #list to hold contributors needing insertion or update pr_events = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") + self.logger.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") for pr_event_dict in pr_events: @@ -519,17 +529,17 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.pull_request_events_table.insert().values(pr_event)) - logging.info(f"Added PR Event: {result.inserted_primary_key}\n") + self.logger.info(f"Added PR Event: {result.inserted_primary_key}\n") self.results_counter += 1 - logging.info(f"Inserted PR Events data for PR with id {pr_id}\n") + self.logger.info(f"Inserted PR Events data for PR with id {pr_id}\n") def query_reviewers(self, reviewers, pr_id): - logging.info('Querying Reviewers') + self.logger.info('Querying Reviewers') if reviewers is None or len(reviewers) == 0: - logging.info('No reviewers to add') + self.logger.info('No reviewers to add') return table = 'pull_request_reviewers' @@ -562,17 +572,17 @@ def query_reviewers(self, reviewers, pr_id): if reviewers_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_reviewers_table.insert().values(reviewer)) - logging.info(f"Added PR 
Reviewer {result.inserted_primary_key}") + self.logger.info(f"Added PR Reviewer {result.inserted_primary_key}") self.results_counter += 1 - logging.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") + self.logger.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") def query_assignee(self, assignees, pr_id): - logging.info('Querying Assignees') + self.logger.info('Querying Assignees') if assignees is None or len(assignees) == 0: - logging.info('No assignees to add') + self.logger.info('No assignees to add') return table = 'pull_request_assignees' @@ -605,14 +615,14 @@ def query_assignee(self, assignees, pr_id): if assignee_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_assignees_table.insert().values(assignee)) - logging.info(f'Added PR Assignee {result.inserted_primary_key}') + self.logger.info(f'Added PR Assignee {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') def query_pr_meta(self, head, base, pr_id): - logging.info('Querying PR Meta') + self.logger.info('Querying PR Meta') table = 'pull_request_meta' duplicate_col_map = {'pr_sha': 'sha'} @@ -652,13 +662,12 @@ def query_pr_meta(self, head, base, pr_id): self.pull_request_meta_table.c.pr_sha==pr_meta['pr_sha'] and self.pull_request_meta_table.c.pr_head_or_base==pr_side ).values(pr_meta)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( - issue_dict['id'])) + # self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format(issue_dict['id'])) self.pr_meta_id_inc = pr_meta_data['pkey'] elif pr_meta_data['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_meta_table.insert().values(pr_meta)) - logging.info(f'Added PR Head {result.inserted_primary_key}') + self.logger.info(f'Added PR Head {result.inserted_primary_key}') self.pr_meta_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 @@ -673,12 +682,12 @@ def query_pr_meta(self, head, base, pr_id): if pr_meta_data['repo']: self.query_pr_repo(pr_meta_data['repo'], pr_side, self.pr_meta_id_inc) else: - logging.info('No new PR Head data to add') + self.logger.info('No new PR Head data to add') - logging.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Comments') + self.logger.info('Querying PR Comments') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/comments?per_page=100&page={}') @@ -694,7 +703,7 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): #list to hold contributors needing insertion or update pr_messages = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") + self.logger.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") for pr_msg_dict in pr_messages: @@ -717,7 +726,7 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.message_table.insert().values(msg)) - logging.info(f'Added PR Comment {result.inserted_primary_key}') + self.logger.info(f'Added PR Comment {result.inserted_primary_key}') pr_msg_ref = { 'pull_request_id': 
pr_id, @@ -732,14 +741,14 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): result = self.db.execute( self.pull_request_message_ref_table.insert().values(pr_msg_ref) ) - logging.info(f'Added PR Message Ref {result.inserted_primary_key}') + self.logger.info(f'Added PR Message Ref {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR Message data for PR with id {pr_id}') + self.logger.info(f'Finished adding PR Message data for PR with id {pr_id}') def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): - logging.info(f'Querying PR {pr_repo_type} repo') + self.logger.info(f'Querying PR {pr_repo_type} repo') table = 'pull_request_repo' duplicate_col_map = {'pr_src_repo_id': 'id'} @@ -776,8 +785,8 @@ def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): if new_pr_repo['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_repo_table.insert().values(pr_repo)) - logging.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') + self.logger.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') + self.logger.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') diff --git a/workers/pull_request_worker/pull_request_worker/runtime.py b/workers/pull_request_worker/pull_request_worker/runtime.py deleted file mode 100644 --- a/workers/pull_request_worker/pull_request_worker/runtime.py +++ /dev/null @@ -1,58 +0,0 @@ -import json, logging, os, click -import requests -from flask import Flask, Response, jsonify, request -from pull_request_worker.worker import GithubPullRequestWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51239, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - worker_info = read_config('Workers', 'pull_request_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - print("New pull request worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(broker_host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.github_pull_request_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - 'gh_api_key': read_config('Database', 'key', 'AUGUR_GITHUB_API_KEY', 'key') - } - - #create instance of the worker - - app.worker = GithubPullRequestWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.worker._child is not None: - app.worker._child.terminate() - try: - 
requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/pull_request_worker/runtime.py b/workers/pull_request_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/pull_request_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.pull_request_worker.pull_request_worker import GitHubPullRequestWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubPullRequestWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/pull_request_worker/setup.py b/workers/pull_request_worker/setup.py --- a/workers/pull_request_worker/setup.py +++ b/workers/pull_request_worker/setup.py @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'pull_request_worker_start=pull_request_worker.runtime:main', + 'pull_request_worker_start=workers.pull_request_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/release_worker/release_worker/__init__.py b/workers/release_worker/__init__.py similarity index 100% rename from workers/release_worker/release_worker/__init__.py rename to workers/release_worker/__init__.py diff --git a/workers/release_worker/release_worker/worker.py b/workers/release_worker/release_worker.py similarity index 76% rename from workers/release_worker/release_worker/worker.py rename to workers/release_worker/release_worker.py --- a/workers/release_worker/release_worker/worker.py +++ b/workers/release_worker/release_worker.py @@ -6,12 +6,14 @@ import sqlalchemy as s from sqlalchemy import MetaData from sqlalchemy.ext.automap import automap_base -from workers.worker_template import Worker +from workers.worker_base import Worker #TODO - fully edit to match releases class ReleaseWorker(Worker): - def __init__(self, config): - + def __init__(self, config={}): + + worker_type = "release_worker" + # Define what this worker can be given and know how to interpret given = [['github_url']] models = ['releases'] @@ -21,18 +23,18 @@ def __init__(self, config): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) # Define data collection info self.tool_source = 'Release Worker' self.tool_version = '0.0.1' self.data_source = 'GitHub API' - def repo_info_model(self, task, repo_id): + def releases_model(self, task, repo_id): github_url = task['given']['github_url'] - logging.info("Beginning filling the releases model for repo: " + github_url + "\n") + self.logger.info("Beginning filling the releases model for repo: " + github_url + "\n") owner, repo = self.get_owner_repo(github_url) @@ -70,7 +72,7 @@ def repo_info_model(self, task, repo_id): num_attempts = 0 success = False while num_attempts < 3: - logging.info("Hitting endpoint: {} 
...\n".format(url)) + self.logger.info("Hitting endpoint: {} ...\n".format(url)) r = requests.post(url, json={'query': query}, headers=self.headers) self.update_gh_rate_limit(r) @@ -80,8 +82,8 @@ def repo_info_model(self, task, repo_id): data = json.loads(json.dumps(r.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) - if data['errors']['message'] == 'API rate limit exceeded': + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': self.update_gh_rate_limit(r) continue @@ -90,9 +92,9 @@ def repo_info_model(self, task, repo_id): data = data['data']['repository'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) break if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': self.update_gh_rate_limit(r, temporarily_disable=True) @@ -112,15 +114,15 @@ def repo_info_model(self, task, repo_id): if 'node' in n: release = n['node'] insert_release(self, repo_id, owner, release) - logging.info("There's no release to insert. Current node is not available in releases: {}\n".format(n)) - logging.info("There are no releases to insert for current repository: {}\n".format(data)) - logging.info("Graphql response does not contain releases: {}\n".format(data)) - logging.info("Graphql response does not contain repository: {}\n".format(data)) + self.logger.info("There's no release to insert. 
Current node is not available in releases: {}\n".format(n)) + self.logger.info("There are no releases to insert for current repository: {}\n".format(data)) + self.logger.info("Graphql response does not contain releases: {}\n".format(data)) + self.logger.info("Graphql response does not contain repository: {}\n".format(data)) def insert_release(self, repo_id, owner, release): author = release['author']['name']+'_'+release['author']['company'] # Put all data together in format of the table - logging.info(f'Inserting release for repo with id:{repo_id}, owner:{owner}, release name:{release['name']}\n') + self.logger.info(f'Inserting release for repo with id:{repo_id}, owner:{owner}, release name:{release["name"]}\n') release_inf = { 'release_id': release['id'], 'repo_id': repo_id, @@ -140,10 +142,10 @@ def insert_release(self, repo_id, owner, release): } result = self.db.execute(self.releases_table.insert().values(release_inf)) - logging.info(f"Primary Key inserted into releases table: {result.inserted_primary_key}\n") + self.logger.info(f"Primary Key inserted into releases table: {result.inserted_primary_key}\n") self.results_counter += 1 - logging.info(f"Inserted info for {owner}/{repo}/{release['name']}\n") + self.logger.info(f"Inserted info for {owner}/{repo}/{release['name']}\n") #Register this task as completed self.register_task_completion(task, release_id, "releases") diff --git a/workers/release_worker/release_worker/runtime.py b/workers/release_worker/release_worker/runtime.py deleted file mode 100644 --- a/workers/release_worker/release_worker/runtime.py +++ /dev/null @@ -1,101 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from release_worker.worker import ReleaseWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.gh_release_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.gh_release_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = 
read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'release_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.release_worker.{}".format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - 'gh_api_key': read_config('Database', 'key', 'AUGUR_GITHUB_API_KEY', 'key') - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database') - } - - #create instance of the worker - app.gh_release_worker = ReleaseWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - if app.gh_release_worker._child is not None: - app.gh_release_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - - - diff --git a/workers/release_worker/runtime.py b/workers/release_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/release_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.release_worker.release_worker import ReleaseWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ReleaseWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/release_worker/setup.py b/workers/release_worker/setup.py --- a/workers/release_worker/setup.py +++ b/workers/release_worker/setup.py @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'release_worker_start=release_worker.runtime:main', + 'release_worker_start=workers.release_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/repo_info_worker/__init__.py b/workers/repo_info_worker/__init__.py new file mode 100644 diff --git a/workers/repo_info_worker/repo_info_worker/worker.py b/workers/repo_info_worker/repo_info_worker.py similarity index 88% rename from workers/repo_info_worker/repo_info_worker/worker.py rename to workers/repo_info_worker/repo_info_worker.py --- a/workers/repo_info_worker/repo_info_worker/worker.py +++ 
b/workers/repo_info_worker/repo_info_worker.py @@ -9,12 +9,14 @@ # 1. Displaying discrete metadata like "number of forks" and how they change over time # 2. Validating other workers, like those related to pull requests, issues, and commits. Our totals should be at or very near the totals in the repo_info table. -# This table also updates teh REPO table in 2 cases: +# This table also updates the REPO table in 2 cases: # 1. Recognizing when a repository is a forked repository by updating the "forked_from" field and # 2. Recognizing when a repository is archived, and recording the data we observed the change in status. class RepoInfoWorker(Worker): - def __init__(self, config): + def __init__(self, config={}): + + worker_type = "repo_info_worker" # Define what this worker can be given and know how to interpret given = [['github_url']] @@ -25,7 +27,7 @@ def __init__(self, config): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) # Define data collection info self.tool_source = 'Repo Info Worker' @@ -36,7 +38,7 @@ def repo_info_model(self, task, repo_id): github_url = task['given']['github_url'] - logging.info("Beginning filling the repo_info model for repo: " + github_url + "\n") + self.logger.info("Beginning filling the repo_info model for repo: " + github_url + "\n") owner, repo = self.get_owner_repo(github_url) @@ -105,7 +107,7 @@ def repo_info_model(self, task, repo_id): num_attempts = 0 success = False while num_attempts < 3: - logging.info("Hitting endpoint: {} ...\n".format(url)) + self.logger.info("Hitting endpoint: {} ...\n".format(url)) r = requests.post(url, json={'query': query}, headers=self.headers) self.update_gh_rate_limit(r) @@ -115,7 +117,7 @@ def repo_info_model(self, task, repo_id): data = json.loads(json.dumps(r.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) + self.logger.info("Error!: {}".format(data['errors'])) if data['errors'][0]['message'] == 'API rate limit exceeded': self.update_gh_rate_limit(r) continue @@ -125,9 +127,9 @@ def repo_info_model(self, task, repo_id): data = data['data']['repository'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) break if data['message'] == 'You have triggered an abuse detection mechanism. 
Please wait a few minutes before you try again.': self.update_gh_rate_limit(r, temporarily_disable=True) @@ -148,9 +150,11 @@ def repo_info_model(self, task, repo_id): if archived is not False: archived_date_collected = archived archived = True + else: + archived_date_collected = None # Put all data together in format of the table - logging.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') + self.logger.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') rep_inf = { 'repo_id': repo_id, 'last_updated': data['updatedAt'] if 'updatedAt' in data else None, @@ -184,23 +188,23 @@ def repo_info_model(self, task, repo_id): 'pull_requests_merged': data['pr_merged']['totalCount'] if data['pr_merged'] else None, 'tool_source': self.tool_source, 'tool_version': self.tool_version, - 'data_source': self.data_source, - 'forked_from': forked, - 'repo_archived': archived, - 'repo_archived_date_collected': archived_date_collected + 'data_source': self.data_source + # 'forked_from': forked, + # 'repo_archived': archived, + # 'repo_archived_date_collected': archived_date_collected } result = self.db.execute(self.repo_info_table.insert().values(rep_inf)) - logging.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") + self.logger.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") self.results_counter += 1 - logging.info(f"Inserted info for {owner}/{repo}\n") + self.logger.info(f"Inserted info for {owner}/{repo}\n") #Register this task as completed self.register_task_completion(task, repo_id, "repo_info") def query_committers_count(self, owner, repo): - logging.info('Querying committers count\n') + self.logger.info('Querying committers count\n') url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100' committers = 0 @@ -226,7 +230,7 @@ def is_forked(self, owner, repo): #/repos/:owner/:repo parent r = requests.get(url, headers=self.headers) self.update_gh_rate_limit(r) - data = self.get_repo_data(self, url, r) + data = self.get_repo_data(url, r) if 'fork' in data: if 'parent' in data: @@ -242,7 +246,7 @@ def is_archived(self, owner, repo): r = requests.get(url, headers=self.headers) self.update_gh_rate_limit(r) - data = self.get_repo_data(self, url, r) + data = self.get_repo_data(url, r) if 'archived' in data: if data['archived']: @@ -262,7 +266,7 @@ def get_repo_data(self, url, response): if 'errors' in data: logging.info("Error!: {}".format(data['errors'])) - if data['errors']['message'] == 'API rate limit exceeded': + if data['errors'][0]['message'] == 'API rate limit exceeded': self.update_gh_rate_limit(response) if 'id' in data: diff --git a/workers/repo_info_worker/repo_info_worker/__init__.py b/workers/repo_info_worker/repo_info_worker/__init__.py deleted file mode 100644 --- a/workers/repo_info_worker/repo_info_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""gh_repo_info_worker - Augur Worker that collects GitHub Repo Info data""" - -__version__ = '0.0.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/repo_info_worker/repo_info_worker/runtime.py b/workers/repo_info_worker/repo_info_worker/runtime.py deleted file mode 100644 --- a/workers/repo_info_worker/repo_info_worker/runtime.py +++ /dev/null @@ -1,54 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from repo_info_worker.worker import RepoInfoWorker -from workers.util import read_config, create_server - 
[email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - worker_info = read_config('Workers', 'repo_info_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(broker_port, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.repo_info_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - 'gh_api_key': read_config('Database', 'key', 'AUGUR_GITHUB_API_KEY', 'key') - } - - #create instance of the worker - app.worker = RepoInfoWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - if app.worker._child is not None: - app.worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/repo_info_worker/runtime.py b/workers/repo_info_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/repo_info_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.repo_info_worker.repo_info_worker import RepoInfoWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = RepoInfoWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/repo_info_worker/setup.py b/workers/repo_info_worker/setup.py --- a/workers/repo_info_worker/setup.py +++ b/workers/repo_info_worker/setup.py @@ -19,16 +19,15 @@ def read(filename): author="Augurlabs", author_email="[email protected]", description="Augur Worker that collects general data about a repo on GitHub", - packages=find_packages(exclude=('tests',)), + packages=find_packages(), install_requires=[ 'flask', 'requests', - 'psycopg2-binary', - 'click' + 'psycopg2-binary' ], entry_points={ 'console_scripts': [ - 'repo_info_worker_start=repo_info_worker.runtime:main', + 'repo_info_worker_start=workers.repo_info_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/standard_methods.py b/workers/standard_methods.py deleted file mode 100644 --- 
a/workers/standard_methods.py +++ /dev/null @@ -1,719 +0,0 @@ -""" Helper methods constant across all workers """ -import requests, datetime, time, traceback, json, os, sys, math -import sqlalchemy as s -import pandas as pd -import os -import sys, logging -from urllib.parse import urlparse - -def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): - """ map objects => { *our db col* : *gh json key*} """ - need_insertion_count = 0 - need_update_count = 0 - for i, obj in enumerate(new_data): - if type(obj) != dict: - logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) - continue - - obj['flag'] = 'none' # default of no action needed - existing_tuple = None - for db_dupe_key in list(duplicate_col_map.keys()): - - if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): - if table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'): - existing_tuple = table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] - continue - - logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) - obj['flag'] = 'need_insertion' - need_insertion_count += 1 - break - - if obj['flag'] == 'need_insertion': - logging.info('Already determined that current tuple needs insertion, skipping checking updates. ' - 'Moving to next tuple.\n') - continue - - # If we need to check the values of the existing tuple to determine if an update is needed - for augur_col, value_check in value_update_col_map.items(): - not_nan_check = not (pd.isna(value_check) and pd.isna(existing_tuple[augur_col])) if value_check is not None else True - if existing_tuple[augur_col] != value_check and not_nan_check: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - if obj['flag'] == 'need_update': - logging.info('Already determined that current tuple needs update, skipping checking further updates. 
' - 'Moving to next tuple.\n') - continue - - # Now check the existing tuple's values against the response values to determine if an update is needed - for col in update_col_map.keys(): - if update_col_map[col] not in obj: - continue - if obj[update_col_map[col]] == existing_tuple[col]: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + - "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count)) - return new_data - -def check_duplicates(new_data, table_values, key): - need_insertion = [] - for obj in new_data: - if type(obj) == dict: - if not table_values.isin([obj[key]]).any().any(): - need_insertion.append(obj) - # else: - # logging.info("Tuple with github's {} key value already".format(key) + - # "exists in our db: {}\n".format(str(obj[key]))) - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + - "was reduced to {} tuples.\n".format(str(len(need_insertion)))) - return need_insertion - -def connect_to_broker(self): - connected = False - for i in range(5): - try: - logging.info("attempt {}\n".format(i)) - if i > 0: - time.sleep(10) - requests.post('http://{}:{}/api/unstable/workers'.format( - self.config['broker_host'],self.config['broker_port']), json=self.specs) - logging.info("Connection to the broker was successful\n") - connected = True - break - except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker. Trying again...\n') - if not connected: - sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n') - -def dump_queue(queue): - """ - Empties all pending items in a queue and returns them in a list. 
- """ - result = [] - queue.put("STOP") - for i in iter(queue.get, 'STOP'): - result.append(i) - # time.sleep(.1) - return result - -def find_id_from_login(self, login): - idSQL = s.sql.text(""" - SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' - """.format(login)) - rs = pd.read_sql(idSQL, self.db, params={}) - data_list = [list(row) for row in rs.itertuples(index=False)] - try: - return data_list[0][0] - except: - logging.info("contributor needs to be added...") - - cntrb_url = ("https://api.github.com/users/" + login) - logging.info("Hitting endpoint: {} ...\n".format(cntrb_url)) - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'] if 'login' in contributor else None, - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, - "cntrb_canonical": None, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - return find_id_from_login(self, login) - -def get_owner_repo(github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - -def get_max_id(self, table, column, default=25150, operations_table=False): - maxIdSQL = s.sql.text(""" - SELECT max({0}.{1}) AS {1} - FROM {0} - """.format(table, column)) - db = self.db if not operations_table else self.helper_db - rs = pd.read_sql(maxIdSQL, db, params={}) - if rs.iloc[0][column] is not None: - max_id = int(rs.iloc[0][column]) + 1 - logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) - else: - max_id = default - logging.info("Could not find max id for {} column in the {} table... 
using default set to: \ - {}\n".format(column, table, max_id)) - return max_id - -def get_table_values(self, cols, tables, where_clause=""): - table_str = tables[0] - del tables[0] - - col_str = cols[0] - del cols[0] - - for table in tables: - table_str += ", " + table - for col in cols: - col_str += ", " + col - - tableValuesSQL = s.sql.text(""" - SELECT {} FROM {} {} - """.format(col_str, table_str, where_clause)) - logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL)) - values = pd.read_sql(tableValuesSQL, self.db, params={}) - return values - -def init_oauths(self): - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauthSQL = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(self.config['key'])) - for oauth in [{'oauth_id': 0, 'access_token': self.config['key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}\n".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - -def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}, platform="github"): - # Paginate backwards through all the tuples but get first page in order - # to determine if there are multiple pages and if the 1st page covers all - update_keys = list(update_col_map.keys()) if update_col_map else [] - update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] - cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - table_values = get_table_values(self, cols_query, [table], where_clause) - - i = 1 - multiple_pages = False - tuples = [] - while True: - num_attempts = 0 - success = False - while num_attempts < 3: - logging.info("Hitting endpoint: " + url.format(i) + " ...\n") - r = requests.get(url=url.format(i), headers=self.headers) - update_gh_rate_limit(self, r) - if platform == "github": - last_page = r.links['last']['url'][-6:].split('=')[1] - elif platform == "gitlab": - last_page = r.links['last']['url'].split('&')[2].split("=")[1] - logging.info("Analyzing page {} of {}\n".format(i, int(last_page) + 1 if 'last' in r.links else '*last page not known*')) - - try: - j = r.json() - except: - j = json.loads(json.dumps(r.text)) - - if type(j) != dict and type(j) != str: - success = True - break - elif type(j) == dict: - logging.info("Request returned a dict: {}\n".format(j)) - 
if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) - break - if j['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': - num_attempts -= 1 - update_gh_rate_limit(self, r, temporarily_disable=True) - if j['message'] == 'Bad credentials': - update_gh_rate_limit(self, r, bad_credentials=True) - elif type(j) == str: - logging.info("J was string: {}\n".format(j)) - if '<!DOCTYPE html>' in j: - logging.info("HTML was returned, trying again...\n") - elif len(j) == 0: - logging.info("Empty string, trying again...\n") - else: - try: - j = json.loads(j) - success = True - break - except: - pass - num_attempts += 1 - if not success: - break - - # Find last page so we can decrement from there - if 'last' in r.links and not multiple_pages and not self.finishing_task: - if platform == "github": - param = r.links['last']['url'][-6:] - i = int(param.split('=')[1]) + 1 - elif platform == "gitlab": - i = int(r.links['last']['url'].split('&')[2].split("=")[1]) - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") - multiple_pages = True - elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") - elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ..." - " excess rate limit requests will be made\n") - - if len(j) == 0: - logging.info("Response was empty, breaking from pagination.\n") - break - - # Checking contents of requests with what we already have in the db - j = assign_tuple_action(self, j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) - if not j: - logging.info("Assigning tuple action failed, moving to next page.\n") - i = i + 1 if self.finishing_task else i - 1 - continue - try: - to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none'] - except Exception as e: - logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) - i = i + 1 if self.finishing_task else i - 1 - continue - if len(to_add) == 0 and multiple_pages and 'last' in r.links: - logging.info("{}".format(r.links['last'])) - if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown tuples, breaking from pagination.\n") - break - tuples += to_add - - i = i + 1 if self.finishing_task else i - 1 - - # Since we already wouldve checked the first page... 
break - if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") - break - - return tuples - -def query_github_contributors(self, entry_info, repo_id): - - """ Data collection function - Query the GitHub API for contributors - """ - logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n") - - github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] - - # Extract owner/repo from the url for the endpoint - path = urlparse(github_url) - split = path[2].split('/') - - owner = split[1] - name = split[2] - - # Handles git url case by removing the extension - if ".git" in name: - name = name[:-4] - - # Set the base of the url and place to hold contributors to insert - contributors_url = ("https://api.github.com/repos/" + owner + "/" + - name + "/contributors?per_page=100&page={}") - - # Get contributors that we already have stored - # Set our duplicate and update column map keys (something other than PK) to - # check dupicates/needed column updates with - table = 'contributors' - table_pkey = 'cntrb_id' - update_col_map = {'cntrb_email': 'email'} - duplicate_col_map = {'cntrb_login': 'login'} - - #list to hold contributors needing insertion or update - contributors = paginate(self, contributors_url, duplicate_col_map, update_col_map, table, table_pkey) - - logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") - - for repo_contributor in contributors: - try: - # Need to hit this single contributor endpoint to get extra data including... - # `created at` - # i think that's it - cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - canonical_email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'], - "cntrb_created_at": contributor['created_at'], - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - # "cntrb_type": , dont have a use for this as of now ... 
let it default to null - "cntrb_canonical": canonical_email, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - - # Commit insertion to table - if repo_contributor['flag'] == 'need_update': - result = self.db.execute(self.contributors_table.update().where( - self.history_table.c.cntrb_email==email).values(cntrb)) - logging.info("Updated tuple in the contributors table with existing email: {}".format(email)) - self.cntrb_id_inc = repo_contributor['pkey'] - elif repo_contributor['flag'] == 'need_insertion': - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) - self.results_counter += 1 - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - # Increment our global track of the cntrb id for the possibility of it being used as a FK - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - except Exception as e: - logging.info("Caught exception: {}".format(e)) - logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) - continue - -def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): - """ - Read a variable in specified section of the config file, unless provided an environment variable - - :param section: location of given variable - :param name: name of variable - """ - config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path) - _config_file_name = 'augur.config.json' - _config_bad = False - _already_exported = {} - _runtime_location = 'runtime/' - _default_config = {} - _config_file = None - - try: - _config_file = open(config_file_path, 'r+') - except: - print('Couldn\'t open {}'.format(_config_file_name)) - - # Load the config file - try: - config_text = _config_file.read() - _config = json.loads(config_text) - except json.decoder.JSONDecodeError as e: - if not _config_bad: - _using_config_file = False - print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: {}'.format(config_file_path, str(e))) - _config = _default_config - - value = None - if environment_variable is not None: - value = os.getenv(environment_variable) - if value is None: - try: - if name is not None: - value = _config[section][name] - else: - value = _config[section] - except Exception as e: - value = default - if not section in _config: - _config[section] = {} - - return value - - -def record_model_process(self, repo_id, model): - - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Stopped", - "total_results": self.results_counter - } - if self.finishing_task: - result = self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - self.history_id += 1 - else: - result = self.helper_db.execute(self.history_table.insert().values(task_history)) - logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) - self.history_id = int(result.inserted_primary_key[0]) - -def register_task_completion(self, task, repo_id, model): - # Task to send back to broker - task_completed = { - 'worker_id': self.config['id'], - 'job_type': "MAINTAIN", - 'repo_id': repo_id, - 'job_model': model - } - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] if 'git_url' in task['given'] else "INVALID_GIVEN" - if key == 'INVALID_GIVEN': - register_task_failure(self, task, repo_id, "INVALID_GIVEN: not github nor git url") - return - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Success", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job completion for: " + str(task_completed) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where( - self.job_table.c.job_model==model).values(updated_job)) - logging.info("Updated job process for model: " + model + "\n") - - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - - # Reset results counter for next task - self.results_counter = 0 - -def register_task_failure(self, task, repo_id, e): - - logging.info("Worker ran into an error for task: {}\n".format(task)) - logging.info("Printing traceback...\n") - tb = traceback.format_exc() - logging.info(tb) - - logging.info(f'This task inserted {self.results_counter} tuples before failure.\n') - logging.info("Notifying broker and logging task failure in database...\n") - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - url = task['given'][key] - - """ Query 
all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(url)) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - task['worker_id'] = self.config['id'] - try: - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=task) - except requests.exceptions.ConnectionError: - logging.error('Could not send task failure message to the broker\n') - except Exception: - logging.exception('An error occured while informing broker about task failure\n') - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": task['models'][0], - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error in the history table for: " + str(task) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==task['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + task['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - -def retrieve_tuple(self, key_values, tables): - table_str = tables[0] - del tables[0] - - key_values_items = list(key_values.items()) - for col, value in [key_values_items[0]]: - where_str = col + " = '" + value + "'" - del key_values_items[0] - - for col, value in key_values_items: - where_str += ' AND ' + col + " = '" + value + "'" - for table in tables: - table_str += ", " + table - - retrieveTupleSQL = s.sql.text(""" - SELECT * FROM {} WHERE {} - """.format(table_str, where_str)) - values = json.loads(pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")) - return values - -def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): - # Try to get rate limit from request headers, sometimes it does not work (GH's issue) - # In that case we just decrement from last recieved header count - if bad_credentials and len(self.oauths) > 1: - logging.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) - del self.oauths[0] - - if temporarily_disable: - logging.info("Github thinks we are abusing their api. 
Preventing use of this key until it resets...\n") - self.oauths[0]['rate_limit'] = 0 - else: - try: - self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - logging.info("Recieved rate limit from headers\n") - except: - self.oauths[0]['rate_limit'] -= 1 - logging.info("Headers did not work, had to decrement\n") - logging.info("Updated rate limit, you have: " + - str(self.oauths[0]['rate_limit']) + " requests remaining.\n") - if self.oauths[0]['rate_limit'] <= 0: - try: - reset_time = response.headers['X-RateLimit-Reset'] - except Exception as e: - logging.info("Could not get reset time from headers because of error: {}".format(e)) - logging.info('Headers: {}'.format(response.headers)) - reset_time = 3600 - time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() - logging.info("Rate limit exceeded, checking for other available keys to use.\n") - - # We will be finding oauth with the highest rate limit left out of our list of oauths - new_oauth = self.oauths[0] - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] - for oauth in other_oauths: - logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - response = requests.get(url=url, headers=self.headers) - oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() - - # Update oauth to switch to if a higher limit is found - if oauth['rate_limit'] > new_oauth['rate_limit']: - logging.info("Higher rate limit found in oauth: {}\n".format(oauth)) - new_oauth = oauth - elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: - logging.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) - new_oauth = oauth - - if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: - logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) - time.sleep(new_oauth['seconds_to_reset']) - - # Make new oauth the 0th element in self.oauths so we know which one is in use - index = self.oauths.index(new_oauth) - self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] - logging.info("Using oauth: {}\n".format(self.oauths[0])) - - # Change headers to be using the new oauth's key - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} diff --git a/workers/template_worker/__init__.py b/workers/template_worker/__init__.py new file mode 100644 diff --git a/workers/template_worker/runtime.py b/workers/template_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/template_worker/runtime.py @@ -0,0 +1,23 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.template_worker.template_worker import TemplateWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ + Creates the Flask app and data collection worker, then starts the Gunicorn server + """ + app = Flask(__name__) + app.worker = TemplateWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + 
app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/template_worker/setup.py b/workers/template_worker/setup.py --- a/workers/template_worker/setup.py +++ b/workers/template_worker/setup.py @@ -19,16 +19,15 @@ def read(filename): author="Augur Team", author_email="[email protected]", description="Template worker to be used as an example", - packages=find_packages(exclude=('tests',)), + packages=find_packages(), install_requires=[ 'flask', 'requests', - 'psycopg2-binary', - 'click' + 'psycopg2-binary' ], entry_points={ 'console_scripts': [ - 'template_worker_start=template_worker.runtime:main', + 'template_worker_start=workers.template_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/template_worker/template_worker/worker.py b/workers/template_worker/template_worker.py similarity index 76% rename from workers/template_worker/template_worker/worker.py rename to workers/template_worker/template_worker.py --- a/workers/template_worker/template_worker/worker.py +++ b/workers/template_worker/template_worker.py @@ -6,12 +6,16 @@ from workers.worker_base import Worker class TemplateWorker(Worker): - def __init__(self, config): + def __init__(self, config={}): - # Define what this worker can be given and know how to interpret + # Define the worker's type, which will be used for self identification. + # Should be unique among all workers and is the same key used to define + # this worker's settings in the configuration file. + worker_type = "template_worker" + # Define what this worker can be given and know how to interpret # given is usually either [['github_url']] or [['git_url']] (depending if your - # worker is exclusive to repos that are on the GitHub platform) + # worker is exclusive to repos that are on the GitHub platform) given = [[]] # The name the housekeeper/broker use to distinguish the data model this worker can fill @@ -28,7 +32,14 @@ def __init__(self, config): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Do any additional configuration after the general initialization has been run + self.config.update(config) + + # If you need to do some preliminary interactions with the database, these MUST go + # in the model method. The database connection is instantiated only inside of each + # data collection process # Define data collection info self.tool_source = 'Fake Template Worker' @@ -54,8 +65,11 @@ def fake_data_model(self, task, repo_id): } :param repo_id: the collect() method queries the repo_id given the git/github url and passes it along to make things easier. An int such as: 27869 + """ + # Any initial database instructions, like finding the last tuple inserted or generate the next ID value + # Collection and insertion of data happens here # ... 
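For readers adapting the template worker above: a minimal sketch of what the body of `fake_data_model` might contain under these conventions. The `fake_data` table name and record fields are hypothetical and not part of this patch; the insert-then-`register_task_completion` pattern simply mirrors the other workers in this diff.

```python
def fake_data_model(self, task, repo_id):
    # Database engines (self.db / self.helper_db) are only available here,
    # because initialize_database_connections() runs inside the collection process.
    record = {
        'repo_id': repo_id,
        'tool_source': self.tool_source,
        'tool_version': self.tool_version,
        'data_source': self.data_source,
    }
    # 'fake_data_table' is a hypothetical table attribute, assumed to be listed in data_tables
    result = self.db.execute(self.fake_data_table.insert().values(record))
    self.logger.info(f"Inserted primary key: {result.inserted_primary_key}")
    self.results_counter += 1

    # Report completion back to the broker and the operations tables
    self.register_task_completion(task, repo_id, 'fake_data')
```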
diff --git a/workers/template_worker/template_worker/__init__.py b/workers/template_worker/template_worker/__init__.py deleted file mode 100644 --- a/workers/template_worker/template_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""gh_repo_info_worker - Augur Worker that collects GitHub Repo Info data""" - -__version__ = '0.0.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/template_worker/template_worker/runtime.py b/workers/template_worker/template_worker/runtime.py deleted file mode 100644 --- a/workers/template_worker/template_worker/runtime.py +++ /dev/null @@ -1,54 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from template_worker.worker import TemplateWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - worker_info = read_config('Workers', 'template_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(broker_host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.template_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'), worker_port) - } - - #create instance of the worker - app.worker = TemplateWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - - if app.worker._child is not None: - app.worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/util.py b/workers/util.py --- a/workers/util.py +++ b/workers/util.py @@ -1,5 +1,6 @@ import os, json, requests, logging from flask import Flask, Response, jsonify, request +import gunicorn.app.base def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): """ @@ -47,7 +48,7 @@ def read_config(section, name=None, environment_variable=None, default=None, con return value -def create_server(app, worker): +def create_server(app, worker=None): """ Consists of AUGWOP endpoints for the broker to communicate to this worker Can post a new task to be added to the workers queue Can retrieve current status of the worker @@ -83,4 +84,28 @@ def heartbeat(): def augwop_config(): """ Retrieve worker's config """ - return app.worker.config \ No newline at end of file + return app.worker.config + +class 
WorkerGunicornApplication(gunicorn.app.base.BaseApplication): + + def __init__(self, app): + self.options = { + 'bind': '%s:%s' % (app.worker.config["host"], app.worker.config["port"]), + 'workers': 1, + 'errorlog': app.worker.config['server_logfile'], + 'accesslog': app.worker.config['server_logfile'], + 'loglevel': app.worker.config['log_level'], + 'capture_output': app.worker.config['capture_output'] + } + + self.application = app + super().__init__() + + def load_config(self): + config = {key: value for key, value in self.options.items() + if key in self.cfg.settings and value is not None} + for key, value in config.items(): + self.cfg.set(key.lower(), value) + + def load(self): + return self.application diff --git a/workers/value_worker/__init__.py b/workers/value_worker/__init__.py new file mode 100644 diff --git a/workers/value_worker/runtime.py b/workers/value_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/value_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.value_worker.value_worker import ValueWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ValueWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) \ No newline at end of file diff --git a/workers/value_worker/setup.py b/workers/value_worker/setup.py --- a/workers/value_worker/setup.py +++ b/workers/value_worker/setup.py @@ -5,33 +5,23 @@ from setuptools import find_packages from setuptools import setup - -def read(filename): - filename = os.path.join(os.path.dirname(__file__), filename) - text_type = type(u"") - with io.open(filename, mode="r", encoding='utf-8') as fd: - return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) - - setup( name="value_worker", version="0.1.0", url="https://github.com/chaoss/augur", license='MIT', - author="Augurlabs", author_email="[email protected]", - description="Augur Worker that gathers value data", - long_description=read("README.md"), - packages=find_packages(exclude=('tests',)), - - install_requires=['flask', 'requests', 'psycopg2-binary', 'click'], - + install_requires=[ + 'flask', + 'requests', + 'psycopg2-binary', + ], entry_points={ 'console_scripts': [ - 'value_worker_start=value_worker.runtime:main', + 'value_worker_start=workers.value_worker.runtime:main', ], }, diff --git a/workers/value_worker/value_worker/worker.py b/workers/value_worker/value_worker.py similarity index 74% rename from workers/value_worker/value_worker/worker.py rename to workers/value_worker/value_worker.py --- a/workers/value_worker/value_worker/worker.py +++ b/workers/value_worker/value_worker.py @@ -6,7 +6,6 @@ from urllib.parse import quote from multiprocessing import Process, Queue -from value_worker import __data_source__, __tool_source__, __tool_version__ import pandas as pd import sqlalchemy as s from sqlalchemy.ext.automap import automap_base @@ -14,8 +13,10 @@ from workers.worker_base import Worker class ValueWorker(Worker): - def __init__(self, config, task=None): - + def __init__(self, config={}): + + worker_type = "value_worker" + 
# Define what this worker can be given and know how to interpret given = [['git_url']] models = ['value'] @@ -24,14 +25,23 @@ def __init__(self, config, task=None): data_tables = ['repo_labor'] operations_tables = ['worker_history', 'worker_job'] + # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({ + 'repo_directory': self.augur_config.get_value('Workers', 'facade_worker')['repo_directory'] + }) + + self.tool_source = 'Value Worker' + self.tool_version = '0.1.0' + self.data_source = 'SCC' def value_model(self, entry_info, repo_id): """ Data collection and storage method """ - logging.info(entry_info) - logging.info(repo_id) + self.logger.info(entry_info) + self.logger.info(repo_id) repo_path_sql = s.sql.text(""" SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path @@ -45,7 +55,7 @@ def value_model(self, entry_info, repo_id): try: self.generate_value_data(repo_id, absolute_repo_path) except Exception as e: - logging.error(e) + self.logger.error(e) self.register_task_completion(entry_info, repo_id, "value") @@ -55,8 +65,8 @@ def generate_value_data(self, repo_id, path): :param repo_id: Repository ID :param path: Absolute path of the Repostiory """ - logging.info('Running `scc`....') - logging.info(f'Repo ID: {repo_id}, Path: {path}') + self.logger.info('Running `scc`....') + self.logger.info(f'Repo ID: {repo_id}, Path: {path}') output = subprocess.check_output([self.config['scc_bin'], '-f', 'json', path]) records = json.loads(output.decode('utf8')) @@ -74,11 +84,11 @@ def generate_value_data(self, repo_id, path): 'comment_lines': file['Comment'], 'blank_lines': file['Blank'], 'code_complexity': file['Complexity'], - 'tool_source': __tool_source__, - 'tool_version': __tool_version__, - 'data_source': __data_source__, + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source, 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') } result = self.db.execute(self.repo_labor_table.insert().values(repo_labor)) - logging.info(f"Added Repo Labor Data: {result.inserted_primary_key}") + self.logger.info(f"Added Repo Labor Data: {result.inserted_primary_key}") diff --git a/workers/value_worker/value_worker/__init__.py b/workers/value_worker/value_worker/__init__.py deleted file mode 100644 --- a/workers/value_worker/value_worker/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""value_worker - Augur Worker that collects value data""" - -__tool_source__ = 'Value Worker' -__tool_version__ = '0.1.0' -__data_source__ = 'SCC' - -__author__ = 'Augur Team <[email protected]>' -__all__ = [] - diff --git a/workers/value_worker/value_worker/runtime.py b/workers/value_worker/value_worker/runtime.py deleted file mode 100644 --- a/workers/value_worker/value_worker/runtime.py +++ /dev/null @@ -1,56 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from value_worker.worker import ValueWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = 
Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - worker_info = read_config('Workers', 'repo_info_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(broker_host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.badge_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'), worker_port), - 'scc_bin': worker_info['scc_bin'], - 'repo_directory': read_config('Workers', 'facade_worker', None, None)['repo_directory'] - } - - #create instance of the worker - app.worker = ValueWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - - if app.worker._child is not None: - app.worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/worker_base.py b/workers/worker_base.py --- a/workers/worker_base.py +++ b/workers/worker_base.py @@ -1,22 +1,29 @@ """ Helper methods constant across all workers """ -import requests, datetime, time, traceback, json, os, sys, math +import requests, datetime, time, traceback, json, os, sys, math, logging +from logging import FileHandler, Formatter, StreamHandler from multiprocessing import Process, Queue import sqlalchemy as s import pandas as pd -import os -import sys, logging +from pathlib import Path from urllib.parse import urlparse -from workers.util import read_config from sqlalchemy import MetaData from sqlalchemy.ext.automap import automap_base +from augur.config import AugurConfig +from augur.logging import verbose_formatter, generic_formatter class Worker(): - def __init__(self, config={}, given=[], models=[], data_tables=[], operations_tables=[]): + ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[]): + + self.worker_type = worker_type self._task = None # task currently being worked on (dict) self._child = None # process of currently running task (multiprocessing process) self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes) + self.data_tables = data_tables + self.operations_tables = operations_tables + self._root_augur_dir = Worker.ROOT_AUGUR_DIR # count of tuples inserted in the database (to store stats for each task in op tables) self.results_counter = 0 @@ -25,23 +32,67 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta self.finishing_task = False # Update config with options that are general and not specific to any worker - self.config = config + self.augur_config = AugurConfig(self._root_augur_dir) + + self.config = { + 'worker_type': self.worker_type, + 'host': self.augur_config.get_value("Server", 
"host"), + 'gh_api_key': self.augur_config.get_value('Database', 'key'), + 'offline_mode': False + } + self.config.update(self.augur_config.get_section("Development")) + + try: + worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']] + self.config.update(worker_defaults) + except KeyError as e: + logging.warn('Could not get default configuration for {}', self.config['worker_type']) + + worker_info = self.augur_config.get_value('Workers', self.config['worker_type']) + self.config.update(worker_info) + + worker_port = self.config['port'] + while True: + try: + r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format( + self.config['host'], worker_port)).json() + if 'status' in r: + if r['status'] == 'alive': + worker_port += 1 + except: + break + + logfile_dir = f'{self._root_augur_dir}/logs/workers/{self.worker_type}/' + server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, worker_port) + collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, worker_port) self.config.update({ - 'port_broker': read_config('Server', 'port', 'AUGUR_PORT', 5000), - 'host_broker': read_config('Server', 'host', 'AUGUR_HOST', '0.0.0.0'), - 'host_database': read_config('Database', 'host', 'AUGUR_DB_HOST', 'host'), - 'port_database': read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - 'user_database': read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - 'name_database': read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - 'password_database': read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password') + "port": worker_port, + "id": "com.augurlabs.core.{}.{}".format(self.worker_type, worker_port), + "logfile_dir": logfile_dir, + "server_logfile": server_logfile, + "collection_logfile": collection_logfile, + "capture_output": True, + 'location': 'http://{}:{}'.format(self.config["host"], worker_port), + 'port_broker': self.augur_config.get_value('Server', 'port'), + 'host_broker': self.augur_config.get_value('Server', 'host'), + 'host_database': self.augur_config.get_value('Database', 'host'), + 'port_database': self.augur_config.get_value('Database', 'port'), + 'user_database': self.augur_config.get_value('Database', 'user'), + 'name_database': self.augur_config.get_value('Database', 'name'), + 'password_database': self.augur_config.get_value('Database', 'password') }) + self.config.update(config) + + # Initialize logging in the main process + self.initialize_logging() - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format( - self.config['id'].split('.')[len(self.config['id'].split('.')) - 1] - ), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) + # Clear log contents from previous runs + open(self.config["server_logfile"], "w").close() + open(self.config["collection_logfile"], "w").close() + + # Get configured collection logger + self.logger = logging.getLogger(self.config["id"]) + self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) self.given = given self.models = models @@ -56,28 +107,76 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta ], 'config': self.config } - + + # Send broker hello message + if self.config["offline_mode"] is False: + self.connect_to_broker() + + try: + self.tool_source + self.tool_version + self.data_source + except: + self.tool_source 
= 'Augur Worker Testing' + self.tool_version = '0.0.0' + self.data_source = 'Augur Worker Testing' + + def __repr__(self): + return f"{self.config['id']}" + + def initialize_logging(self): + self.config["log_level"] = self.config["log_level"].upper() + formatter = generic_formatter + if self.config["verbose"] is True: + formatter = verbose_formatter + + Path(self.config["logfile_dir"]).mkdir(exist_ok=True) + + collection_file_handler = FileHandler(filename=self.config["collection_logfile"], mode="a") + collection_file_handler.setFormatter(formatter) + collection_file_handler.setLevel(self.config["log_level"]) + + self.logger = logging.getLogger(self.config["id"]) + self.logger.handlers = [] + self.logger.addHandler(collection_file_handler) + self.logger.setLevel(self.config["log_level"]) + + if self.config["debug"]: + console_handler = StreamHandler() + self.config["log_level"] = "DEBUG" + console_handler.setLevel(self.config["log_level"]) + console_handler.setFormatter(formatter) + self.logger.handlers = [] + self.logger.addHandler(console_handler) + self.logger.addHandler(collection_file_handler) + self.config["capture_output"] = False + + if self.config["quiet"]: + self.logger.disabled = True + self.config["capture_output"] = False + + def initialize_database_connections(self): DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database'] ) # Create an sqlalchemy engine for both database schemas - logging.info("Making database connections... {}".format(DB_STR)) + self.logger.info("Making database connections") db_schema = 'augur_data' - self.db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, + self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(db_schema)}) helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, + self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(helper_schema)}) metadata = MetaData() helper_metadata = MetaData() # Reflect only the tables we will use for each schema's metadata object - metadata.reflect(self.db, only=data_tables) - helper_metadata.reflect(self.helper_db, only=operations_tables) + metadata.reflect(self.db, only=self.data_tables) + helper_metadata.reflect(self.helper_db, only=self.operations_tables) Base = automap_base(metadata=metadata) HelperBase = automap_base(metadata=helper_metadata) @@ -86,18 +185,18 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta HelperBase.prepare() # So we can access all our tables when inserting, updating, etc - for table in data_tables: + for table in self.data_tables: setattr(self, '{}_table'.format(table), Base.classes[table].__table__) try: - logging.info(HelperBase.classes.keys()) + self.logger.info(HelperBase.classes.keys()) except: pass - for table in operations_tables: + for table in self.operations_tables: try: setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__) except Exception as e: - logging.info("Error setting attribute for table: {} : {}".format(table, e)) + self.logger.info("Error setting attribute for table: {} : {}".format(table, e)) # Increment so we are ready to insert the 'next one' of each of these most recent ids self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 @@ -108,9 +207,6 @@ def 
__init__(self, config={}, given=[], models=[], data_tables=[], operations_ta else: self.oauths = [{'oauth_id': 0}] - # Send broker hello message - self.connect_to_broker() - @property def task(self): """ Property that is returned when the worker's current task is referenced @@ -130,7 +226,7 @@ def task(self, value): # This setting is set by the housekeeper and is attached to the task before it gets sent here if 'focused_task' in value: if value['focused_task'] == 1: - logging.info("Focused task is ON\n") + self.logger.info("Focused task is ON\n") self.finishing_task = True self._task = value @@ -145,21 +241,23 @@ def run(self): """ Kicks off the processing of the queue if it is not already being processed Gets run whenever a new task is added """ - logging.info("Running...\n") # Spawn a subprocess to handle message reading and performing the tasks self._child = Process(target=self.collect, args=()) self._child.start() - + def collect(self): """ Function to process each entry in the worker's task queue Determines what action to take based off the message type """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() while True: if not self._queue.empty(): message = self._queue.get() # Get the task off our MP queue else: break - logging.info("Popped off message: {}\n".format(str(message))) + self.logger.info("Popped off message: {}\n".format(str(message))) if message['job_type'] == 'STOP': break @@ -180,7 +278,7 @@ def collect(self): model_method = getattr(self, '{}_model'.format(message['models'][0])) self.record_model_process(repo_id, 'repo_info') except Exception as e: - logging.info('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) + + self.logger.info('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) + 'must have name of {}_model'.format(message['models'][0])) self.register_task_failure(message, repo_id, e) break @@ -189,17 +287,50 @@ def collect(self): # and worker can move onto the next task without stopping try: model_method(message, repo_id) - except Exception as e: + except Exception as e: # this could be a custom exception, might make things easier self.register_task_failure(message, repo_id, e) - pass + break + + self.logger.info('Closing database connections\n') + self.db.dispose() + self.helper_db.dispose() def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): - """ map objects => { *our db col* : *gh json key*} """ + """ Include an extra key-value pair on each element of new_data that represents + the action that should be taken with this element (i.e. 'need_insertion') + + :param new_data: List of dictionaries, data to be assigned an action to + :param table_values: Pandas DataFrame, existing data in the database to check + what action should be taken on the new_data depending on the presence of + each element in this DataFrame + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. 
+ Example: {'id': 'gh_issue_id'} + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. Example: {'id': 'gh_issue_id'} + :param table_pkey: String, the field name of the primary key of the table in + the database that we are checking the table_values for. + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. + Example: {'cntrb_id': None} + :return: List of dictionaries, contains all the same elements of new_data, except + each element now has an extra key-value pair with the key being 'flag', and + the value being 'need_insertion', 'need_update', or 'none' + """ need_insertion_count = 0 need_update_count = 0 for i, obj in enumerate(new_data): if type(obj) != dict: - logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) + self.logger.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) continue obj['flag'] = 'none' # default of no action needed @@ -208,13 +339,13 @@ def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_ if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): continue - logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) + self.logger.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) obj['flag'] = 'need_insertion' need_insertion_count += 1 break if obj['flag'] == 'need_insertion': - logging.info('Already determined that current tuple needs insertion, skipping checking updates. ' + self.logger.info('Already determined that current tuple needs insertion, skipping checking updates. ' 'Moving to next tuple.\n') continue @@ -226,13 +357,13 @@ def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_ not_nan_check = not (math.isnan(value_check) and math.isnan(existing_tuple[augur_col])) if value_check is not None else True if existing_tuple[augur_col] != value_check and not_nan_check: continue - logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) + self.logger.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) obj['flag'] = 'need_update' obj['pkey'] = existing_tuple[table_pkey] need_update_count += 1 if obj['flag'] == 'need_update': - logging.info('Already determined that current tuple needs update, skipping checking further updates. ' + self.logger.info('Already determined that current tuple needs update, skipping checking further updates. 
' 'Moving to next tuple.\n') continue @@ -242,25 +373,33 @@ def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_ continue if obj[update_col_map[col]] == existing_tuple[col]: continue - logging.info("Found a tuple that needs an update for column: {}\n".format(col)) + self.logger.info("Found a tuple that needs an update for column: {}\n".format(col)) obj['flag'] = 'need_update' obj['pkey'] = existing_tuple[table_pkey] need_update_count += 1 - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + + self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count)) return new_data - def check_duplicates(new_data, table_values, key): + def check_duplicates(self, new_data, table_values, key): + """ Filters what items of the new_data json are not present in the table_values df + + :param new_data: List of dictionaries, new data to filter duplicates out of + :param table_values: Pandas DataFrame, existing data to check what data is already + present in the database + :param key: String, key of each dict in new_data whose value we are checking + duplicates with + :return: List of dictionaries, contains elements of new_data that are not already + present in the database + """ need_insertion = [] for obj in new_data: - if type(obj) == dict: - if not table_values.isin([obj[key]]).any().any(): - need_insertion.append(obj) - # else: - # logging.info("Tuple with github's {} key value already".format(key) + - # "exists in our db: {}\n".format(str(obj[key]))) - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + + if type(obj) != dict: + continue + if not table_values.isin([obj[key]]).any().any(): + need_insertion.append(obj) + self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + "was reduced to {} tuples.\n".format(str(len(need_insertion)))) return need_insertion @@ -268,43 +407,39 @@ def connect_to_broker(self): connected = False for i in range(5): try: - logging.info("attempt {}\n".format(i)) + self.logger.debug("Connecting to broker, attempt {}\n".format(i)) if i > 0: time.sleep(10) requests.post('http://{}:{}/api/unstable/workers'.format( self.config['host_broker'],self.config['port_broker']), json=self.specs) - logging.info("Connection to the broker was successful\n") + self.logger.info("Connection to the broker was successful\n") connected = True break except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker. Trying again...\n') + self.logger.error('Cannot connect to the broker. Trying again...\n') if not connected: sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n') - def dump_queue(queue): - """ - Empties all pending items in a queue and returns them in a list. - """ - result = [] - queue.put("STOP") - for i in iter(queue.get, 'STOP'): - result.append(i) - # time.sleep(.1) - return result - def find_id_from_login(self, login): + """ Retrieves our contributor table primary key value for the contributor with + the given GitHub login credentials, if this contributor is not there, then + they get inserted. 
+ + :param login: String, the GitHub login username to find the primary key id for + :return: Integer, the id of the row with the matching GitHub login + """ idSQL = s.sql.text(""" SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' - """.format(login)) + """.format(login)) rs = pd.read_sql(idSQL, self.db, params={}) data_list = [list(row) for row in rs.itertuples(index=False)] try: return data_list[0][0] except: - logging.info("contributor needs to be added...") + self.logger.info('contributor needs to be added...') - cntrb_url = ("https://api.github.com/users/" + login) - logging.info("Hitting endpoint: {} ...\n".format(cntrb_url)) + cntrb_url = ('https://api.github.com/users/' + login) + self.logger.info('Hitting endpoint: {} ...\n'.format(cntrb_url)) r = requests.get(url=cntrb_url, headers=self.headers) self.update_gh_rate_limit(r) contributor = r.json() @@ -320,45 +455,50 @@ def find_id_from_login(self, login): email = contributor['email'] cntrb = { - "cntrb_login": contributor['login'] if 'login' in contributor else None, - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, - "cntrb_canonical": None, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source + 'cntrb_login': contributor['login'] if 'login' in contributor else None, + 'cntrb_email': email, + 'cntrb_company': company, + 'cntrb_location': location, + 'cntrb_created_at': contributor['created_at'] if 'created_at' in contributor else None, + 'cntrb_canonical': None, + 'gh_user_id': contributor['id'], + 'gh_login': contributor['login'], + 'gh_url': contributor['url'], + 'gh_html_url': contributor['html_url'], + 'gh_node_id': contributor['node_id'], + 'gh_avatar_url': contributor['avatar_url'], + 'gh_gravatar_id': contributor['gravatar_id'], + 'gh_followers_url': contributor['followers_url'], + 'gh_following_url': contributor['following_url'], + 'gh_gists_url': contributor['gists_url'], + 'gh_starred_url': contributor['starred_url'], + 'gh_subscriptions_url': contributor['subscriptions_url'], + 'gh_organizations_url': contributor['organizations_url'], + 'gh_repos_url': contributor['repos_url'], + 'gh_events_url': contributor['events_url'], + 'gh_received_events_url': contributor['received_events_url'], + 'gh_type': contributor['type'], + 'gh_site_admin': contributor['site_admin'], + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source } result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: " + 
str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted contributor: " + contributor['login'] + "\n") + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") return self.find_id_from_login(login) - def get_owner_repo(self, github_url): - split = github_url.split('/') + def get_owner_repo(self, git_url): + """ Gets the owner and repository names of a repository from a git url + + :param git_url: String, the git url of a repository + :return: Tuple, includes the owner and repository names in that order + """ + split = git_url.split('/') owner = split[-2] repo = split[-1] @@ -369,6 +509,19 @@ def get_owner_repo(self, github_url): return owner, repo def get_max_id(self, table, column, default=25150, operations_table=False): + """ Gets the max value (usually used for id/pk's) of any Integer column + of any table + + :param table: String, the table that consists of the column you want to + query a max value for + :param column: String, the column that you want to query the max value for + :param default: Integer, if there are no values in the + specified column, the value of this parameter will be returned + :param operations_table: Boolean, if True, this signifies that the table/column + that is wanted to be queried is in the augur_operations schema rather than + the augur_data schema. Default False + :return: Integer, the max value of the specified column/table + """ maxIdSQL = s.sql.text(""" SELECT max({0}.{1}) AS {1} FROM {0} @@ -377,14 +530,24 @@ def get_max_id(self, table, column, default=25150, operations_table=False): rs = pd.read_sql(maxIdSQL, db, params={}) if rs.iloc[0][column] is not None: max_id = int(rs.iloc[0][column]) + 1 - logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) + self.logger.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) else: max_id = default - logging.info("Could not find max id for {} column in the {} table... using default set to: \ - {}\n".format(column, table, max_id)) + self.logger.info('Could not find max id for {} column in the {} table... 
' + + 'using default set to: {}\n'.format(column, table, max_id)) return max_id def get_table_values(self, cols, tables, where_clause=""): + """ Can query all values of any column(s) from any table(s) + with an optional where clause + + :param cols: List of Strings, column(s) that user wants to query + :param tables: List of Strings, table(s) that user wants to query + :param where_clause: String, optional where clause to filter the values + queried + :return: Pandas DataFrame, contains all values queried in the columns, tables, and + optional where clause provided + """ table_str = tables[0] del tables[0] @@ -396,27 +559,33 @@ def get_table_values(self, cols, tables, where_clause=""): for col in cols: col_str += ", " + col - tableValuesSQL = s.sql.text(""" + table_values_sql = s.sql.text(""" SELECT {} FROM {} {} """.format(col_str, table_str, where_clause)) - logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL)) - values = pd.read_sql(tableValuesSQL, self.db, params={}) + self.logger.info('Getting table values with the following PSQL query: \n{}\n'.format( + table_values_sql)) + values = pd.read_sql(table_values_sql, self.db, params={}) return values def init_oauths(self): + """ Initialization required to have all GitHub tokens within access to GitHub workers + """ self.oauths = [] self.headers = None # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" + url = 'https://api.github.com/users/gabe-heim' # Make a list of api key in the config combined w keys stored in the database oauthSQL = s.sql.text(""" SELECT * FROM worker_oauth WHERE access_token <> '{}' """.format(self.config['gh_api_key'])) - for oauth in [{'oauth_id': 0, 'access_token': self.config['gh_api_key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): + + oauths = [{'oauth_id': 0, 'access_token': self.config['gh_api_key']}] + \ + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")) + for oauth in oauths: self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}\n".format(oauth)) + self.logger.info("Getting rate limit info for oauth: {}\n".format(oauth)) response = requests.get(url=url, headers=self.headers) self.oauths.append({ 'oauth_id': oauth['oauth_id'], @@ -424,18 +593,51 @@ def init_oauths(self): 'rate_limit': int(response.headers['X-RateLimit-Remaining']), 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() }) - logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) + self.logger.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") + self.logger.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") # First key to be used will be the one specified in the config (first element in # self.oauths array will always be the key in use) self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}): - # Paginate backwards through all the 
tuples but get first page in order - # to determine if there are multiple pages and if the 1st page covers all + """ Paginate either backwards or forwards (depending on the value of the worker's + finishing_task attribute) through all the GitHub or GitLab api endpoint pages. + + :param url: String, the url of the API endpoint we are paginating through, expects + a curly brace string formatter within the string to format the Integer + representing the page number that is wanted to be returned + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. Example: {'id': 'gh_issue_id'} + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. + Example: {'id': 'gh_issue_id'} + :param table: String, the name of the table that holds the values to check for + duplicates/updates against + :param table_pkey: String, the field name of the primary key of the table in + the database that we are getting the values for to cross-reference to check + for duplicates. + :param where_clause: String, optional where clause to filter the values + that are queried when preparing the values that will be cross-referenced + for duplicates/updates + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. + Example: {'cntrb_id': None} + :return: List of dictionaries, all data points from the pages of the specified API endpoint + each with a 'flag' key-value pair representing the required action to take with that + data point (i.e. 
'need_insertion', 'need_update', 'none') + """ update_keys = list(update_col_map.keys()) if update_col_map else [] update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] @@ -448,10 +650,10 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh num_attempts = 0 success = False while num_attempts < 3: - logging.info("Hitting endpoint: " + url.format(i) + " ...\n") + self.logger.info(f'Hitting endpoint: {url.format(i)}...\n') r = requests.get(url=url.format(i), headers=self.headers) self.update_gh_rate_limit(r) - logging.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) + self.logger.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) try: j = r.json() @@ -462,9 +664,9 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh success = True break elif type(j) == dict: - logging.info("Request returned a dict: {}\n".format(j)) + self.logger.info("Request returned a dict: {}\n".format(j)) if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) break if j['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': num_attempts -= 1 @@ -472,11 +674,11 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh if j['message'] == 'Bad credentials': self.update_gh_rate_limit(r, bad_credentials=True) elif type(j) == str: - logging.info("J was string: {}\n".format(j)) + self.logger.info(f'J was string: {j}\n') if '<!DOCTYPE html>' in j: - logging.info("HTML was returned, trying again...\n") + self.logger.info('HTML was returned, trying again...\n') elif len(j) == 0: - logging.info("Empty string, trying again...\n") + self.logger.info('Empty string, trying again...\n') else: try: j = json.loads(j) @@ -492,34 +694,34 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh if 'last' in r.links and not multiple_pages and not self.finishing_task: param = r.links['last']['url'][-6:] i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") multiple_pages = True elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") + self.logger.info("Only 1 page of request\n") elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ..." + self.logger.info("Finishing a previous task, paginating forwards ..." 
" excess rate limit requests will be made\n") if len(j) == 0: - logging.info("Response was empty, breaking from pagination.\n") + self.logger.info("Response was empty, breaking from pagination.\n") break # Checking contents of requests with what we already have in the db j = self.assign_tuple_action(j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) if not j: - logging.info("Assigning tuple action failed, moving to next page.\n") + self.logger.info("Assigning tuple action failed, moving to next page.\n") i = i + 1 if self.finishing_task else i - 1 continue try: to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none'] except Exception as e: - logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) + self.logger.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) i = i + 1 if self.finishing_task else i - 1 continue if len(to_add) == 0 and multiple_pages and 'last' in r.links: - logging.info("{}".format(r.links['last'])) + self.logger.info("{}".format(r.links['last'])) if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown tuples, breaking from pagination.\n") + self.logger.info("No more pages with unknown tuples, breaking from pagination.\n") break tuples += to_add @@ -527,7 +729,7 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh # Since we already wouldve checked the first page... break if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") + self.logger.info("No more pages to check, breaking from pagination.\n") break return tuples @@ -537,24 +739,16 @@ def query_github_contributors(self, entry_info, repo_id): """ Data collection function Query the GitHub API for contributors """ - logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n") + self.logger.info(f'Querying contributors with given entry info: {entry_info}\n') github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] # Extract owner/repo from the url for the endpoint - path = urlparse(github_url) - split = path[2].split('/') - - owner = split[1] - name = split[2] - - # Handles git url case by removing the extension - if ".git" in name: - name = name[:-4] + owner, name = self.get_owner_repo(github_url) # Set the base of the url and place to hold contributors to insert - contributors_url = ("https://api.github.com/repos/" + owner + "/" + - name + "/contributors?per_page=100&page={}") + contributors_url = (f'https://api.github.com/repos/{owner}/{name}/' + + 'contributors?per_page=100&page={}') # Get contributors that we already have stored # Set our duplicate and update column map keys (something other than PK) to @@ -567,7 +761,7 @@ def query_github_contributors(self, entry_info, repo_id): #list to hold contributors needing insertion or update contributors = self.paginate(contributors_url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") + self.logger.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") for repo_contributor in contributors: try: @@ -575,7 +769,7 @@ def query_github_contributors(self, entry_info, repo_id): # `created at` # i think that's it cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) - 
logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) self.update_gh_rate_limit(r) contributor = r.json() @@ -626,70 +820,23 @@ def query_github_contributors(self, entry_info, repo_id): if repo_contributor['flag'] == 'need_update': result = self.db.execute(self.contributors_table.update().where( self.worker_history_table.c.cntrb_email==email).values(cntrb)) - logging.info("Updated tuple in the contributors table with existing email: {}".format(email)) + self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email)) self.cntrb_id_inc = repo_contributor['pkey'] elif repo_contributor['flag'] == 'need_insertion': result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: " + contributor['login'] + "\n") + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") # Increment our global track of the cntrb id for the possibility of it being used as a FK self.cntrb_id_inc = int(result.inserted_primary_key[0]) except Exception as e: - logging.info("Caught exception: {}".format(e)) - logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) + self.logger.info("Caught exception: {}".format(e)) + self.logger.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) continue - def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): - """ - Read a variable in specified section of the config file, unless provided an environment variable - - :param section: location of given variable - :param name: name of variable - """ - config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path) - _config_file_name = 'augur.config.json' - _config_bad = False - _already_exported = {} - _runtime_location = 'runtime/' - _default_config = {} - _config_file = None - - try: - _config_file = open(config_file_path, 'r+') - except: - print('Couldn\'t open {}'.format(_config_file_name)) - - # Load the config file - try: - config_text = _config_file.read() - _config = json.loads(config_text) - except json.decoder.JSONDecodeError as e: - if not _config_bad: - _using_config_file = False - print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: {}'.format(config_file_path, str(e))) - _config = _default_config - - value = None - if environment_variable is not None: - value = os.getenv(environment_variable) - if value is None: - try: - if name is not None: - value = _config[section][name] - else: - value = _config[section] - except Exception as e: - value = default - if not section in _config: - _config[section] = {} - - return value - - def record_model_process(self, repo_id, model): task_history = { @@ -707,7 +854,7 @@ def record_model_process(self, repo_id, model): self.history_id += 1 else: result = self.helper_db.execute(self.worker_history_table.insert().values(task_history)) - logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) + self.logger.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) self.history_id = int(result.inserted_primary_key[0]) def register_task_completion(self, task, repo_id, model): @@ -737,7 +884,7 @@ def register_task_completion(self, task, repo_id, model): self.helper_db.execute(self.worker_history_table.update().where( self.worker_history_table.c.history_id==self.history_id).values(task_history)) - logging.info("Recorded job completion for: " + str(task_completed) + "\n") + self.logger.info("Recorded job completion for: " + str(task_completed) + "\n") # Update job process table updated_job = { @@ -748,27 +895,29 @@ def register_task_completion(self, task, repo_id, model): } self.helper_db.execute(self.worker_job_table.update().where( self.worker_job_table.c.job_model==model).values(updated_job)) - logging.info("Updated job process for model: " + model + "\n") + self.logger.info("Updated job process for model: " + model + "\n") - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") + if self.config["offline_mode"] is False: + + # Notify broker of completion + self.logger.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + + "This task inserted: " + str(self.results_counter) + " tuples.\n") - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['host_broker'],self.config['port_broker']), json=task_completed) + requests.post('http://{}:{}/api/unstable/completed_task'.format( + self.config['host_broker'],self.config['port_broker']), json=task_completed) # Reset results counter for next task self.results_counter = 0 def register_task_failure(self, task, repo_id, e): - logging.info("Worker ran into an error for task: {}\n".format(task)) - logging.info("Printing traceback...\n") + self.logger.info("Worker ran into an error for task: {}\n".format(task)) + self.logger.info("Printing traceback...\n") tb = traceback.format_exc() - logging.info(tb) + self.logger.info(tb) - logging.info(f'This task inserted {self.results_counter} tuples before failure.\n') - logging.info("Notifying broker and logging task failure in database...\n") + self.logger.info(f'This task inserted {self.results_counter} tuples before failure.\n') + self.logger.info("Notifying broker and logging task failure in database...\n") key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" url = task['given'][key] @@ -783,7 +932,7 @@ def register_task_failure(self, task, repo_id, e): requests.post("http://{}:{}/api/unstable/task_error".format( self.config['host_broker'],self.config['port_broker']), json=task) except 
requests.exceptions.ConnectionError: - logging.error('Could not send task failure message to the broker\n') + self.logger.error('Could not send task failure message to the broker\n') except Exception: logging.exception('An error occured while informing broker about task failure\n') @@ -799,7 +948,7 @@ def register_task_failure(self, task, repo_id, e): } self.helper_db.execute(self.worker_history_table.update().where(self.worker_history_table.c.history_id==self.history_id).values(task_history)) - logging.info("Recorded job error in the history table for: " + str(task) + "\n") + self.logger.info("Recorded job error in the history table for: " + str(task) + "\n") # Update job process table updated_job = { @@ -809,7 +958,7 @@ def register_task_failure(self, task, repo_id, e): "analysis_state": 0 } self.helper_db.execute(self.worker_job_table.update().where(self.worker_job_table.c.job_model==task['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + task['models'][0] + "\n") + self.logger.info("Updated job process for model: " + task['models'][0] + "\n") # Reset results counter for next task self.results_counter = 0 @@ -838,29 +987,29 @@ def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disa # Try to get rate limit from request headers, sometimes it does not work (GH's issue) # In that case we just decrement from last recieved header count if bad_credentials and len(self.oauths) > 1: - logging.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) + self.logger.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) del self.oauths[0] if temporarily_disable: - logging.info("Github thinks we are abusing their api. Preventing use of this key until it resets...\n") + self.logger.info("Github thinks we are abusing their api. 
Preventing use of this key until it resets...\n") self.oauths[0]['rate_limit'] = 0 else: try: self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - logging.info("Recieved rate limit from headers\n") + self.logger.info("Recieved rate limit from headers\n") except: self.oauths[0]['rate_limit'] -= 1 - logging.info("Headers did not work, had to decrement\n") - logging.info("Updated rate limit, you have: " + + self.logger.info("Headers did not work, had to decrement\n") + self.logger.info("Updated rate limit, you have: " + str(self.oauths[0]['rate_limit']) + " requests remaining.\n") if self.oauths[0]['rate_limit'] <= 0: try: reset_time = response.headers['X-RateLimit-Reset'] except Exception as e: - logging.info("Could not get reset time from headers because of error: {}".format(error)) + self.logger.info("Could not get reset time from headers because of error: {}".format(error)) reset_time = 3600 time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() - logging.info("Rate limit exceeded, checking for other available keys to use.\n") + self.logger.info("Rate limit exceeded, checking for other available keys to use.\n") # We will be finding oauth with the highest rate limit left out of our list of oauths new_oauth = self.oauths[0] @@ -869,7 +1018,7 @@ def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disa other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] for oauth in other_oauths: - logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) + self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) self.headers = {'Authorization': 'token %s' % oauth['access_token']} response = requests.get(url=url, headers=self.headers) oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) @@ -877,20 +1026,20 @@ def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disa # Update oauth to switch to if a higher limit is found if oauth['rate_limit'] > new_oauth['rate_limit']: - logging.info("Higher rate limit found in oauth: {}\n".format(oauth)) + self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth)) new_oauth = oauth elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: - logging.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) + self.logger.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) new_oauth = oauth if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: - logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) + self.logger.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) time.sleep(new_oauth['seconds_to_reset']) # Make new oauth the 0th element in self.oauths so we know which one is in use index = self.oauths.index(new_oauth) self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] - logging.info("Using oauth: {}\n".format(self.oauths[0])) + self.logger.info("Using oauth: {}\n".format(self.oauths[0])) # Change headers to be using the new oauth's key self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
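The worker_base.py hunks above document `assign_tuple_action`, which tags each record pulled from the API with `need_insertion`, `need_update`, or `none` before anything is written to the database. A rough, self-contained sketch of that flagging idea follows; it is simplified and is not the worker's actual code — the column maps, DataFrame, and records below are invented for illustration.

```python
import pandas as pd

def flag_records(new_data, existing, duplicate_col_map, update_col_map, pkey):
    """Tag each incoming record with 'need_insertion', 'need_update', or 'none'."""
    flagged = []
    for record in new_data:
        flag = 'none'
        for src_col, db_col in duplicate_col_map.items():
            match = existing[existing[db_col] == record[src_col]]
            if match.empty:
                # natural key not present yet -> brand new row
                flag = 'need_insertion'
                break
            for upd_src, upd_db in update_col_map.items():
                if record[upd_src] != match.iloc[0][upd_db]:
                    # key exists but a tracked column changed -> update in place
                    flag = 'need_update'
                    record['pkey'] = match.iloc[0][pkey]
        flagged.append({**record, 'flag': flag})
    return flagged

existing = pd.DataFrame([{'gh_issue_id': 1, 'issue_state': 'open', 'issue_id': 100}])
new_data = [{'id': 1, 'state': 'closed'}, {'id': 2, 'state': 'open'}]
print(flag_records(new_data, existing,
                   duplicate_col_map={'id': 'gh_issue_id'},
                   update_col_map={'state': 'issue_state'},
                   pkey='issue_id'))
# first record is flagged 'need_update' (key exists, state changed);
# second record is flagged 'need_insertion' (key not in the existing table)
```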
diff --git a/test/__init__.py b/augur/routes/metrics/__init__.py similarity index 100% rename from test/__init__.py rename to augur/routes/metrics/__init__.py
diff --git a/test/api/test_experimental_routes.py b/test/api/test_experimental_routes.py deleted file mode 100644 --- a/test/api/test_experimental_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - -@pytest.fixture(scope="session") -def metrics(): - pass
diff --git a/test/api/test_insight_routes.py b/test/api/test_insight_routes.py deleted file mode 100644 --- a/test/api/test_insight_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - -@pytest.fixture(scope="session") -def metrics(): - pass
diff --git a/test/api/test_message_routes.py b/test/api/test_message_routes.py deleted file mode 100644 --- a/test/api/test_message_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - -@pytest.fixture(scope="session") -def metrics(): - pass
diff --git a/test/api/test_platform_routes.py b/test/api/test_platform_routes.py deleted file mode 100644 --- a/test/api/test_platform_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - -@pytest.fixture(scope="session") -def metrics(): - pass
diff --git a/test/api/test_util_routes.py b/test/api/test_util_routes.py deleted file mode 100644 --- a/test/api/test_util_routes.py +++ /dev/null @@ -1,31 +0,0 @@ -import requests -import pytest - -@pytest.fixture(scope="session") -def metrics(): - pass - -def test_common(endpoint="http://localhost:5000/api/unstable/repos"): - response = requests.get(endpoint) - data = response.json() - assert response.status_code == 200 - assert len(data) >= 1 - -def test_get_all_repos(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repos') - -def test_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_repos_in_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_get_repo_for_dosocs(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/dosocs/repos') - -def test_aggregate_summary_by_repo(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/repos/25430/aggregate-summary') - -def test_aggregate_summary_by_group(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/aggregate-summary') -
diff --git a/test/metrics/test_experimental_metrics.py b/test/metrics/test_experimental_metrics.py deleted file mode 100644 --- a/test/metrics/test_experimental_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - -@pytest.fixture(scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics
diff --git a/test/metrics/test_insight_metrics.py b/test/metrics/test_insight_metrics.py deleted file mode 100644 --- a/test/metrics/test_insight_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - -@pytest.fixture(scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics
diff --git a/test/metrics/test_message_metrics.py b/test/metrics/test_message_metrics.py deleted file mode 100644 --- a/test/metrics/test_message_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - -@pytest.fixture(scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics
diff --git a/test/metrics/test_platform_metrics.py b/test/metrics/test_platform_metrics.py deleted file mode 100644 --- a/test/metrics/test_platform_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - -@pytest.fixture(scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics
diff --git a/test/metrics/test_util_metrics.py b/test/metrics/test_util_metrics.py deleted file mode 100644 --- a/test/metrics/test_util_metrics.py +++ /dev/null @@ -1,14 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - -@pytest.fixture(scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - -# def test_get_repos_for_dosocs(metrics): -# assert metrics.get_repos_for_dosocs().isin( -# ['/home/sean/git-repos/25430/github.com/rails/rails-dom-testing']).any().any() -
diff --git a/test/test_model.py b/tests/__init__.py similarity index 100% rename from test/test_model.py rename to tests/__init__.py
diff --git a/tests/test_application.py b/tests/test_application.py new file mode 100644 --- /dev/null +++ b/tests/test_application.py @@ -0,0 +1,47 @@ +import pytest +import augur.application +import sqlalchemy as s +import json + +def test_init_augur_regular(): + augur_app = augur.application.Application() + assert augur_app is not None + +# def test_discover_config_file(monkeypatch): +# def mock_discover_config_file(augur_app): +# return None +# monkeypatch.setattr(augur.application.Application, "_discover_config_file", mock_discover_config_file) + +# augur_app = augur.application.Application() + +# assert augur_app._using_default_config is True + +# def test_load_config_from_nonexistent_file(monkeypatch): +# def mock_load_config_from_file(augur_app): +# raise(FileNotFoundError()) + +# monkeypatch.setattr(augur.application.Application, "_load_config_from_file", mock_load_config_from_file) + +# with pytest.raises(FileNotFoundError): +# augur_app = augur.application.Application() +# assert augur_app._using_default_config is True + +# def test_load_config_from_invalid_json(monkeypatch): +# def mock_load_config_from_file(augur_app): +# raise(json.decoder.JSONDecodeError("fake", "error", 0)) + +# monkeypatch.setattr(augur.application.Application, "_load_config_from_file", mock_load_config_from_file) + +# with pytest.raises(json.decoder.JSONDecodeError): +# augur_app = augur.application.Application() +# assert augur_app._using_default_config is True + +def test_connect_to_database(monkeypatch): + def mock_fail_connection(self): + raise(s.exc.OperationalError("fake", "error", "message")) + + monkeypatch.setattr(s.engine.Engine, "connect", mock_fail_connection) + monkeypatch.setenv("AUGUR_LOG_QUIET", "1") + + with pytest.raises(s.exc.OperationalError): + augur_app = augur.application.Application()
diff --git a/test/metrics/test_commit_metrics.py b/tests/test_metrics/test_commit_metrics.py similarity index 90% rename from test/metrics/test_commit_metrics.py rename to tests/test_metrics/test_commit_metrics.py --- a/test/metrics/test_commit_metrics.py +++ b/tests/test_metrics/test_commit_metrics.py @@ -2,12 +2,6 @@ import pytest -@pytest.fixture(scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_annual_commit_count_ranked_by_repo_in_repo_group(metrics): assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10).iloc[0].net > 0
assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10, 25430).iloc[0].net > 0
diff --git a/test/metrics/test_contributor_metrics.py b/tests/test_metrics/test_contributor_metrics.py similarity index 91% rename from test/metrics/test_contributor_metrics.py rename to tests/test_metrics/test_contributor_metrics.py --- a/test/metrics/test_contributor_metrics.py +++ b/tests/test_metrics/test_contributor_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd -@pytest.fixture(scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_contributors(metrics): # repo group assert metrics.contributors(20).iloc[0]['total'] > 0
diff --git a/test/metrics/test_issue_metrics.py b/tests/test_metrics/test_issue_metrics.py similarity index 97% rename from test/metrics/test_issue_metrics.py rename to tests/test_metrics/test_issue_metrics.py --- a/test/metrics/test_issue_metrics.py +++ b/tests/test_metrics/test_issue_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd -@pytest.fixture(scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_issues_new(metrics): #repo_id assert metrics.issues_new(10, 25430, period='year').iloc[0]['issues'] > 0
diff --git a/test/metrics/test_pull_request_metrics.py b/tests/test_metrics/test_pull_request_metrics.py similarity index 91% rename from test/metrics/test_pull_request_metrics.py rename to tests/test_metrics/test_pull_request_metrics.py --- a/test/metrics/test_pull_request_metrics.py +++ b/tests/test_metrics/test_pull_request_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd -@pytest.fixture(scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_pull_requests_merge_contributor_new(metrics): # repo id assert metrics.pull_requests_merge_contributor_new(10, repo_id=25430, period='year').isin(
diff --git a/test/metrics/test_repo_meta_metrics.py b/tests/test_metrics/test_repo_meta_metrics.py similarity index 96% rename from test/metrics/test_repo_meta_metrics.py rename to tests/test_metrics/test_repo_meta_metrics.py --- a/test/metrics/test_repo_meta_metrics.py +++ b/tests/test_metrics/test_repo_meta_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd -@pytest.fixture(scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_code_changes(metrics): #repo_id assert metrics.code_changes(10, 25430, period='year').isin([pd.Timestamp('2019-01-01T00:00:00+00:00'), 2]).any().any()
diff --git a/test/api/runner.py b/tests/test_routes/runner.py similarity index 84% rename from test/api/runner.py rename to tests/test_routes/runner.py --- a/test/api/runner.py +++ b/tests/test_routes/runner.py @@ -9,9 +9,10 @@ start = subprocess.Popen(["augur", "run", "--disable-housekeeper", "--skip-cleanup"], stdout=FNULL, stderr=subprocess.STDOUT) print("Waiting for the server to start...") time.sleep(5) -process = subprocess.run(["pytest", "-ra", "--tb=short", "-x", "test/metrics"]) + +process = subprocess.run(["pytest", "tests/test_routes/"]) time.sleep(2) + subprocess.Popen(["augur", "util", "kill"], stdout=FNULL, stderr=subprocess.STDOUT) print("Server successfully shutdown.") - sys.exit(process.returncode)
diff --git a/test/api/test_commit_routes.py b/tests/test_routes/test_commit_routes.py similarity index 97% rename from test/api/test_commit_routes.py rename to tests/test_routes/test_commit_routes.py
--- a/test/api/test_commit_routes.py +++ b/tests/test_routes/test_commit_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_annual_commit_count_ranked_by_new_repo_in_repo_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/annual-commit-count-ranked-by-new-repo-in-repo-group/') data = response.json() diff --git a/test/api/test_contributor_routes.py b/tests/test_routes/test_contributor_routes.py similarity index 95% rename from test/api/test_contributor_routes.py rename to tests/test_routes/test_contributor_routes.py --- a/test/api/test_contributor_routes.py +++ b/tests/test_routes/test_contributor_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_contributors_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/contributors') data = response.json() diff --git a/test/api/test_issue_routes.py b/tests/test_routes/test_issue_routes.py similarity index 99% rename from test/api/test_issue_routes.py rename to tests/test_routes/test_issue_routes.py --- a/test/api/test_issue_routes.py +++ b/tests/test_routes/test_issue_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_issues_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/issues-new') data = response.json() diff --git a/test/api/test_pull_request_routes.py b/tests/test_routes/test_pull_request_routes.py similarity index 94% rename from test/api/test_pull_request_routes.py rename to tests/test_routes/test_pull_request_routes.py --- a/test/api/test_pull_request_routes.py +++ b/tests/test_routes/test_pull_request_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_pull_requests_merge_contributor_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/pull-requests-merge-contributor-new') data = response.json() diff --git a/test/api/test_repo_meta_routes.py b/tests/test_routes/test_repo_meta_routes.py similarity index 98% rename from test/api/test_repo_meta_routes.py rename to tests/test_routes/test_repo_meta_routes.py --- a/test/api/test_repo_meta_routes.py +++ b/tests/test_routes/test_repo_meta_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_code_changes_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/code-changes') data = response.json() @@ -51,7 +47,6 @@ def test_sub_projects_by_repo(metrics): def test_cii_best_practices_badge_by_repo(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/repos/25430/cii-best-practices-badge') - print(response) data = response.json() assert response.status_code == 200 assert len(data) >= 1 diff --git a/tests/test_routes/test_util_routes.py b/tests/test_routes/test_util_routes.py new file mode 100644 --- /dev/null +++ b/tests/test_routes/test_util_routes.py @@ -0,0 +1,20 @@ +import requests +import pytest + +from conftest import create_full_routes + +util_routes = [\ +"repos",\ +"repo-groups",\ +"repo-groups",\ +"dosocs/repos",\ +"repo-groups/<default_repo_group_id>/aggregate-summary",\ +"repo-groups/<default_repo_group_id>/repos/<default_repo_id>/aggregate-summary",\ +] + [email 
protected]("endpoint", create_full_routes(util_routes)) +def test_base_test(client, endpoint): + response = client.get(endpoint) + data = response.get_json() + assert response.status_code == 200 + assert len(data) >= 1 diff --git a/tests/test_workers/test_repo_info_worker.py b/tests/test_workers/test_repo_info_worker.py new file mode 100644 --- /dev/null +++ b/tests/test_workers/test_repo_info_worker.py @@ -0,0 +1,29 @@ +import pytest +from time import sleep + +from workers.repo_info_worker.repo_info_worker import RepoInfoWorker + [email protected] +def test_task(): + return { + "given": { + "github_url": "https://github.com/chaoss/augur.git" + }, + "models": ["repo_info"], + "job_type": "MAINTAIN", + "display_name": "repo_info model for url: https://github.com/chaoss/augur.git", + "focused_task": 1 + } + [email protected] +def repo_info_worker(): + config = { + "offline_mode": True, + "quiet": True + } + + repo_info_worker = RepoInfoWorker(config=config) + return repo_info_worker + +def test_repo_info_worker(repo_info_worker, test_task): + assert repo_info_worker is not None diff --git a/workers/metric_status_worker/tests/tests_worker.py b/workers/metric_status_worker/tests/tests_worker.py deleted file mode 100644 --- a/workers/metric_status_worker/tests/tests_worker.py +++ /dev/null @@ -1,16 +0,0 @@ -import requests -import pytest - -from metric_status_worker.worker import MetricsStatus - -def test_get_metric_index_in_table_row(): - row = "metric |sTatuS|TestString" - metric_status = MetricsStatus("api.github.com") - result = metric_status.get_metric_index_in_table_row(row) - print(result) - assert result == (0, 3) - -def test_is_has_link(): - metric_status = MetricsStatus("api.github.com") - re_result = metric_status.is_has_link(" [oss](augur" , None) - assert re_result == ('oss', 'augur') diff --git a/workers/tests/test_standard_methods.py b/workers/tests/test_standard_methods.py deleted file mode 100644 --- a/workers/tests/test_standard_methods.py +++ /dev/null @@ -1,28 +0,0 @@ -# Sample Test passing with nose and pytest -import pandas as pd -import pytest -from workers.standard_methods import check_duplicates, dump_queue, read_config -from queue import Queue - - -def test_check_duplicates(): - obj = {"website":["walmart.com"]} - new_data = [obj] - table_values = pd.read_csv("augur/data/companies.csv") - assert check_duplicates(new_data, table_values, "website") == [obj] - -def test_dump_queues(): - sample_queue = Queue() - list_sample = ["[email protected]", "[email protected]", "[email protected]"] - for list_item in list_sample: - sample_queue.put(list_item) - queue_to_list = dump_queue(sample_queue) - assert queue_to_list == ["[email protected]", "[email protected]", "[email protected]"] - -def test_read_config_no_exception(): - db_name = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur',config_file_path="augur.config.json") - assert db_name == "augur" - -def test_read_config_exception(): - with pytest.raises(AttributeError): - db_name = read_config('Server', 'username')
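The relocated test modules above all drop their per-file `metrics` fixture, and the new `test_util_routes.py` imports `create_full_routes` from `conftest` and takes a `client` fixture, so the suite evidently relies on a shared `tests/conftest.py` that is not included in this test patch. A minimal sketch of what that conftest might provide, assuming it simply centralizes the fixture body the individual files used to define:

```python
# tests/conftest.py -- hypothetical sketch; the actual conftest is not shown in
# the patch above. It centralizes the `metrics` fixture that each test module
# previously defined for itself.
import pytest

@pytest.fixture(scope="session")
def metrics():
    # Same body as the removed per-file fixtures: build an Augur application
    # once per session and hand its metrics object to every test.
    import augur
    augur_app = augur.Application()
    return augur_app.metrics
```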
repo_info worker: dev/test branch

Please help us help you by filling out the following sections as thoroughly as you can.

**Description:** Looks like the new fork information collection has some kind of mismatch between the method and the parameters passed:

```
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
  File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
    model_method(message, repo_id)
  File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
    forked = self.is_forked(owner, repo)
  File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
    data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
```

If the log does not provide enough info, let me know.
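The traceback shows `self` being passed explicitly to an instance method: `self.get_repo_data(self, url, r)` supplies four positional arguments to a method that accepts three. A small, self-contained sketch of the failure mode and the likely one-line fix (the class and method bodies here are placeholders built from the names in the traceback, not the actual worker code):

```python
# Hypothetical reconstruction of the error above; not the real RepoInfoWorker.
class RepoInfoWorker:
    def get_repo_data(self, url, response):
        # placeholder: the real method would parse the GitHub API response
        return {"url": url, "fork": False}

    def is_forked_buggy(self, url, r):
        # Buggy call from the traceback: `self` is passed explicitly, so the
        # bound method receives 4 positional arguments but only accepts 3.
        return self.get_repo_data(self, url, r)

    def is_forked_fixed(self, url, r):
        # Likely fix: drop the explicit `self` and let Python bind it.
        return self.get_repo_data(url, r)


worker = RepoInfoWorker()
print(worker.is_forked_fixed("https://api.github.com/repos/davepacheco/node-verror", None))
# worker.is_forked_buggy(...) would raise:
# TypeError: get_repo_data() takes 3 positional arguments but 4 were given
```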
2020-06-15T20:11:56Z
[]
[]
chaoss/augur
781
chaoss__augur-781
[ "737" ]
9086d6df824b31c65c678eb19cef86ecb3052ca5
diff --git a/augur/__init__.py b/augur/__init__.py --- a/augur/__init__.py +++ b/augur/__init__.py @@ -1,10 +1,4 @@ #SPDX-License-Identifier: MIT -import logging -import coloredlogs - -coloredlogs.install() -logger = logging.getLogger('augur') - -# Classes -from .application import Application, logger +import os +ROOT_AUGUR_DIRECTORY = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) diff --git a/augur/application.py b/augur/application.py --- a/augur/application.py +++ b/augur/application.py @@ -4,72 +4,52 @@ """ import os -import time -import multiprocessing as mp +from pathlib import Path import logging +from logging import FileHandler, Formatter import coloredlogs import json -import pkgutil from beaker.cache import CacheManager from beaker.util import parse_cache_config_options import sqlalchemy as s import psycopg2 -from augur import logger +from augur import ROOT_AUGUR_DIRECTORY from augur.metrics import Metrics -from augur.cli.configure import default_config +from augur.config import AugurConfig +from augur.logging import AugurLogging -class Application(object): +logger = logging.getLogger(__name__) + +class Application(): """Initalizes all classes from Augur using a config file or environment variables""" - def __init__(self): + def __init__(self, given_config={}, disable_logs=False, offline_mode=False): """ Reads config, creates DB session, and initializes cache """ - self.config_file_name = 'augur.config.json' - self.__shell_config = None - self.__export_file = None - self.__env_file = None - self.config = default_config - self.env_config = {} - self.root_augur_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - default_config_path = self.root_augur_dir + '/' + self.config_file_name - using_config_file = False - - - config_locations = [self.config_file_name, default_config_path, f"/opt/augur/{self.config_file_name}"] - if os.getenv('AUGUR_CONFIG_FILE') is not None: - config_file_path = os.getenv('AUGUR_CONFIG_FILE') - using_config_file = True - else: - for index, location in enumerate(config_locations): - try: - f = open(location, "r+") - config_file_path = os.path.abspath(location) - using_config_file = True - f.close() - break - except FileNotFoundError: - pass - - if using_config_file: - try: - with open(config_file_path, 'r+') as config_file_handle: - self.config = json.loads(config_file_handle.read()) - except json.decoder.JSONDecodeError as e: - logger.warning('%s could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: %s', config_file_path, str(e)) - else: - logger.warning('%s could not be parsed, using defaults.') - - self.load_env_configuration() - - logger.setLevel(self.read_config("Development", "log_level")) + self.logging = AugurLogging(disable_logs=disable_logs) + self.root_augur_dir = ROOT_AUGUR_DIRECTORY + self.config = AugurConfig(self.root_augur_dir, given_config) + + # we need these for later + self.housekeeper = None + self.manager = None + + self.gunicorn_options = { + 'bind': '%s:%s' % (self.config.get_value("Server", "host"), self.config.get_value("Server", "port")), + 'workers': int(self.config.get_value('Server', 'workers')), + 'timeout': int(self.config.get_value('Server', 'timeout')) + } + self.logging.configure_logging(self.config) + self.gunicorn_options.update(self.logging.gunicorn_logging_options) self.cache_config = { 'cache.type': 'file', 'cache.data_dir': 'runtime/cache/', 'cache.lock_dir': 'runtime/cache/' } + if not os.path.exists(self.cache_config['cache.data_dir']): os.makedirs(self.cache_config['cache.data_dir']) if not os.path.exists(self.cache_config['cache.lock_dir']): @@ -77,75 +57,56 @@ def __init__(self): cache_parsed = parse_cache_config_options(self.cache_config) self.cache = CacheManager(**cache_parsed) - self.database = self.__connect_to_database() - self.spdx_db = self.__connect_to_database(include_spdx=True) + if offline_mode is False: + logger.debug("Running in online mode") + self.database, self.operations_database, self.spdx_database = self._connect_to_database() - self.metrics = Metrics(self) + self.metrics = Metrics(self) - def __connect_to_database(self, include_spdx=False): - user = self.read_config('Database', 'user') - host = self.read_config('Database', 'host') - port = self.read_config('Database', 'port') - dbname = self.read_config('Database', 'name') + def _connect_to_database(self): + logger.debug("Testing database connections") + user = self.config.get_value('Database', 'user') + host = self.config.get_value('Database', 'host') + port = self.config.get_value('Database', 'port') + dbname = self.config.get_value('Database', 'name') database_connection_string = 'postgresql://{}:{}@{}:{}/{}'.format( - user, self.read_config('Database', 'password'), host, port, dbname + user, self.config.get_value('Database', 'password'), host, port, dbname ) csearch_path_options = 'augur_data' - if include_spdx == True: - csearch_path_options += ',spdx' engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, connect_args={'options': f'-csearch_path={csearch_path_options}'}, pool_pre_ping=True) + csearch_path_options += ',spdx' + spdx_engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, + connect_args={'options': f'-csearch_path={csearch_path_options}'}, pool_pre_ping=True) + + helper_engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, + connect_args={'options': f'-csearch_path=augur_operations'}, pool_pre_ping=True) + try: - test_connection = engine.connect() - test_connection.close() - return engine + engine.connect().close() + helper_engine.connect().close() + spdx_engine.connect().close() + return engine, helper_engine, spdx_engine except s.exc.OperationalError as e: - logger.fatal(f"Unable to connect to the database. Terminating...") - exit() + logger.error("Unable to connect to the database. 
Terminating...") + raise(e) - def read_config(self, section, name=None): - """ - Read a variable in specified section of the config file, unless provided an environment variable + def shutdown(self): + if self.logging.stop_event is not None: + logger.debug("Stopping housekeeper logging listener...") + self.logging.stop_event.set() - :param section: location of given variable - :param name: name of variable - """ - if name is not None: - try: - value = self.config[section][name] - except KeyError as e: - value = default_config[section][name] - else: - try: - value = self.config[section] - except KeyError as e: - value = default_config[section] - - return value - - def load_env_configuration(self): - self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') - self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') - self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') - self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') - self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') - self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') - self.set_env_value(section='Development', name='log_level', environment_variable='AUGUR_LOG_LEVEL') - - def set_env_value(self, section, name, environment_variable, sub_config=None): - """ - Sets names and values of specified config section according to their environment variables. - """ - # using sub_config lets us grab values from nested config blocks - if sub_config is None: - sub_config = self.config + if self.housekeeper is not None: + logger.debug("Shutting down housekeeper updates...") + self.housekeeper.shutdown_updates() + self.housekeeper = None - env_value = os.getenv(environment_variable) + if self.manager is not None: + logger.debug("Shutting down manager...") + self.manager.shutdown() + self.manager = None - if env_value is not None: - self.env_config[environment_variable] = env_value - sub_config[section][name] = env_value diff --git a/augur/cli/__init__.py b/augur/cli/__init__.py --- a/augur/cli/__init__.py +++ b/augur/cli/__init__.py @@ -0,0 +1,28 @@ +import click +from functools import update_wrapper + +from augur.application import Application +from augur.config import AugurConfig +from augur.logging import AugurLogging, ROOT_AUGUR_DIRECTORY + +def pass_application(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application() + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def pass_config(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application(offline_mode=True).config + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def pass_logs_dir(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + config = AugurConfig(ROOT_AUGUR_DIRECTORY) + ctx.obj = AugurLogging.get_log_directories(config, reset_logfiles=False) + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) \ No newline at end of file diff --git a/augur/runtime.py b/augur/cli/_multicommand.py similarity index 63% rename from augur/runtime.py rename to augur/cli/_multicommand.py --- a/augur/runtime.py +++ b/augur/cli/_multicommand.py @@ -6,16 +6,14 @@ import os import sys import click +import importlib import augur.application CONTEXT_SETTINGS = dict(auto_envvar_prefix='AUGUR') class 
AugurMultiCommand(click.MultiCommand): - def __commands_folder(self): - return os.path.abspath( - os.path.join(os.path.dirname(__file__), 'cli') - ) + return os.path.abspath(os.path.dirname(__file__)) def list_commands(self, ctx): rv = [] @@ -26,13 +24,8 @@ def list_commands(self, ctx): return rv def get_command(self, ctx, name): - # try: - if sys.version_info[0] == 2: - name = name.encode('ascii', 'replace') - mod = __import__('augur.cli.' + name, - None, None, ['cli']) - - return mod.cli + module = importlib.import_module('.' + name, 'augur.cli') + return module.cli @click.command(cls=AugurMultiCommand, context_settings=CONTEXT_SETTINGS) @click.pass_context @@ -40,11 +33,4 @@ def run(ctx): """ Augur is an application for open source community health analytics """ - - app = augur.application.Application() - ctx.obj = app - return ctx.obj - - -if __name__ == '__main__': - run() + return ctx diff --git a/augur/cli/configure.py b/augur/cli/configure.py --- a/augur/cli/configure.py +++ b/augur/cli/configure.py @@ -6,190 +6,13 @@ import os import click import json +import logging -from augur import logger +from augur.config import default_config, ENVVAR_PREFIX +from augur.cli import pass_config +from augur.logging import ROOT_AUGUR_DIRECTORY -ENVVAR_PREFIX = "AUGUR_" - -default_config = { - "Database": { - "name": "augur", - "host": "localhost", - "key": "key", - "password": "augur", - "port": 5432, - "user": "augur" - }, - "Housekeeper": { - "jobs": [ - { - "all_focused": 1, - "delay": 150000, - "given": [ - "github_url" - ], - "model": "issues", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "pull_request_commits", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "repo_info", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "repo_group" - ], - "model": "commits", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "github_url" - ], - "model": "pull_requests", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "contributors", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "insights", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "badges", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "value", - "repo_group_id": 0 - }, - { - "delay": 100000, - "given": [ - "github_url" - ], - "model": "pull_request_files", - "repo_group_id": 0 - } - ] - }, - "Workers": { - "facade_worker": { - "port": 50100, - "repo_directory": "repos/", - "switch": 1, - "workers": 1 - }, - "github_worker": { - "port": 50200, - "switch": 1, - "workers": 1 - }, - "insight_worker": { - "port": 50300, - "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"}, - "contamination": 0.041, - "switch": 0, - "workers": 1, - "training_days": 365, - "anomaly_days": 2 - }, - "linux_badge_worker": { - "port": 50400, - "switch": 1, - "workers": 1 - }, - "metric_status_worker": { - "port": 50500, - "switch": 0, - "workers": 1 - }, - "pull_request_worker": { - "port": 50600, - "switch": 1, - "workers": 1 - }, - "repo_info_worker": { - "port": 50700, - "switch": 1, - "workers": 1 - }, - "value_worker": { - "port": 50800, - "scc_bin": "scc", - "switch": 0, - "workers": 1 - }, - "contributor_worker": { - "port": 50900, - "switch": 1, - "workers": 1 
- } - }, - "Facade": { - "check_updates": 1, - "clone_repos": 1, - "create_xlsx_summary_files": 1, - "delete_marked_repos": 0, - "fix_affiliations": 1, - "force_analysis": 1, - "force_invalidate_caches": 1, - "force_updates": 1, - "limited_run": 0, - "multithreaded": 0, - "nuke_stored_affiliations": 0, - "pull_repos": 1, - "rebuild_caches": 1, - "run_analysis": 1 - }, - "Server": { - "cache_expire": "3600", - "host": "0.0.0.0", - "port": "5000", - "workers": 4, - "timeout": 60 - }, - "Frontend": { - "host": "0.0.0.0", - "port": "5000" - }, - "Development": { - "log_level": "INFO" - } - } +logger = logging.getLogger(__name__) @click.group('configure', short_help='Generate an augur.config.json') def cli(): @@ -204,7 +27,9 @@ def cli(): @click.option('--github_api_key', help="GitHub API key for data collection from the GitHub API", envvar=ENVVAR_PREFIX + 'GITHUB_API_KEY') @click.option('--facade_repo_directory', help="Directory on the database server where Facade should clone repos", envvar=ENVVAR_PREFIX + 'FACADE_REPO_DIRECTORY') @click.option('--rc-config-file', help="File containing existing config whose values will be used as the defaults", type=click.Path(exists=True)) -def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file): [email protected]('--gitlab_api_key', help="GitLab API key for data collection from the GitLab API", envvar=ENVVAR_PREFIX + 'GITLAB_API_KEY') +@pass_config +def generate(augur_config, db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file, gitlab_api_key): """ Generate an augur.config.json """ @@ -250,11 +75,13 @@ def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, fa config['Database']['password'] = db_password if github_api_key is not None: config['Database']['key'] = github_api_key + if gitlab_api_key is not None: + config['Database']['gitlab_api_key'] = gitlab_api_key if facade_repo_directory is not None: config['Workers']['facade_worker']['repo_directory'] = facade_repo_directory try: - with open(os.path.abspath('augur.config.json'), 'w') as f: + with open(os.path.abspath(ROOT_AUGUR_DIRECTORY + '/augur.config.json'), 'w') as f: json.dump(config, f, indent=4) logger.info('augur.config.json successfully created') except Exception as e: diff --git a/augur/cli/db.py b/augur/cli/db.py --- a/augur/cli/db.py +++ b/augur/cli/db.py @@ -1,5 +1,6 @@ from os import walk, chdir, environ, chmod, path import os +import logging from sys import exit import stat from collections import OrderedDict @@ -12,7 +13,9 @@ import pandas as pd from sqlalchemy import exc -from augur import logger +from augur.cli import pass_config, pass_application + +logger = logging.getLogger(__name__) @click.group('db', short_help='Database utilities') def cli(): @@ -20,14 +23,12 @@ def cli(): @cli.command('add-repos') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repos(ctx, filename): +@pass_application +def add_repos(augur_app, filename): """ Add repositories to Augur's database """ - app = ctx.obj - - df = app.database.execute(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups")) + df = augur_app.database.execute(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups")) repo_group_IDs = [group[0] for group in df.fetchall()] insertSQL = s.sql.text(""" @@ -41,33 +42,29 @@ def add_repos(ctx, filename): for row in data: logger.info(f"Inserting repo with Git URL `{row[1]}` into repo group {row[0]}") if 
int(row[0]) in repo_group_IDs: - result = app.database.execute(insertSQL, repo_group_id=int(row[0]), repo_git=row[1]) + result = augur_app.database.execute(insertSQL, repo_group_id=int(row[0]), repo_git=row[1]) else: - logger.warn(f"Invalid repo group id specified for {row[1]}, skipping.") + logger.warning(f"Invalid repo group id specified for {row[1]}, skipping.") @cli.command('get-repo-groups') [email protected]_context -def get_repo_groups(ctx): +@pass_application +def get_repo_groups(augur_app): """ List all repo groups and their associated IDs """ - app = ctx.obj - - df = pd.read_sql(s.sql.text("SELECT repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), app.database) + df = pd.read_sql(s.sql.text("SELECT repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), augur_app.database) print(df) return df @cli.command('add-repo-groups') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repo_groups(ctx, filename): +@pass_application +def add_repo_groups(augur_app, filename): """ Create new repo groups in Augur's database """ - app = ctx.obj - - df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), app.database) + df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), augur_app.database) repo_group_IDs = df['repo_group_id'].values.tolist() insert_repo_group_sql = s.sql.text(""" @@ -80,51 +77,48 @@ def add_repo_groups(ctx, filename): logger.info(f"Inserting repo group with name {row[1]} and ID {row[0]}...") if int(row[0]) not in repo_group_IDs: repo_group_IDs.append(int(row[0])) - app.database.execute(insert_repo_group_sql, repo_group_id=int(row[0]), repo_group_name=row[1]) + augur_app.database.execute(insert_repo_group_sql, repo_group_id=int(row[0]), repo_group_name=row[1]) else: logger.info(f"Repo group with ID {row[1]} for repo group {row[1]} already exists, skipping...") @cli.command('update-repo-directory') @click.argument('repo_directory') [email protected]_context -def update_repo_directory(ctx, repo_directory): +@pass_application +def update_repo_directory(augur_app, repo_directory): """ Update Facade worker repo cloning directory """ - app = ctx.obj - updateRepoDirectorySQL = s.sql.text(""" UPDATE augur_data.settings SET VALUE = :repo_directory WHERE setting='repo_directory'; """) - app.database.execute(updateRepoDirectorySQL, repo_directory=repo_directory) + augur_app.database.execute(updateRepoDirectorySQL, repo_directory=repo_directory) logger.info(f"Updated Facade repo directory to: {repo_directory}") # get_db_version is a helper function to print_db_version and upgrade_db_version -def get_db_version(app): +def get_db_version(augur_app): db_version_sql = s.sql.text(""" SELECT * FROM augur_operations.augur_settings WHERE setting = 'augur_data_version' """) - return int(app.database.execute(db_version_sql).fetchone()[2]) + return int(augur_app.database.execute(db_version_sql).fetchone()[2]) @cli.command('print-db-version') [email protected]_context -def print_db_version(ctx): +@pass_application +def print_db_version(augur_app): """ Get the version of the configured database """ - print(get_db_version(ctx.obj)) + print(get_db_version(augur_app)) @cli.command('upgrade-db-version') [email protected]_context -def upgrade_db_version(ctx): +@pass_application +def upgrade_db_version(augur_app): """ Upgrade the configured database to the latest version """ - app = ctx.obj - check_pgpass_credentials(app.config) - current_db_version = get_db_version(app) + 
check_pgpass_credentials(augur_app.config.get_raw_config()) + current_db_version = get_db_version(augur_app) update_scripts_filenames = [] for (_, _, filenames) in walk('schema/generate'): @@ -143,23 +137,22 @@ def upgrade_db_version(ctx): if current_db_version == most_recent_version: logger.info("Your database is already up to date. ") elif current_db_version > most_recent_version: - logger.info(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") + logger.error(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") for target_version, script_location in target_version_script_map.items(): if target_version == current_db_version + 1: logger.info(f"Upgrading from {current_db_version} to {target_version}") - run_psql_command_in_database(app, '-f', f"schema/generate/{script_location}") + run_psql_command_in_database(augur_app, '-f', f"schema/generate/{script_location}") current_db_version += 1 @cli.command('check-for-upgrade') [email protected]_context -def check_for_upgrade(ctx): +@pass_application +def check_for_upgrade(augur_app): """ Upgrade the configured database to the latest version """ - app = ctx.obj - check_pgpass_credentials(app.config) - current_db_version = get_db_version(app) + check_pgpass_credentials(augur_app.config.get_raw_config()) + current_db_version = get_db_version(augur_app) update_scripts_filenames = [] for (_, _, filenames) in walk('schema/generate'): @@ -180,18 +173,17 @@ def check_for_upgrade(ctx): elif current_db_version < most_recent_version: logger.info(f"Current database version: v{current_db_version}\nPlease upgrade to the most recent version (v{most_recent_version}) with augur db upgrade-db-version.") elif current_db_version > most_recent_version: - logger.warn(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") + logger.error(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. 
Please contact your system administrator to resolve this error.") @cli.command('create-schema') [email protected]_context -def create_schema(ctx): +@pass_application +def create_schema(augur_app): """ Create schema in the configured database """ - app = ctx.obj - check_pgpass_credentials(app.config) - run_psql_command_in_database(app, '-f', 'schema/create_schema.sql') + check_pgpass_credentials(augur_app.config.get_raw_config()) + run_psql_command_in_database(augur_app, '-f', 'schema/create_schema.sql') def generate_key(length): return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length)) @@ -202,46 +194,40 @@ def generate_api_key(ctx): """ Generate and set a new Augur API key """ - app = ctx.obj key = generate_key(32) ctx.invoke(update_api_key, api_key=key) print(key) @cli.command('update-api-key') @click.argument("api_key") [email protected]_context -def update_api_key(ctx, api_key): +@pass_application +def update_api_key(augur_app, api_key): """ Update the API key in the database to the given key """ - app = ctx.obj - update_api_key_sql = s.sql.text(""" UPDATE augur_operations.augur_settings SET VALUE = :api_key WHERE setting='augur_api_key'; """) - app.database.execute(update_api_key_sql, api_key=api_key) - logger.info(f"Update Augur API key to: {api_key}") + augur_app.database.execute(update_api_key_sql, api_key=api_key) + logger.info(f"Updated Augur API key to: {api_key}") @cli.command('get-api-key') [email protected]_context -def get_api_key(ctx): - app = ctx.obj - +@pass_application +def get_api_key(augur_app): get_api_key_sql = s.sql.text(""" SELECT value FROM augur_operations.augur_settings WHERE setting='augur_api_key'; """) try: - print(app.database.execute(get_api_key_sql).fetchone()[0]) + print(augur_app.database.execute(get_api_key_sql).fetchone()[0]) except TypeError: - logger.warn("No Augur API key found.") + logger.error("No Augur API key found.") @cli.command('check-pgpass', short_help="Check the ~/.pgpass file for Augur's database credentials") [email protected]_context -def check_pgpass(ctx): - app = ctx.obj - check_pgpass_credentials(app.config) +@pass_config +def check_pgpass(config): + check_pgpass_credentials(config.get_raw_config()) @cli.command('init-database') @click.option('--default-db-name', default='postgres') @@ -252,12 +238,10 @@ def check_pgpass(ctx): @click.option('--target-password', default='augur') @click.option('--host', default='localhost') @click.option('--port', default='5432') [email protected]_context -def init_database(ctx, default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port): +def init_database(default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port): """ Create database with the given credentials using the given maintenance database """ - app = ctx.obj config = { 'Database': { 'name': default_db_name, @@ -276,15 +260,15 @@ def init_database(ctx, default_db_name, default_user, default_password, target_d def run_db_creation_psql_command(host, port, user, name, command): call(['psql', '-h', host, '-p', port, '-U', user, '-d', name, '-a', '-w', '-c', command]) -def run_psql_command_in_database(app, target_type, target): +def run_psql_command_in_database(augur_app, target_type, target): if target_type not in ['-f', '-c']: - logger.fatal("Invalid target type. Exiting...") + logger.error("Invalid target type. 
Exiting...") exit(1) - call(['psql', '-h', app.read_config('Database', 'host'),\ - '-d', app.read_config('Database', 'name'),\ - '-U', app.read_config('Database', 'user'),\ - '-p', str(app.read_config('Database', 'port')),\ + call(['psql', '-h', augur_app.config.get_value('Database', 'host'),\ + '-d', augur_app.config.get_value('Database', 'name'),\ + '-U', augur_app.config.get_value('Database', 'user'),\ + '-p', str(augur_app.config.get_value('Database', 'port')),\ '-a', '-w', target_type, target ]) @@ -292,14 +276,14 @@ def check_pgpass_credentials(config): pgpass_file_path = environ['HOME'] + '/.pgpass' if not path.isfile(pgpass_file_path): - logger.debug("~/.pgpass does not exist, creating.") + logger.info("~/.pgpass does not exist, creating.") open(pgpass_file_path, 'w+') chmod(pgpass_file_path, stat.S_IWRITE | stat.S_IREAD) pgpass_file_mask = oct(os.stat(pgpass_file_path).st_mode & 0o777) if pgpass_file_mask != '0o600': - logger.debug("Updating ~/.pgpass file permissions.") + logger.info("Updating ~/.pgpass file permissions.") chmod(pgpass_file_path, stat.S_IWRITE | stat.S_IREAD) with open(pgpass_file_path, 'a+') as pgpass_file: diff --git a/augur/cli/logging.py b/augur/cli/logging.py new file mode 100644 --- /dev/null +++ b/augur/cli/logging.py @@ -0,0 +1,89 @@ +import click +import os +from os import walk + +from augur.cli import pass_logs_dir + [email protected]("logging", short_help="View Augur's log files") +def cli(): + pass + [email protected]("directory") +@pass_logs_dir +def directory(logs_dir): + """ + Print the location of Augur's logs directory + """ + print(logs_dir) + [email protected]("tail") [email protected]("lines", default=20) +@pass_logs_dir +def tail(logs_dir, lines): + """ + Output the last n lines of the main Augur and worker logfiles + """ + root_log_dir = logs_dir + worker_log_dir = logs_dir + "/workers/" + if lines is None: + lines = 20 + + files = [] + directories = [] + for (_, _, filenames) in walk(root_log_dir): + for file in filenames: + result = _tail(open(root_log_dir + "/" + file), lines) + print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + break + + files = [] + directories = [] + for (dirpath, dirnames, filenames) in walk(worker_log_dir): + directories.extend(dirnames) + break + + for directory in directories: + specific_worker_log_dir = worker_log_dir + directory + for (_, _, filenames) in walk(specific_worker_log_dir): + files.extend(filenames) + + for file in [file for file in filenames if "collection" in file]: + result = _tail(open(specific_worker_log_dir + "/" + file), lines) + print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + break + +def _tail(f, lines=20, _buffer=4098): + lines_found = [] + + # block counter will be multiplied by buffer + # to get the block size from the end + block_counter = -1 + + # loop until we find X lines + while len(lines_found) < lines: + try: + f.seek(block_counter * _buffer, os.SEEK_END) + except IOError: # either file is too small, or too many lines requested + f.seek(0) + lines_found = f.readlines() + break + + lines_found = f.readlines() + + # we found enough lines, get out + # Removed this line because it was redundant the while will catch + # it, I left it for history + # if len(lines_found) > lines: + # break + + # decrement the block counter to get the + # next X bytes + block_counter -= 1 + + return lines_found[-lines:] \ No newline at end of file diff --git a/augur/cli/run.py b/augur/cli/run.py --- a/augur/cli/run.py +++ 
b/augur/cli/run.py @@ -4,187 +4,138 @@ """ from copy import deepcopy -import os, time, atexit, subprocess, click +import os, time, atexit, subprocess, click, atexit, logging, sys import multiprocessing as mp import gunicorn.app.base -from gunicorn.six import iteritems from gunicorn.arbiter import Arbiter -from augur.housekeeper.housekeeper import Housekeeper -from augur import logger +from augur.housekeeper import Housekeeper from augur.server import Server - from augur.cli.util import kill_processes -import time +from augur.application import Application + +logger = logging.getLogger("augur") @click.command("run") @click.option("--disable-housekeeper", is_flag=True, default=False, help="Turns off the housekeeper") @click.option("--skip-cleanup", is_flag=True, default=False, help="Disables the old process cleanup that runs before Augur starts") [email protected]_context -def cli(ctx, disable_housekeeper, skip_cleanup): +def cli(disable_housekeeper, skip_cleanup): """ Start Augur's backend server """ + augur_app = Application() + logger.info("Augur application initialized") if not skip_cleanup: - logger.info("Cleaning up old Augur processes. Just a moment please...") - ctx.invoke(kill_processes) + logger.debug("Cleaning up old Augur processes...") + kill_processes() time.sleep(2) else: - logger.info("Skipping cleanup processes.") - - def get_process_id(name): - """Return process ids found by name or command - """ - child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False) - response = child.communicate()[0] - return [int(pid) for pid in response.split()] + logger.debug("Skipping process cleanup") - app = ctx.obj + master = initialize_components(augur_app, disable_housekeeper) + logger.info('Starting Gunicorn server in the background...') + logger.info('Housekeeper update process logs will now take over.') + Arbiter(master).run() - mp.set_start_method('forkserver', force=True) +def initialize_components(augur_app, disable_housekeeper): master = None - manager = None broker = None housekeeper = None - - logger.info("Booting broker and its manager...") - manager = mp.Manager() - broker = manager.dict() - - controller = app.read_config('Workers') - worker_pids = [] worker_processes = [] + mp.set_start_method('forkserver', force=True) if not disable_housekeeper: - if not controller: - return + logger.info("Booting manager") + manager = mp.Manager() + + logger.info("Booting broker") + broker = manager.dict() + + housekeeper = Housekeeper(broker=broker, augur_app=augur_app) + + controller = augur_app.config.get_section('Workers') + for worker in controller.keys(): - if not controller[worker]['switch']: - continue - logger.info("Your config has the option set to automatically boot {} instances of the {}".format(controller[worker]['workers'], worker)) - pids = get_process_id("/bin/sh -c cd workers/{} && {}_start".format(worker, worker)) - worker_pids += pids - if len(pids) > 0: - worker_pids.append(pids[0] + 1) - pids.append(pids[0] + 1) - logger.info("Found and preparing to kill previous {} worker pids: {}".format(worker,pids)) - for pid in pids: - try: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - - @atexit.register - def exit(): - try: - for pid in worker_pids: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - for process in worker_processes: - logger.info("Shutting down worker process with pid: {} ...".format(process.pid)) - process.terminate() + if controller[worker]['switch']: + 
for i in range(controller[worker]['workers']): + logger.info("Booting {} #{}".format(worker, i + 1)) + worker_process = mp.Process(target=worker_start, kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) + worker_processes.append(worker_process) + worker_process.start() - if master is not None: - master.halt() - logger.info("Shutting down housekeeper updates...") - if housekeeper is not None: - housekeeper.shutdown_updates() - - # if hasattr(manager, "shutdown"): - # wait for the spawner and the worker threads to go down - # - if manager is not None: - manager.shutdown() - # check if it is still alive and kill it if necessary - # if manager._process.is_alive(): - manager._process.terminate() - - # Prevent multiprocessing's atexit from conflicting with gunicorn - logger.info("Killing main augur process with PID: {}".format(os.getpid())) - os.kill(os.getpid(), 9) - os._exit(0) + augur_app.manager = manager + augur_app.broker = broker + augur_app.housekeeper = housekeeper - if not disable_housekeeper: - logger.info("Booting housekeeper...") - jobs = deepcopy(app.read_config('Housekeeper', 'jobs')) - try: - housekeeper = Housekeeper( - jobs, - broker, - broker_host=app.read_config('Server', 'host'), - broker_port=app.read_config('Server', 'port'), - user=app.read_config('Database', 'user'), - password=app.read_config('Database', 'password'), - host=app.read_config('Database', 'host'), - port=app.read_config('Database', 'port'), - dbname=app.read_config('Database', 'name') - ) - except KeyboardInterrupt as e: - exit() - - logger.info("Housekeeper has finished booting.") - - if controller: - for worker in controller.keys(): - if controller[worker]['switch']: - for i in range(controller[worker]['workers']): - logger.info("Booting {} #{}".format(worker, i + 1)) - worker_process = mp.Process(target=worker_start, kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) - worker_process.start() - worker_processes.append(worker_process) - - host = app.read_config('Server', 'host') - port = app.read_config('Server', 'port') - workers = int(app.read_config('Server', 'workers')) - timeout = int(app.read_config('Server', 'timeout')) - options = { - 'bind': '%s:%s' % (host, port), - 'workers': workers, - 'accesslog': '-', - 'access_log_format': '%(h)s - %(t)s - %(r)s', - 'timeout': timeout - } - logger.info('Starting server...') - master = Arbiter(AugurGunicornApp(options, manager=manager, broker=broker, housekeeper=housekeeper)).run() + atexit._clear() + atexit.register(exit, augur_app, worker_processes, master) + return AugurGunicornApp(augur_app.gunicorn_options, augur_app=augur_app) def worker_start(worker_name=None, instance_number=0, worker_port=None): - time.sleep(120 * instance_number) - destination = subprocess.DEVNULL try: - destination = open("workers/{}/worker_{}.log".format(worker_name, worker_port), "a+") - except IOError as e: - logger.error("Error opening log file for auto-started worker {}: {}".format(worker_name, e)) - process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) - logger.info("{} booted.".format(worker_name)) + time.sleep(30 * instance_number) + destination = subprocess.DEVNULL + process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) + logger.info("{} #{} 
booted.".format(worker_name,instance_number+1)) + except KeyboardInterrupt as e: + pass + +def exit(augur_app, worker_processes, master): + + logger.info("Shutdown started for this Gunicorn worker...") + augur_app.shutdown() + + if worker_processes: + for process in worker_processes: + logger.debug("Shutting down worker process with pid: {}...".format(process.pid)) + process.terminate() + + if master is not None: + logger.debug("Shutting down Gunicorn server") + master.halt() + master = None + + logger.info("Shutdown complete") class AugurGunicornApp(gunicorn.app.base.BaseApplication): """ Loads configurations, initializes Gunicorn, loads server """ - def __init__(self, options=None, manager=None, broker=None, housekeeper=None): - self.options = options or {} - self.manager = manager - self.broker = broker - self.housekeeper = housekeeper + def __init__(self, options={}, augur_app=None): + self.options = options + self.augur_app = augur_app + self.manager = self.augur_app.manager + self.broker = self.augur_app.broker + self.housekeeper = self.augur_app.housekeeper + self.server = None + logger.debug(f"Gunicorn will start {self.options['workers']} worker processes") super(AugurGunicornApp, self).__init__() - # self.cfg.pre_request.set(pre_request) def load_config(self): """ Sets the values for configurations """ - config = dict([(key, value) for key, value in iteritems(self.options) - if key in self.cfg.settings and value is not None]) - for key, value in iteritems(config): + config = {key: value for key, value in self.options.items() + if key in self.cfg.settings and value is not None} + for key, value in config.items(): self.cfg.set(key.lower(), value) - def load(self): + def get_augur_app(self): """ Returns the loaded server """ - server = Server(manager=self.manager, broker=self.broker, housekeeper=self.housekeeper) - return server.app + self.load() + return self.server.augur_app + def load(self): + """ + Returns the loaded server + """ + if self.server is None: + try: + self.server = Server(augur_app=self.augur_app) + except Exception as e: + logger.error(f"An error occured when Gunicorn tried to load the server: {e}") + return self.server.app diff --git a/augur/cli/util.py b/augur/cli/util.py --- a/augur/cli/util.py +++ b/augur/cli/util.py @@ -5,6 +5,7 @@ import os import signal +import logging from subprocess import call, run import psutil @@ -12,27 +13,27 @@ import pandas as pd import sqlalchemy as s -from augur import logger -from augur.cli.configure import default_config +from augur.cli import pass_config, pass_application + +logger = logging.getLogger(__name__) @click.group('util', short_help='Miscellaneous utilities') def cli(): pass @cli.command('export-env') [email protected]_context -def export_env(ctx): +@pass_config +def export_env(config): """ Exports your GitHub key and database credentials """ - app = ctx.obj export_file = open(os.getenv('AUGUR_EXPORT_FILE', 'augur_export_env.sh'), 'w+') export_file.write('#!/bin/bash') export_file.write('\n') env_file = open(os.getenv('AUGUR_ENV_FILE', 'docker_env.txt'), 'w+') - for env_var in app.env_config.items(): + for env_var in config.get_env_config().items(): export_file.write('export ' + env_var[0] + '="' + str(env_var[1]) + '"\n') env_file.write(env_var[0] + '=' + str(env_var[1]) + '\n') @@ -40,8 +41,7 @@ def export_env(ctx): env_file.close() @cli.command('kill') [email protected]_context -def kill_processes(ctx): +def cli_kill_processes(): """ Terminates all currently running backend Augur processes, including any workers. 
Will only work in a virtual environment. """ @@ -56,8 +56,22 @@ def kill_processes(ctx): except psutil.NoSuchProcess as e: pass +def kill_processes(): + logger = logging.getLogger("augur") + processes = get_augur_processes() + if processes != []: + for process in processes: + if process.pid != os.getpid(): + # logger.info(f"Killing {process.pid}: {' '.join(process.info['cmdline'][1:])}") + logger.info(f"Killing process {process.pid}") + try: + process.send_signal(signal.SIGTERM) + except psutil.NoSuchProcess as e: + pass + @cli.command('list',) -def list_processes(): +@pass_config +def list_processes(config): """ Outputs the name and process ID (PID) of all currently running backend Augur processes, including any workers. Will only work in a virtual environment. """ @@ -78,13 +92,11 @@ def get_augur_processes(): return processes @cli.command('repo-reset') [email protected]_context -def repo_reset(ctx): +@pass_application +def repo_reset(augur_app): """ Refresh repo collection to force data collection """ - app = ctx.obj - - app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") + augur_app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") logger.info("Repos successfully reset") diff --git a/augur/config.py b/augur/config.py new file mode 100644 --- /dev/null +++ b/augur/config.py @@ -0,0 +1,344 @@ +import os +import json +import logging + +ENVVAR_PREFIX = "AUGUR_" + +default_config = { + "version": 1, + "Database": { + "name": "augur", + "host": "localhost", + "key": "key", + "password": "augur", + "port": 5432, + "user": "augur", + "gitlab_api_key":"gitlab_api_key" + }, + "Housekeeper": { + "jobs": [ + { + "all_focused": 1, + "delay": 150000, + "given": [ + "github_url" + ], + "model": "issues", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "pull_request_commits", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "repo_info", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "repo_group" + ], + "model": "commits", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "github_url" + ], + "model": "pull_requests", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "contributors", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "insights", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "badges", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "value", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "pull_request_files", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "releases", + "repo_group_id": 0 + } + ] + }, + "Workers": { + "facade_worker": { + "port": 50100, + "repo_directory": "repos/", + "switch": 1, + "workers": 1 + }, + "github_worker": { + "port": 50200, + "switch": 1, + "workers": 1 + }, + "insight_worker": { + "port": 50300, + "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", + "reviews": "pull_requests", "contributors-new": "new_contributors"}, + "confidence_interval": 95, + "contamination": 0.041, + "switch": 0, + "workers": 1, + "training_days": 365, + "anomaly_days": 2 + }, + 
"linux_badge_worker": { + "port": 50400, + "switch": 1, + "workers": 1 + }, + "metric_status_worker": { + "port": 50500, + "switch": 0, + "workers": 1 + }, + "pull_request_worker": { + "port": 50600, + "switch": 1, + "workers": 1 + }, + "repo_info_worker": { + "port": 50700, + "switch": 1, + "workers": 1 + }, + "value_worker": { + "port": 50800, + "scc_bin": "scc", + "switch": 0, + "workers": 1 + }, + "contributor_worker": { + "port": 50900, + "switch": 1, + "workers": 1 + }, + "gitlab_issues_worker": { + "port": 51000, + "switch": 1, + "workers": 1 + }, + "release_worker": { + "port": 51100, + "switch": 1, + "workers": 1 + } + }, + "Facade": { + "check_updates": 1, + "clone_repos": 1, + "create_xlsx_summary_files": 1, + "delete_marked_repos": 0, + "fix_affiliations": 1, + "force_analysis": 1, + "force_invalidate_caches": 1, + "force_updates": 1, + "limited_run": 0, + "multithreaded": 0, + "nuke_stored_affiliations": 0, + "pull_repos": 1, + "rebuild_caches": 1, + "run_analysis": 1 + }, + "Server": { + "cache_expire": "3600", + "host": "0.0.0.0", + "port": "5000", + "workers": 4, + "timeout": 60 + }, + "Frontend": { + "host": "0.0.0.0", + "port": "5000" + }, + "Logging": { + "logs_directory": "logs/", + "log_level": "INFO", + "verbose": 0, + "quiet": 0, + "debug": 0 + } + } + +logger = logging.getLogger(__name__) + +class AugurConfig(): + """docstring for AugurConfig""" + def __init__(self, root_augur_dir, given_config={}): + self._default_config_file_name = 'augur.config.json' + self._root_augur_dir = root_augur_dir + self._default_config = default_config + self._env_config = {} + self.load_config() + self.version = self.get_version() + self._config.update(given_config) + + def get_section(self, section_name): + try: + return self._config[section_name] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name} not found in loaded config. Checking default config") + try: + return self._default_config[section_name] + except KeyError as e: + logger.error(f"No defaults found for {section_name}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}") + + def get_version(self): + try: + return self._config["version"] + except KeyError as e: + logger.warning("No config version found. Setting version to 0.") + return 0 + + def get_value(self, section_name, value): + try: + return self._config[section_name][value] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name}:{value} not found in loaded config. Checking default config") + try: + return self._default_config[section_name][value] + except KeyError as e: + logger.error(f"No defaults found for {section_name}:{value}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}:{value}") + + def load_config(self): + self._config = None + self.using_default_config = False + + logger.info("Attempting to load config file") + try: + config_file_path = self.discover_config_file() + try: + with open(config_file_path, 'r+') as config_file_handle: + self._config = json.loads(config_file_handle.read()) + logger.info("Config file loaded successfully") + except json.decoder.JSONDecodeError as e: + logger.warning("Unable to parse config. Using default configuration") + self.using_default_config = True + self._config = default_config + except AugurConfigFileNotFoundException as e: + logger.warning("Config file not found. 
Using default configuration") + self.using_default_config = True + self._config = default_config + + self.load_env_configuration() + + def discover_config_file(self): + default_config_path = self._root_augur_dir + '/' + self._default_config_file_name + config_file_path = None + + config_locations = [self._default_config_file_name, default_config_path + , f"/opt/augur/{self._default_config_file_name}"] + if os.getenv('AUGUR_CONFIG_FILE', None) is not None: + config_file_path = os.getenv('AUGUR_CONFIG_FILE') + else: + for location in config_locations: + try: + f = open(location, "r+") + config_file_path = os.path.abspath(location) + f.close() + break + except FileNotFoundError: + pass + if config_file_path: + return config_file_path + else: + raise(AugurConfigFileNotFoundException(message=f"{self._default_config_file_name} not found", errors=None)) + + def load_env_configuration(self): + self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') + self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') + self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') + self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') + self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') + self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') + self.set_env_value(section='Logging', name='log_level', environment_variable='AUGUR_LOG_LEVEL') + self.set_env_value(section='Logging', name='quiet', environment_variable='AUGUR_LOG_QUIET') + self.set_env_value(section='Logging', name='debug', environment_variable='AUGUR_LOG_DEBUG') + self.set_env_value(section='Logging', name='verbose', environment_variable='AUGUR_LOG_VERBOSE') + + def set_env_value(self, section, name, environment_variable, sub_config=None): + """ + Sets names and values of specified config section according to their environment variables. 
+ """ + # using sub_config lets us grab values from nested config blocks + if sub_config is None: + sub_config = self._config + + env_value = os.getenv(environment_variable) + + if env_value is not None: + self._env_config[environment_variable] = env_value + sub_config[section][name] = env_value + # logger.info(f"{section}:[\"{name}\"] set to {env_value} by: {environment_variable}") + else: + self._env_config[environment_variable] = self.get_value(section, name) + + def get_raw_config(self): + return self._config + + def get_default_config(self): + return self._default_config + + def get_env_config(self): + return self._env_config + +class AugurConfigFileNotFoundException(Exception): + def __init__(self, message, errors): + super().__init__(message) diff --git a/augur/housekeeper/housekeeper.py b/augur/housekeeper.py similarity index 81% rename from augur/housekeeper/housekeeper.py rename to augur/housekeeper.py --- a/augur/housekeeper/housekeeper.py +++ b/augur/housekeeper.py @@ -1,69 +1,85 @@ """ Keeps data up to date """ +import coloredlogs +from copy import deepcopy import logging, os, time, requests -from multiprocessing import Process +import logging.config +from multiprocessing import Process, get_start_method from sqlalchemy.ext.automap import automap_base import sqlalchemy as s import pandas as pd from sqlalchemy import MetaData -logging.basicConfig(filename='housekeeper.log') + +from augur.logging import AugurLogging + +import warnings +warnings.filterwarnings('ignore') + +logger = logging.getLogger(__name__) class Housekeeper: - def __init__(self, jobs, broker, broker_host, broker_port, user, password, host, port, dbname): + def __init__(self, broker, augur_app): + logger.info("Booting housekeeper") - self.broker_host = broker_host - self.broker_port = broker_port + self._processes = [] + self.augur_logging = augur_app.logging + self.jobs = deepcopy(augur_app.config.get_value("Housekeeper", "jobs")) + self.broker_host = augur_app.config.get_value("Server", "host") + self.broker_port = augur_app.config.get_value("Server", "port") self.broker = broker - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - user, password, host, port, dbname - ) - - dbschema='augur_data' - self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + self.db = augur_app.database + self.helper_db = augur_app.operations_database helper_metadata = MetaData() helper_metadata.reflect(self.helper_db, only=['worker_job']) HelperBase = automap_base(metadata=helper_metadata) HelperBase.prepare() - self.job_table = HelperBase.classes.worker_job.__table__ repoUrlSQL = s.sql.text(""" SELECT repo_git FROM repo """) - rs = pd.read_sql(repoUrlSQL, self.db, params={}) - all_repos = rs['repo_git'].values.tolist() # List of tasks that need periodic updates - self.__updatable = self.prep_jobs(jobs) + self.schedule_updates() + + def schedule_updates(self): + """ + Starts update processes + """ + self.prep_jobs() + self.augur_logging.initialize_housekeeper_logging_listener() + logger.info("Scheduling update processes") + for job in self.jobs: + process = Process(target=self.updater_process, name=job["model"], args=(self.broker_host, self.broker_port, self.broker, job, (self.augur_logging.housekeeper_job_config, self.augur_logging.get_config()))) + self._processes.append(process) + 
process.start() - self.__processes = [] - self.__updater() @staticmethod - def updater_process(broker_host, broker_port, broker, job): + def updater_process(broker_host, broker_port, broker, job, logging_config): """ Controls a given plugin's update process - :param name: name of object to be updated - :param delay: time needed to update - :param shared: shared object that is to also be updated + """ - + logging.config.dictConfig(logging_config[0]) + logger = logging.getLogger(f"augur.jobs.{job['model']}") + coloredlogs.install(level=logging_config[1]["log_level"], logger=logger, fmt=logging_config[1]["format_string"]) + + if logging_config[1]["quiet"]: + logger.disabled + if 'repo_group_id' in job: repo_group_id = job['repo_group_id'] - logging.info('Housekeeper spawned {} model updater process for repo group id {} with PID {}\n'.format(job['model'], repo_group_id, os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo group id {}'.format(job['model'], repo_group_id)) else: repo_group_id = None - logging.info('Housekeeper spawned {} model updater process for repo ids {} with PID {}\n'.format(job['model'], job['repo_ids'], os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo ids {}'.format(job['model'], job['repo_ids'])) try: compatible_worker_found = False @@ -76,10 +92,10 @@ def updater_process(broker_host, broker_port, broker, job): time.sleep(3) continue - logging.info("Housekeeper recognized that the broker has a worker that " + - "can handle the {} model... beginning to distribute maintained tasks\n".format(job['model'])) + logger.info("Housekeeper recognized that the broker has a worker that " + + "can handle the {} model... beginning to distribute maintained tasks".format(job['model'])) while True: - logging.info('Housekeeper updating {} model with given {}...\n'.format( + logger.info('Housekeeper updating {} model with given {}...'.format( job['model'], job['given'][0])) if job['given'][0] == 'git_url' or job['given'][0] == 'github_url': @@ -100,9 +116,9 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.error("Error encountered: {}".format(e)) - logging.info(task) + logger.debug(task) time.sleep(15) @@ -119,61 +135,33 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.error("Error encountered: {}".format(e)) - logging.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)\n".format(len(job['repos']))) + logger.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)".format(len(job['repos']))) time.sleep(job['delay']) - - except KeyboardInterrupt: - os.kill(os.getpid(), 9) - os._exit(0) - except: - raise - def __updater(self, jobs=None): - """ - Starts update processes - """ - logging.info("Starting update processes...") - if jobs is None: - jobs = self.__updatable - for job in jobs: - up = Process(target=self.updater_process, args=(self.broker_host, self.broker_port, self.broker, job), daemon=True) - up.start() - self.__processes.append(up) - - def update_all(self): - """ - Updates all plugins - """ - for updatable in self.__updatable: - 
updatable['update']() - - def schedule_updates(self): - """ - Schedules updates - """ - # don't use this, - logging.debug('Scheduling updates...') - self.__updater() + except KeyboardInterrupt as e: + pass def join_updates(self): """ Join to the update processes """ - for process in self.__processes: + for process in self._processes: + logger.debug(f"Joining {process.name} update process") process.join() def shutdown_updates(self): """ Ends all running update processes """ - for process in self.__processes: + for process in self._processes: + # logger.debug(f"Terminating {process.name} update process") process.terminate() - def prep_jobs(self, jobs): - - for job in jobs: + def prep_jobs(self): + logger.info("Preparing housekeeper jobs") + for job in self.jobs: if 'repo_group_id' in job or 'repo_ids' in job: # If RG id is 0 then it just means to query all repos where_and = 'AND' if job['model'] == 'issues' and 'repo_group_id' in job else 'WHERE' @@ -269,7 +257,7 @@ def prep_jobs(self, jobs): reorganized_repos = pd.read_sql(repo_url_sql, self.db, params={}) if len(reorganized_repos) == 0: - logging.info("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) + logger.warning("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) job['repos'] = [] continue @@ -290,7 +278,7 @@ def prep_jobs(self, jobs): 'oauth_id': 0 } result = self.helper_db.execute(self.job_table.insert().values(job_tuple)) - logging.info("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) + logger.debug("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) # If a last id is not recorded, start from beginning of repos # (first id is not necessarily 0) @@ -347,5 +335,3 @@ def prep_jobs(self, jobs): job['repos'] = rs # time.sleep(120) - return jobs - diff --git a/augur/logging.py b/augur/logging.py new file mode 100644 --- /dev/null +++ b/augur/logging.py @@ -0,0 +1,305 @@ +import logging +import logging.config +import logging.handlers +from logging import FileHandler, StreamHandler, Formatter +from multiprocessing import Process, Queue, Event, current_process +from time import sleep +import os +from pathlib import Path +import atexit +import shutil +import coloredlogs +from copy import deepcopy + +from augur import ROOT_AUGUR_DIRECTORY + +logger = logging.getLogger(__name__) + +class AugurLogging(): + + simple_format_string = "[%(process)d] %(name)s [%(levelname)s] %(message)s" + verbose_format_string = "%(asctime)s,%(msecs)dms [PID: %(process)d] %(name)s [%(levelname)s] %(message)s" + cli_format_string = "CLI: [%(module)s.%(funcName)s] [%(levelname)s] %(message)s" + config_format_string = "[%(levelname)s] %(message)s" + error_format_string = "%(asctime)s [PID: %(process)d] %(name)s [%(funcName)s() in %(filename)s:L%(lineno)d] %(levelname)s: %(message)s" + + @staticmethod + def get_log_directories(augur_config, reset_logfiles=True): + LOGS_DIRECTORY = augur_config.get_value("Logging", "logs_directory") + + if LOGS_DIRECTORY[0] != "/": + LOGS_DIRECTORY = ROOT_AUGUR_DIRECTORY + "/" + LOGS_DIRECTORY + + if LOGS_DIRECTORY[-1] != "/": + LOGS_DIRECTORY += "/" + + if reset_logfiles is True: + try: + shutil.rmtree(LOGS_DIRECTORY) + except FileNotFoundError as e: + pass + + Path(LOGS_DIRECTORY).mkdir(exist_ok=True) + + return LOGS_DIRECTORY + + def __init__(self, disable_logs=False, reset_logfiles=True): 
+ self.stop_event = None + self.LOGS_DIRECTORY = None + self.WORKER_LOGS_DIRECTORY = None + self.LOG_LEVEL = None + self.VERBOSE = None + self.QUIET = None + self.DEGBUG = None + + self.logfile_config = None + self.housekeeper_job_config = None + + self._reset_logfiles = reset_logfiles + + self.formatters = { + "simple": { + "class": "logging.Formatter", + "format": AugurLogging.simple_format_string + }, + "verbose": { + "class": "logging.Formatter", + "format": AugurLogging.verbose_format_string + }, + "cli": { + "class": "logging.Formatter", + "format": AugurLogging.cli_format_string + }, + "config": { + "class": "logging.Formatter", + "format": AugurLogging.config_format_string + }, + "error": { + "class": "logging.Formatter", + "format": AugurLogging.error_format_string + } + } + + self._configure_cli_logger() + + level = logging.INFO + config_handler = StreamHandler() + config_handler.setFormatter(Formatter(fmt=AugurLogging.config_format_string)) + config_handler.setLevel(level) + + config_initialization_logger = logging.getLogger("augur.config") + config_initialization_logger.setLevel(level) + config_initialization_logger.handlers = [] + config_initialization_logger.addHandler(config_handler) + config_initialization_logger.propagate = False + + coloredlogs.install(level=level, logger=config_initialization_logger, fmt=AugurLogging.config_format_string) + + if disable_logs: + self._disable_all_logging() + + + def _disable_all_logging(self): + for logger in ["augur", "augur.application", "augur.housekeeper", "augur.config", "augur.cli", "root"]: + lg = logging.getLogger(logger) + lg.disabled = True + + def _configure_cli_logger(self): + cli_handler = StreamHandler() + cli_handler.setLevel(logging.INFO) + + cli_logger = logging.getLogger("augur.cli") + cli_logger.setLevel(logging.INFO) + cli_logger.handlers = [] + cli_logger.addHandler(cli_handler) + cli_logger.propagate = False + + coloredlogs.install(level=logging.INFO, logger=cli_logger, fmt=AugurLogging.cli_format_string) + + def _set_config(self, augur_config): + self.LOGS_DIRECTORY = AugurLogging.get_log_directories(augur_config, self._reset_logfiles) + self.LOG_LEVEL = augur_config.get_value("Logging", "log_level") + self.QUIET = int(augur_config.get_value("Logging", "quiet")) + self.DEBUG = int(augur_config.get_value("Logging", "debug")) + self.VERBOSE = int(augur_config.get_value("Logging", "verbose")) + # self.JOB_NAMES = [job["model"] for job in deepcopy(augur_config.get_value("Housekeeper", "jobs"))] + + if self.QUIET: + self._disable_all_logging() + + if self.DEBUG: + self.LOG_LEVEL = "DEBUG" + self.VERBOSE = True + + if self.VERBOSE: + self.FORMATTER = "verbose" + else: + self.FORMATTER = "simple" + self.format_string = self.formatters[self.FORMATTER]["format"] + + def configure_logging(self, augur_config): + self._set_config(augur_config) + self._configure_logfiles() + self._configure_cli_logger() + self._configure_gunicorn_logging() + logger.debug("Loggers are fully configured") + + def _configure_logfiles(self): + self.logfile_config = { + "version": 1, + "disable_existing_loggers": True, + "formatters": self.formatters, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "formatter": self.FORMATTER, + "level": self.LOG_LEVEL + }, + "logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "augur.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "errorfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "augur.err", + 
"mode": "a", + "level": logging.WARNING, + "formatter": "error" + }, + "server_logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "gunicorn.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "housekeeper_logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "housekeeper.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "housekeeper_errorfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "housekeeper.err", + "mode": "a", + "level": logging.WARNING, + "formatter": "error", + }, + }, + "loggers": { + "augur": { + "handlers": ["console", "logfile", "errorfile"], + "level": self.LOG_LEVEL + }, + "augur.server": { + "handlers": ["server_logfile"], + "level": self.LOG_LEVEL, + "propagate": False + }, + "augur.housekeeper": { + "handlers": ["housekeeper_logfile", "housekeeper_errorfile"], + "level": self.LOG_LEVEL, + }, + "augur.jobs": { + "handlers": ["housekeeper_logfile", "housekeeper_errorfile", "logfile", "errorfile"], + "level": self.LOG_LEVEL, + "propagate": False + } + }, + "root": { + "handlers": [], + "level": self.LOG_LEVEL + } + } + + logging.config.dictConfig(self.logfile_config) + for logger_name in ["augur", "augur.housekeeper", "augur.jobs"]: + coloredlogs.install(logger=logging.getLogger(logger_name), level=self.LOG_LEVEL, fmt=self.format_string) + + logger.debug("Logfiles initialized") + logger.debug("Logs will be written to: " + self.LOGS_DIRECTORY) + + def initialize_housekeeper_logging_listener(self): + queue = Queue() + self.housekeeper_job_config = { + "version": 1, + "disable_existing_loggers": True, + "formatters": self.formatters, + "handlers": { + "queue": { + "class": "logging.handlers.QueueHandler", + "queue": queue + } + }, + "root": { + "handlers": ["queue"], + "level": self.LOG_LEVEL + } + } + + stop_event = Event() + self.lp = Process(target=logging_listener_process, name='listener', + args=(queue, stop_event, self.logfile_config)) + self.lp.start() + sleep(2) # just to let it fully start up + self.stop_event = stop_event + logger.debug("Houseekeeper logging listener initialized") + + def get_config(self): + return { + "log_level": self.LOG_LEVEL, + "quiet": self.QUIET, + "verbose": self.VERBOSE, + "debug": self.DEBUG, + "format_string": self.format_string + } + + def _configure_gunicorn_logging(self): + gunicorn_log_file = self.LOGS_DIRECTORY + "gunicorn.log" + self.gunicorn_logging_options = { + "errorlog": gunicorn_log_file, + "accesslog": gunicorn_log_file, + "loglevel": self.LOG_LEVEL, + "capture_output": False + } + +def logging_listener_process(queue, stop_event, config): + """ + This could be done in the main process, but is just done in a separate + process for illustrative purposes. + + This initialises logging according to the specified configuration, + starts the listener and waits for the main process to signal completion + via the event. The listener is then stopped, and the process exits. + """ + logging.config.dictConfig(config) + listener = logging.handlers.QueueListener(queue, AugurLoggingHandler()) + listener.start() + try: + stop_event.wait() + except KeyboardInterrupt: + pass + finally: + listener.stop() + +class AugurLoggingHandler: + """ + A simple handler for logging events. It runs in the listener process and + dispatches events to loggers based on the name in the received record, + which then get dispatched, by the logging system, to the handlers + configured for those loggers. 
+ """ + + def handle(self, record): + if record.name == "root": + logger = logging.getLogger() + else: + logger = logging.getLogger(record.name) + + record.processName = '%s (for %s)' % (current_process().name, record.processName) + logger.handle(record) diff --git a/augur/metrics/__init__.py b/augur/metrics/__init__.py --- a/augur/metrics/__init__.py +++ b/augur/metrics/__init__.py @@ -1 +1,38 @@ -from .metrics import Metrics \ No newline at end of file +import os +import glob +import sys +import inspect +import types +import importlib +import logging + +logger = logging.getLogger(__name__) + +class Metrics(): + def __init__(self, app): + logger.debug("Loading metrics") + self.database = app.database + self.spdx_db = app.spdx_database + + self.models = [] #TODO: standardize this + for filename in glob.iglob("augur/metrics/**"): + file_id = get_file_id(filename) + if not file_id.startswith('__') and filename.endswith('.py') and file_id != "metrics": + self.models.append(file_id) + + for model in self.models: + importlib.import_module(f"augur.metrics.{model}") + add_metrics(self, f"augur.metrics.{model}") + +def get_file_id(path): + return os.path.splitext(os.path.basename(path))[0] + +def add_metrics(metrics, module_name): + # find all unbound endpoint functions objects + # (ones that have metadata) defined the given module_name + # and bind them to the metrics class + for name, obj in inspect.getmembers(sys.modules[module_name]): + if inspect.isfunction(obj) == True: + if hasattr(obj, 'is_metric') == True: + setattr(metrics, name, types.MethodType(obj, metrics)) + diff --git a/augur/metrics/insight.py b/augur/metrics/insight.py --- a/augur/metrics/insight.py +++ b/augur/metrics/insight.py @@ -6,8 +6,7 @@ import pandas as pd from augur.util import register_metric - -@register_metric() +@register_metric(type="repo_group_only") def top_insights(self, repo_group_id, num_repos=6): """ Timeseries of pull request acceptance rate (expressed as the ratio of pull requests merged on a date to the count of pull requests opened on a date) diff --git a/augur/metrics/metrics.py b/augur/metrics/metrics.py deleted file mode 100644 --- a/augur/metrics/metrics.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import glob -import sys -import inspect -import types -import importlib -from augur import logger - -class Metrics(): - def __init__(self, app): - self.database = app.database - self.spdx_db = app.spdx_db - - models = [] #TODO: standardize this - for filename in glob.iglob("augur/metrics/**"): - file_id = get_file_id(filename) - if not file_id.startswith('__') and filename.endswith('.py') and file_id != "metrics": - models.append(file_id) - - for model in models: - importlib.import_module(f"augur.metrics.{model}") - - for model in models: - add_metrics(self, f"augur.metrics.{model}") - -def get_file_id(path): - return os.path.splitext(os.path.basename(path))[0] - -def add_metrics(metrics, module_name): - # find all unbound endpoint functions objects - # (ones that have metadata) defined the given module_name - # and bind them to the metrics class - # Derek are you proud of me - for name, obj in inspect.getmembers(sys.modules[module_name]): - if inspect.isfunction(obj) == True: - if hasattr(obj, 'metadata') == True: - setattr(metrics, name, types.MethodType(obj, metrics)) - diff --git a/augur/metrics/release.py b/augur/metrics/release.py new file mode 100644 --- /dev/null +++ b/augur/metrics/release.py @@ -0,0 +1,88 @@ +""" +Metrics that provide data about releases +""" + +import datetime +import sqlalchemy 
as s +import pandas as pd +from augur.util import register_metric + +@register_metric() +def releases(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): + """ Returns a timeseris of new reviews or pull requests opened + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of new releases/period + """ + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + if not repo_id: + reviews_SQL = s.sql.text(""" + SELECT + res.repo_name, + res.release_id, + res.release_name, + res.release_description, + res.release_author, + res.release_created_at, + res.release_published_at, + res.release_updated_at, + res.release_is_draft, + res.release_is_prerelease, + res.release_tag_name, + res.release_url, + COUNT(res) + FROM ( + SELECT + releases.* + repo.repo_name + FROM + releases LEFT JOIN repo ON releases.repo_id = repo.repo_id + WHERE + repo.repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id ) + ) as res + GROUP BY releases.repo_id, releases.release_id + ORDER BY releases.release_published_at DESC + """) + + results = pd.read_sql(reviews_SQL, self.database, + params={'period': period, 'repo_group_id': repo_group_id, + 'begin_date': begin_date, 'end_date': end_date }) + return results + + else: + reviews_SQL = s.sql.text(""" + SELECT + repo.repo_name, + releases.release_id, + releases.release_name, + releases.release_description, + releases.release_author, + releases.release_created_at, + releases.release_published_at, + releases.release_updated_at, + releases.release_is_draft, + releases.release_is_prerelease, + releases.release_tag_name, + releases.release_url, + COUNT(releases) + FROM + releases LEFT JOIN repo ON releases.repo_id = repo.repo_id + GROUP BY repo.repo_id, releases.release_id + ORDER BY releases.release_published_at DESC + """) + + results = pd.read_sql(reviews_SQL, self.database, + params={'period': period, 'repo_id': repo_id, + 'begin_date': begin_date, 'end_date': end_date}) + return results + +def create_release_metrics(metrics): + add_metrics(metrics, __name__) \ No newline at end of file diff --git a/augur/metrics/repo_meta.py b/augur/metrics/repo_meta.py --- a/augur/metrics/repo_meta.py +++ b/augur/metrics/repo_meta.py @@ -5,9 +5,12 @@ import datetime import sqlalchemy as s import pandas as pd -from augur import logger -from augur.util import register_metric import math +import logging + +from augur.util import register_metric + +logger = logging.getLogger("augur") @register_metric() def code_changes(self, repo_group_id, repo_id=None, period='week', begin_date=None, end_date=None): @@ -321,7 +324,7 @@ def languages(self, repo_group_id, repo_id=None): results = pd.read_sql(languages_SQL, self.database, params={'repo_id': repo_id}) return results -@register_metric() +@register_metric(type="license") def license_files(self, license_id, spdx_binary, repo_group_id, repo_id=None,): """Returns the files related to a license diff --git a/augur/metrics/util/routes.py b/augur/metrics/util/routes.py deleted file mode 100644 --- a/augur/metrics/util/routes.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/augur/metrics/util/util.py 
b/augur/metrics/util/util.py deleted file mode 100644 --- a/augur/metrics/util/util.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/augur/models/__init__.py b/augur/models/__init__.py deleted file mode 100644 --- a/augur/models/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from sqlalchemy.orm import sessionmaker -from .user import User -from .repo import Repo, RepoGroup - - -__all__ = ['User', 'RepoGroup', 'Repo'] \ No newline at end of file diff --git a/augur/models/common.py b/augur/models/common.py deleted file mode 100644 --- a/augur/models/common.py +++ /dev/null @@ -1,2 +0,0 @@ -from sqlalchemy.ext.declarative import declarative_base -Base = declarative_base() \ No newline at end of file diff --git a/augur/models/repo.py b/augur/models/repo.py deleted file mode 100644 --- a/augur/models/repo.py +++ /dev/null @@ -1,48 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime -from sqlalchemy.orm import relationship -from .common import Base -from .user import user_has_repo_group - -repo_group_has_project = Table('repo_group_has_project', - Base.metadata, - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), - Column('repo_id', ForeignKey('repo.url'), primary_key=True), -) - -class Repo(Base): - """ - The Repo object models a VCS repository - """ - __tablename__ = 'repo' - - # Keys - url = Column(String(1024), primary_key=True) - vcs = Column(String(64), default='git') - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - repo_groups_member_of = relationship('RepoGroup', secondary=repo_group_has_project, back_populates='projects') - - def __repr__(self): - return f"<Repo(giturl='{self.password}')>" - - -class RepoGroup(Base): - """ - The RepoGroup class models lists of projects that a user wants to keep track of - """ - __tablename__ = 'repo_group' - - # Keys - id = Column(Integer, primary_key=True) - name = Column(String(128)) - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - projects = relationship('Repo', secondary=repo_group_has_project, back_populates='repo_groups_member_of') - users_of = relationship('User', secondary=user_has_repo_group, back_populates='repo_groups') \ No newline at end of file diff --git a/augur/models/user.py b/augur/models/user.py deleted file mode 100644 --- a/augur/models/user.py +++ /dev/null @@ -1,61 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime, Boolean -from sqlalchemy.orm import relationship -from sqlalchemy.ext.hybrid import hybrid_property -from .common import Base -from werkzeug.security import generate_password_hash, check_password_hash -from flask_login import UserMixin - -user_has_repo_group = Table('user_has_repo_group', - Base.metadata, - Column('user_id', ForeignKey('user.id'), primary_key=True), - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), -) - -class User(Base): - """ - The User object models users in the database. 
- """ - __tablename__ = 'user' - - # Keys - id = Column(Integer, primary_key=True) - username = Column(String(64), unique=True, nullable=False) - email = Column(String(64), unique=True, nullable=False) - - # Fields - password_hash = Column(String(128)) - email_confirmation_token = Column(String(128), nullable=True) - created_at = Column(DateTime, default=datetime.datetime.utcnow) - password_updated_at = Column(DateTime, default=datetime.datetime.utcnow) - last_login_at = Column(DateTime, nullable=True) - authenticated = Column(Boolean, default=False) - active = Column(Boolean, default=True) - administrator = Column(Boolean, default=False) - - # Foreign Keys - repo_groups = relationship('RepoGroup', secondary=user_has_repo_group, back_populates='users_of') - - def get_id(self): - return self.id - - def __repr__(self): - return f"<User(username='{self.username}', email='{self.email}')>" - - @hybrid_property - def password(self): - return self.password_hash - - @password.setter - def password(self, password): - self.password_hash = generate_password_hash(password) - - def check_password(self, password): - return check_password_hash(self.password_hash, password) - - def is_authenticated(self): - return self.authenticated - - def is_active(self): - # False as we do not support annonymity - return self.active diff --git a/augur/routes/__init__.py b/augur/routes/__init__.py --- a/augur/routes/__init__.py +++ b/augur/routes/__init__.py @@ -1,35 +1,34 @@ + +import logging import importlib import os import glob +import sys +import inspect -from augur import logger +logger = logging.getLogger(__name__) def get_route_files(): route_files = [] - metric_route_files = [] def get_file_id(path): return os.path.splitext(os.path.basename(path))[0] - for filename in glob.iglob("**/routes/*"): + for filename in glob.iglob("augur/routes/*"): file_id = get_file_id(filename) if not file_id.startswith('__') and filename.endswith('.py'): route_files.append(file_id) - for filename in glob.iglob("**/routes/metrics/*"): - file_id = get_file_id(filename) - if not file_id.startswith('__') and filename.endswith('.py'): - metric_route_files.append(file_id) - - return route_files, metric_route_files + return route_files -route_files, metric_route_files = get_route_files() +route_files = get_route_files() def create_routes(server): for route_file in route_files: module = importlib.import_module('.' + route_file, 'augur.routes') module.create_routes(server) - for route_file in metric_route_files: - module = importlib.import_module('.' + route_file, 'augur.routes.metrics') - module.create_routes(server) + for name, obj in inspect.getmembers(server.augur_app.metrics): + if hasattr(obj, 'is_metric') == True: + if obj.metadata['type'] == "standard": + server.add_standard_metric(obj, obj.metadata['endpoint']) diff --git a/augur/routes/batch.py b/augur/routes/batch.py --- a/augur/routes/batch.py +++ b/augur/routes/batch.py @@ -10,9 +10,10 @@ from sqlalchemy import exc from flask import request, Response from augur.util import metric_metadata -from augur import logger import json +logger = logging.getLogger(__name__) + def create_routes(server): @server.app.route('/{}/batch'.format(server.api_version), methods=['GET', 'POST']) diff --git a/augur/routes/broker.py b/augur/routes/broker.py --- a/augur/routes/broker.py +++ b/augur/routes/broker.py @@ -9,6 +9,9 @@ import requests from flask import request, Response +logger = logging.getLogger(__name__) + +# TODO: not this... 
def worker_start(worker_name=None): process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True) @@ -26,12 +29,12 @@ def send_task(worker_proxy): j = r.json() if 'status' not in j: - logging.info("Worker: {}'s heartbeat did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) + logger.error("Worker: {}'s heartbeat did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) worker_proxy['status'] = 'Disconnected' return if j['status'] != 'alive': - logging.info("Worker: {} is busy, setting its status as so.\n".format(worker_id)) + logger.info("Worker: {} is busy, setting its status as so.\n".format(worker_id)) return # Want to check user-created job requests first @@ -43,16 +46,16 @@ def send_task(worker_proxy): new_task = maintain_queue.pop(0) else: - logging.info("Both queues are empty for worker {}\n".format(worker_id)) + logger.debug("Both queues are empty for worker {}\n".format(worker_id)) worker_proxy['status'] = 'Idle' return - logging.info("Worker {} is idle, preparing to send the {} task to {}\n".format(worker_id, new_task['display_name'], task_endpoint)) + logger.info("Worker {} is idle, preparing to send the {} task to {}\n".format(worker_id, new_task['display_name'], task_endpoint)) try: requests.post(task_endpoint, json=new_task) worker_proxy['status'] = 'Working' except: - logging.info("Sending Worker: {} a task did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) + logger.error("Sending Worker: {} a task did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) worker_proxy['status'] = 'Disconnected' # If the worker died, then restart it worker_start(worker_id.split('.')[len(worker_id.split('.')) - 2]) @@ -71,9 +74,9 @@ def task(): for given_component in list(task['given'].keys()): given.append(given_component) model = task['models'][0] - logging.info("Broker recieved a new user task ... checking for compatible workers for given: " + str(given) + " and model(s): " + str(model) + "\n") + logger.info("Broker recieved a new user task ... 
checking for compatible workers for given: " + str(given) + " and model(s): " + str(model) + "\n") - logging.info("Broker's list of all workers: {}\n".format(server.broker._getvalue().keys())) + logger.debug("Broker's list of all workers: {}\n".format(server.broker._getvalue().keys())) worker_found = False compatible_workers = {} @@ -83,7 +86,7 @@ def task(): if type(server.broker[worker_id]._getvalue()) != dict: continue - logging.info("Considering compatible worker: {}\n".format(worker_id)) + logger.info("Considering compatible worker: {}\n".format(worker_id)) # Group workers by type (all gh workers grouped together etc) worker_type = worker_id.split('.')[len(worker_id.split('.'))-2] @@ -91,28 +94,28 @@ def task(): # Make worker that is prioritized the one with the smallest sum of task queues if (len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue'])) < min([compatible_workers[w]['task_load'] for w in compatible_workers.keys() if worker_type == w]): - logging.info("Worker id: {} has the smallest task load encountered so far: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']))) + logger.info("Worker id: {} has the smallest task load encountered so far: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']))) compatible_workers[worker_type]['task_load'] = len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']) compatible_workers[worker_type]['worker_id'] = worker_id for worker_type in compatible_workers.keys(): worker_id = compatible_workers[worker_type]['worker_id'] worker = server.broker[worker_id] - logging.info("Final compatible worker chosen: {} with smallest task load: {} found to work on task: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']), task)) + logger.info("Final compatible worker chosen: {} with smallest task load: {} found to work on task: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']), task)) if task['job_type'] == "UPDATE": worker['user_queue'].append(task) - logging.info("Added task for model: {}. New length of worker {}'s user queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['user_queue'])))) + logger.info("Added task for model: {}. New length of worker {}'s user queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['user_queue'])))) elif task['job_type'] == "MAINTAIN": worker['maintain_queue'].append(task) - logging.info("Added task for model: {}. New length of worker {}'s maintain queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['maintain_queue'])))) + logger.info("Added task for model: {}. 
New length of worker {}'s maintain queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['maintain_queue'])))) if worker['status'] == 'Idle': send_task(worker) worker_found = True # Otherwise, let the frontend know that the request can't be served if not worker_found: - logging.info("Augur does not have knowledge of any workers that are capable of handing the request: {}\n".format(task)) + logger.warning("Augur does not have knowledge of any workers that are capable of handing the request: {}\n".format(task)) return Response(response=task, status=200, @@ -124,7 +127,7 @@ def worker(): and telling the broker to add this worker to the set it maintains """ worker = request.json - logging.info("Recieved HELLO message from worker: {}\n".format(worker['id'])) + logger.info("Recieved HELLO message from worker: {}\n".format(worker['id'])) if worker['id'] not in server.broker: server.broker[worker['id']] = server.manager.dict() server.broker[worker['id']]['id'] = worker['id'] @@ -139,7 +142,7 @@ def worker(): server.broker[worker['id']]['status'] = 'Idle' server.broker[worker['id']]['location'] = worker['location'] else: - logging.info("Worker: {} has been reconnected.\n".format(worker['id'])) + logger.info("Worker: {} has been reconnected.\n".format(worker['id'])) models = server.broker[worker['id']]['models'] givens = server.broker[worker['id']]['given'] user_queue = server.broker[worker['id']]['user_queue'] @@ -157,7 +160,7 @@ def worker(): def sync_queue(): task = request.json worker = task['worker_id'] - logging.info("Message recieved that worker {} completed task: {}\n".format(worker,task)) + logger.info("Message recieved that worker {} completed task: {}\n".format(worker,task)) try: models = server.broker[worker]['models'] givens = server.broker[worker]['given'] @@ -167,8 +170,8 @@ def sync_queue(): if server.broker[worker]['status'] != 'Disconnected': send_task(server.broker[worker]) except Exception as e: - logging.info("Ran into error: {}\n".format(repr(e))) - logging.info("A past instance of the {} worker finished a previous leftover task.\n".format(worker)) + logger.error("Ran into error: {}\n".format(repr(e))) + logger.error("A past instance of the {} worker finished a previous leftover task.\n".format(worker)) return Response(response=task, status=200, @@ -190,7 +193,7 @@ def get_status(): @server.app.route('/{}/workers/remove'.format(server.api_version), methods=['POST']) def remove_worker(): worker = request.json - logging.info("Recieved a message to disconnect worker: {}\n".format(worker)) + logger.info("Recieved a message to disconnect worker: {}\n".format(worker)) server.broker[worker['id']]['status'] = 'Disconnected' return Response(response=worker, status=200, @@ -200,13 +203,13 @@ def remove_worker(): def task_error(): task = request.json worker_id = task['worker_id'] - logging.info("Recieved a message that {} ran into an error on task: {}\n".format(worker_id, task)) + logger.error("Recieved a message that {} ran into an error on task: {}\n".format(worker_id, task)) if worker_id in server.broker: if server.broker[worker_id]['status'] != 'Disconnected': - logging.info("{} ran into error while completing task: {}\n".format(worker_id, task)) + logger.error("{} ran into error while completing task: {}\n".format(worker_id, task)) send_task(server.broker[worker_id]) else: - logging.info("A previous instance of {} ran into error while completing task: {}\n".format(worker_id, task)) + logger.error("A previous instance of {} ran into error while completing task: 
{}\n".format(worker_id, task)) return Response(response=request.json, status=200, mimetype="application/json") \ No newline at end of file diff --git a/augur/routes/manager.py b/augur/routes/manager.py --- a/augur/routes/manager.py +++ b/augur/routes/manager.py @@ -285,15 +285,15 @@ def get_repo_name(self): repo = self.name return repo[repo.find('/')+1:] -def authenticate_request(app, request): +def authenticate_request(augur_app, request): # do I like doing it like this? not at all # do I have the time to implement a better solution right now? not at all - user = app.read_config('Database', 'user') - password = app.read_config('Database', 'password') - host = app.read_config('Database', 'host') - port = app.read_config('Database', 'port') - dbname = app.read_config('Database', 'name') + user = augur_app.config.get_value('Database', 'user') + password = augur_app.config.get_value('Database', 'password') + host = augur_app.config.get_value('Database', 'host') + port = augur_app.config.get_value('Database', 'port') + dbname = augur_app.config.get_value('Database', 'name') DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( user, password, host, port, dbname diff --git a/augur/routes/metrics/commit.py b/augur/routes/metrics/commit.py deleted file mode 100644 --- a/augur/routes/metrics/commit.py +++ /dev/null @@ -1,8 +0,0 @@ -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.annual_commit_count_ranked_by_new_repo_in_repo_group,'annual-commit-count-ranked-by-new-repo-in-repo-group') - - server.add_standard_metric(metrics.annual_commit_count_ranked_by_repo_in_repo_group,'annual-commit-count-ranked-by-repo-in-repo-group') - diff --git a/augur/routes/metrics/contributor.py b/augur/routes/metrics/contributor.py deleted file mode 100644 --- a/augur/routes/metrics/contributor.py +++ /dev/null @@ -1,17 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.contributors, 'contributors') - - server.add_standard_metric(metrics.contributors_new, 'contributors-new') - - server.add_standard_metric(metrics.committers, 'committers') - - server.add_standard_metric(metrics.lines_changed_by_author,'lines-changed-by-author') - - server.add_standard_metric(metrics.top_committers, 'top-committers') - - server.add_standard_metric(metrics.contributors_code_development, 'contributors-code-development') \ No newline at end of file diff --git a/augur/routes/metrics/experimental.py b/augur/routes/metrics/experimental.py deleted file mode 100644 --- a/augur/routes/metrics/experimental.py +++ /dev/null @@ -1,6 +0,0 @@ - -def create_routes(server): - - metrics = server.augur_app.metrics - - diff --git a/augur/routes/metrics/insight.py b/augur/routes/metrics/insight.py deleted file mode 100644 --- a/augur/routes/metrics/insight.py +++ /dev/null @@ -1,13 +0,0 @@ -#SPDX-License-Identifier: MIT -from flask import Response - -def create_routes(server): - - metrics = server.augur_app.metrics - - @server.app.route(f"/{server.api_version}/repo-groups/<repo_group_id>/top-insights") - def top_insights(repo_group_id): - data = server.transform(metrics.top_insights, args=[repo_group_id]) - return Response(response=data, - status=200, - mimetype="application/json") diff --git a/augur/routes/metrics/issue.py b/augur/routes/metrics/issue.py deleted file mode 100644 --- a/augur/routes/metrics/issue.py +++ /dev/null @@ -1,39 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_routes(server): - - 
metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.issues_new, 'issues-new') - - server.add_standard_metric(metrics.issues_active, 'issues-active') - - server.add_standard_metric(metrics.issues_closed, 'issues-closed') - - server.add_standard_metric(metrics.issue_duration, 'issue-duration') - - server.add_standard_metric(metrics.issue_participants, 'issue-participants') - - server.add_standard_metric(metrics.issue_backlog, 'issue-backlog') - - server.add_standard_metric(metrics.issue_throughput, 'issue-throughput') - - server.add_standard_metric(metrics.issues_first_time_opened, 'issues-first-time-opened') - - server.add_standard_metric(metrics.issues_first_time_closed, 'issues-first-time-closed') - - server.add_standard_metric(metrics.open_issues_count, 'open-issues-count') - - server.add_standard_metric(metrics.closed_issues_count, 'closed-issues-count') - - server.add_standard_metric(metrics.issues_open_age, 'issues-open-age') - - server.add_standard_metric(metrics.issues_closed_resolution_duration, 'issues-closed-resolution-duration') - - server.add_standard_metric(metrics.issues_maintainer_response_duration, 'issues-maintainer-response-duration') - - server.add_standard_metric(metrics.average_issue_resolution_time, 'average-issue-resolution-time') - - server.add_standard_metric(metrics.issue_comments_mean, 'issue-comments-mean') - - server.add_standard_metric(metrics.issue_comments_mean_std, 'issue-comments-mean-std') diff --git a/augur/routes/metrics/message.py b/augur/routes/metrics/message.py deleted file mode 100644 --- a/augur/routes/metrics/message.py +++ /dev/null @@ -1,6 +0,0 @@ - -def create_routes(server): - - metrics = server.augur_app.metrics - - diff --git a/augur/routes/metrics/platform.py b/augur/routes/metrics/platform.py deleted file mode 100644 --- a/augur/routes/metrics/platform.py +++ /dev/null @@ -1,4 +0,0 @@ - -def create_routes(server): - metrics = server.augur_app.metrics - diff --git a/augur/routes/metrics/pull_request.py b/augur/routes/metrics/pull_request.py deleted file mode 100644 --- a/augur/routes/metrics/pull_request.py +++ /dev/null @@ -1,31 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.reviews, 'reviews') - - server.add_standard_metric(metrics.reviews_accepted, 'reviews-accepted') - - server.add_standard_metric(metrics.reviews_declined, 'reviews-declined') - - server.add_standard_metric(metrics.review_duration, 'review-duration') - - server.add_standard_metric(metrics.pull_requests_merge_contributor_new, 'pull-requests-merge-contributor-new') - - server.add_standard_metric(metrics.pull_request_acceptance_rate, 'pull-request-acceptance-rate') - - server.add_standard_metric(metrics.pull_requests_closed_no_merge, 'pull-requests-closed-no-merge') - - server.add_standard_metric(metrics.pull_request_merged_status_counts, 'pull-request-merged-status-counts') - - server.add_standard_metric(metrics.pull_request_average_time_to_close, 'pull-request-average-time-to-close') - - server.add_standard_metric(metrics.pull_request_average_time_between_responses, 'pull-request-average-time-between-responses') - - server.add_standard_metric(metrics.pull_request_average_commit_counts, 'pull-request-average-commit-counts') - - server.add_standard_metric(metrics.pull_request_average_event_counts, 'pull-request-average-event-counts') - - server.add_standard_metric(metrics.pull_request_average_time_to_responses_and_close, 
'pull-request-average-time-to-responses-and-close') diff --git a/augur/routes/metrics/repo_meta.py b/augur/routes/metrics/repo_meta.py deleted file mode 100644 --- a/augur/routes/metrics/repo_meta.py +++ /dev/null @@ -1,54 +0,0 @@ -#SPDX-License-Identifier: MIT -from flask import Response - -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.code_changes, 'code-changes') - - server.add_standard_metric(metrics.code_changes_lines, 'code-changes-lines') - - @server.app.route(f"/{server.api_version}/<license_id>/<spdx_binary>/<repo_group_id>/<repo_id>/license-files") - def get_license_files(license_id, spdx_binary, repo_group_id, repo_id): - arguments = [license_id, spdx_binary, repo_group_id, repo_id] - license_files = server.transform(metrics.license_files, args=arguments) - return Response(response=license_files, - status=200, - mimetype="application/json") - - server.add_standard_metric(metrics.sbom_download, 'sbom-download') - - server.add_standard_metric(metrics.sub_projects, 'sub-projects') - - server.add_standard_metric(metrics.cii_best_practices_badge, 'cii-best-practices-badge') - - server.add_standard_metric(metrics.forks, 'forks') - - server.add_standard_metric(metrics.fork_count, 'fork-count') - - server.add_standard_metric(metrics.languages, 'languages') - - server.add_standard_metric(metrics.license_count, 'license-count') - - server.add_standard_metric(metrics.license_coverage, 'license-coverage') - - server.add_standard_metric(metrics.license_declared, 'license-declared') - - server.add_standard_metric(metrics.stars, 'stars') - - server.add_standard_metric(metrics.stars_count, 'stars-count') - - server.add_standard_metric(metrics.watchers, 'watchers') - - server.add_standard_metric(metrics.watchers_count, 'watchers-count') - - server.add_standard_metric(metrics.annual_lines_of_code_count_ranked_by_new_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-new-repo-in-repo-group') - - server.add_standard_metric(metrics.annual_lines_of_code_count_ranked_by_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-repo-in-repo-group') - - server.add_standard_metric(metrics.lines_of_code_commit_counts_by_calendar_year_grouped,'lines-of-code-commit-counts-by-calendar-year-grouped') - - server.add_standard_metric(metrics.average_weekly_commits, 'average-weekly-commits') - - server.add_standard_metric(metrics.aggregate_summary, 'aggregate-summary') diff --git a/augur/routes/nonstandard_metrics.py b/augur/routes/nonstandard_metrics.py new file mode 100644 --- /dev/null +++ b/augur/routes/nonstandard_metrics.py @@ -0,0 +1,24 @@ +import base64 +import sqlalchemy as s +import pandas as pd +import json +from flask import Response + +def create_routes(server): + + metrics = server.augur_app.metrics + + @server.app.route(f"/{server.api_version}/<license_id>/<spdx_binary>/<repo_group_id>/<repo_id>/license-files") + def get_license_files(license_id, spdx_binary, repo_group_id, repo_id): + arguments = [license_id, spdx_binary, repo_group_id, repo_id] + license_files = server.transform(metrics.license_files, args=arguments) + return Response(response=license_files, + status=200, + mimetype="application/json") + + @server.app.route(f"/{server.api_version}/repo-groups/<repo_group_id>/top-insights") + def top_insights(repo_group_id): + data = server.transform(metrics.top_insights, args=[repo_group_id]) + return Response(response=data, + status=200, + mimetype="application/json") \ No newline at end of file diff --git a/augur/routes/util.py 
b/augur/routes/util.py --- a/augur/routes/util.py +++ b/augur/routes/util.py @@ -6,8 +6,6 @@ def create_routes(server): - metrics = server.augur_app.metrics - @server.app.route('/{}/repo-groups'.format(server.api_version)) def get_all_repo_groups(): #TODO: make this name automatic - wrapper? repoGroupsSQL = s.sql.text(""" @@ -202,7 +200,7 @@ def get_issues(repo_group_id, repo_id=None): @server.app.route('/{}/api-port'.format(server.api_version)) def api_port(): - response = {'port': server.augur_app.read_config('Server', 'port')} + response = {'port': server.augur_app.config.get_value('Server', 'port')} return Response(response=json.dumps(response), status=200, mimetype="application/json") diff --git a/augur/server.py b/augur/server.py --- a/augur/server.py +++ b/augur/server.py @@ -3,51 +3,50 @@ Creates a WSGI server that serves the Augur REST API """ +import glob +import sys +import inspect +import types import json import os import base64 +import logging + from flask import Flask, request, Response, redirect from flask_cors import CORS import pandas as pd + import augur -from augur.util import logger from augur.routes import create_routes AUGUR_API_VERSION = 'api/unstable' -class VueCompatibleFlask(Flask): - jinja_options = Flask.jinja_options.copy() - jinja_options.update(dict( - block_start_string='(%', - block_end_string='%)', - variable_start_string='%%', - variable_end_string='%%', - comment_start_string='(#', - comment_end_string='#)', - )) - +logger = logging.getLogger(__name__) class Server(object): """ Defines Augur's server's behavior """ - def __init__(self, frontend_folder='../frontend/public', manager=None, broker=None, housekeeper=None): + def __init__(self, augur_app=None): """ Initializes the server, creating both the Flask application and Augur application """ # Create Flask application - self.app = VueCompatibleFlask(__name__, static_folder=frontend_folder, template_folder=frontend_folder) + self.app = Flask(__name__) + logger.debug("Created Flask app") self.api_version = AUGUR_API_VERSION app = self.app CORS(app) app.url_map.strict_slashes = False - # Create Augur application - self.augur_app = augur.Application() + self.augur_app = augur_app + self.manager = augur_app.manager + self.broker = augur_app.broker + self.housekeeper = augur_app.housekeeper # Initialize cache - expire = int(self.augur_app.read_config('Server', 'cache_expire')) + expire = int(self.augur_app.config.get_value('Server', 'cache_expire')) self.cache = self.augur_app.cache.get_cache('server', expire=expire) self.cache.clear() @@ -55,10 +54,7 @@ def __init__(self, frontend_folder='../frontend/public', manager=None, broker=No self.show_metadata = False - self.manager = manager - self.broker = broker - self.housekeeper = housekeeper - + logger.debug("Creating API routes...") create_routes(self) ##################################### @@ -184,40 +180,3 @@ def add_standard_metric(self, function, endpoint, **kwargs): self.app.route(repo_endpoint)(self.routify(function, 'repo')) self.app.route(repo_group_endpoint)(self.routify(function, 'repo_group')) self.app.route(deprecated_repo_endpoint )(self.routify(function, 'deprecated_repo')) - -def run(): - """ - Runs server with configured hosts/ports - """ - server = Server() - host = server.augur_app.read_config('Server', 'host') - port = server.augur_app.read_config('Server', 'port') - Server().app.run(host=host, port=int(port), debug=True) - -wsgi_app = None -def wsgi(environ, start_response): - """ - Creates WSGI app - """ - global wsgi_app - if (wsgi_app 
is None): - app_instance = Server() - wsgi_app = app_instance.app - # Stuff to make proxypass work - script_name = environ.get('HTTP_X_SCRIPT_NAME', '') - if script_name: - environ['SCRIPT_NAME'] = script_name - path_info = environ['PATH_INFO'] - if path_info.startswith(script_name): - environ['PATH_INFO'] = path_info[len(script_name):] - - scheme = environ.get('HTTP_X_SCHEME', '') - if scheme: - environ['wsgi.url_scheme'] = scheme - server = environ.get('HTTP_X_FORWARDED_SERVER', '') - if server: - environ['HTTP_HOST'] = server - return wsgi_app(environ, start_response) - -if __name__ == "__main__": - run() diff --git a/augur/util.py b/augur/util.py --- a/augur/util.py +++ b/augur/util.py @@ -8,8 +8,9 @@ import types import sys import beaker +import logging -from augur import logger +logger = logging.getLogger(__name__) __ROOT = os.path.abspath(os.path.dirname(__file__)) def get_data_path(path): @@ -42,7 +43,7 @@ def get_cache(namespace, cache_manager=None): metric_metadata = [] def register_metric(metadata=None, **kwargs): """ - Decorates a function as being a metric + Register a function as being a metric """ if metadata is None: metadata = {} @@ -54,20 +55,19 @@ def decorate(function): if not hasattr(function, 'is_metric'): function.is_metric = True - function.metadata.update(metadata) - if kwargs.get('endpoint_type', None): - endpoint_type = kwargs.pop('endpoint_type') - if endpoint_type == 'repo': - function.metadata['repo_endpoint'] = kwargs.get('endpoint') - else: - function.metadata['group_endpoint'] = kwargs.get('endpoint') - function.metadata.update(dict(kwargs)) function.metadata['tag'] = re.sub('_', '-', function.__name__).lower() - function.metadata['metric_name'] = re.sub('_', ' ', function.__name__).title() + function.metadata['endpoint'] = function.metadata['tag'] + function.metadata['name'] = re.sub('_', ' ', function.__name__).title() function.metadata['model'] = re.sub(r'(.*\.)', '', function.__module__) - function.metadata['ID'] = "{}-{}".format(function.metadata['model'].lower(), function.metadata['tag']) + + if kwargs.get('type', None): + function.metadata['type'] = kwargs.get('type') + else: + function.metadata['type'] = "standard" + + function.metadata.update(metadata) return function return decorate \ No newline at end of file diff --git a/conftest.py b/conftest.py new file mode 100644 --- /dev/null +++ b/conftest.py @@ -0,0 +1,31 @@ +import pytest +import re + +from augur.application import Application +from augur.cli.run import initialize_components + +default_repo_id = "25430" +default_repo_group_id = "10" + +def create_full_routes(routes): + full_routes = [] + for route in routes: + route = re.sub("<default_repo_id>", default_repo_id, route) + route = re.sub("<default_repo_group_id>", default_repo_group_id, route) + route = "http://localhost:5000/api/unstable/" + route + full_routes.append(route) + return full_routes + [email protected](scope="session") +def augur_app(): + augur_app = Application(disable_logs=True) + return augur_app + [email protected](scope="session") +def metrics(augur_app): + return augur_app.metrics + [email protected](scope="session") +def client(augur_app): + flask_client = initialize_components(augur_app, disable_housekeeper=True).load() + return flask_client.test_client() diff --git a/metadata.py b/metadata.py --- a/metadata.py +++ b/metadata.py @@ -6,8 +6,8 @@ __short_description__ = "Python 3 package for free/libre and open-source software community metrics & data collection" -__version__ = "0.12.0" -__release__ = "0.12.0" 
+__version__ = "0.12.1" +__release__ = "0.12.1" __license__ = "MIT" __copyright__ = "CHAOSS & Augurlabs 2020" diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -44,10 +44,8 @@ "psycopg2-binary", "click", "psutil", - "gunicorn==19.9.0", - "six>=1.14.0", - "boto3", - "slack", + "gunicorn", + "six>=1.14.0" ], extras_require={ "dev": [ @@ -63,7 +61,7 @@ }, entry_points={ "console_scripts": [ - "augur=augur.runtime:run" + "augur=augur.cli._multicommand:run" ], } ) diff --git a/util/alembic/env.py b/util/alembic/env.py deleted file mode 100644 --- a/util/alembic/env.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import with_statement -from alembic import context -from sqlalchemy import engine_from_config, pool -from logging.config import fileConfig - - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -from augur.models.common import Base -target_metadata = Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, target_metadata=target_metadata, literal_binds=True) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - import augur.application - app = augur.application.Application() - - context.configure( - connection=app.db.connect(), - target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py b/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py deleted file mode 100644 --- a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Create basic tables - -Revision ID: 2eaa930b1f5a -Revises: -Create Date: 2019-02-09 16:10:24.251828 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = '2eaa930b1f5a' -down_revision = None -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - op.create_table('repo', - sa.Column('url', sa.String(length=1024), nullable=False), - sa.Column('vcs', sa.String(length=64), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('url') - ) - op.create_table('repo_group', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('id') - ) - op.create_table('user', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('username', sa.String(length=64), nullable=False), - sa.Column('email', sa.String(length=64), nullable=False), - sa.Column('password_hash', sa.String(length=128), nullable=True), - sa.Column('email_confirmation_token', sa.String(length=128), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('password_updated_at', sa.DateTime(), nullable=True), - sa.Column('last_login_at', sa.DateTime(), nullable=True), - sa.Column('authenticated', sa.Boolean(), nullable=True), - sa.Column('active', sa.Boolean(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('email'), - sa.UniqueConstraint('username') - ) - op.create_table('repo_group_has_project', - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.Column('repo_id', sa.String(length=1024), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['repo_id'], ['repo.url'], ), - sa.PrimaryKeyConstraint('repo_group_id', 'repo_id') - ) - op.create_table('user_has_repo_group', - sa.Column('user_id', sa.Integer(), nullable=False), - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['user_id'], ['user.id'], ), - sa.PrimaryKeyConstraint('user_id', 'repo_group_id') - ) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.drop_table('user_has_repo_group') - op.drop_table('repo_group_has_project') - op.drop_table('user') - op.drop_table('repo_group') - op.drop_table('repo') - # ### end Alembic commands ### diff --git a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py b/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py deleted file mode 100644 --- a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Add admin to User, name to RepoGroup - -Revision ID: a051167419fa -Revises: 2eaa930b1f5a -Create Date: 2019-02-17 13:09:42.138936 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = 'a051167419fa' -down_revision = '2eaa930b1f5a' -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.add_column('repo_group', sa.Column('name', sa.String(length=128), nullable=True)) - op.add_column('user', sa.Column('administrator', sa.Boolean(), nullable=True)) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - op.drop_column('user', 'administrator') - op.drop_column('repo_group', 'name') - # ### end Alembic commands ### diff --git a/workers/contributor_worker/contributor_worker/worker.py b/workers/contributor_worker/contributor_worker.py similarity index 68% rename from workers/contributor_worker/contributor_worker/worker.py rename to workers/contributor_worker/contributor_worker.py --- a/workers/contributor_worker/contributor_worker/worker.py +++ b/workers/contributor_worker/contributor_worker.py @@ -8,189 +8,50 @@ import statistics, logging, os, json, time import numpy as np import datetime -from workers.standard_methods import * #init_oauths, get_table_values, get_max_id, register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process, paginate -import warnings -warnings.filterwarnings('ignore') -class ContributorWorker: +from workers.worker_base import Worker + +class ContributorWorker(Worker): """ Worker that detects anomalies on a select few of our metrics task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self._task = task - self._child = None - self._queue = Queue() - self.db = None - self.tool_source = 'Contributor Worker' - self.tool_version = '0.0.1' # See __init__.py - self.data_source = 'Augur Commit Data' - self.finishing_task = False - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["contributors"] - } - ], - "config": [self.config] - } + def __init__(self, config={}): - self.results_counter = 0 + worker_type = "contributor_worker" - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) + given = [['git_url']] + models = ['contributors'] - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) + data_tables = ['contributors', 'contributors_aliases', 'contributor_affiliations', + 'issue_events', 'pull_request_events', 'issues', 'message', 'issue_assignees', + 'pull_request_assignees', 'pull_request_reviewers', 'pull_request_meta', 'pull_request_repo'] + operations_tables = ['worker_history', 'worker_job'] - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - # produce our own MetaData object - metadata = MetaData() - helper_metadata = MetaData() + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) - # we can reflect it ourselves from a database, using options - # such as 'only' to limit what tables we look at... 
- metadata.reflect(self.db, only=['contributors', 'contributors_aliases', 'contributor_affiliations', - 'issue_events', 'pull_request_events', 'issues', 'message', 'issue_assignees', - 'pull_request_assignees', 'pull_request_reviewers', 'pull_request_meta', 'pull_request_repo']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job']) - - # we can then produce a set of mappings from this MetaData. - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - # calling prepare() just sets up mapped classes and relationships. - Base.prepare() - HelperBase.prepare() - - # mapped classes are ready - self.contributors_table = Base.classes.contributors.__table__ - self.contributors_aliases_table = Base.classes.contributors_aliases.__table__ - self.contributor_affiliations_table = Base.classes.contributor_affiliations.__table__ - self.issue_events_table = Base.classes.issue_events.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.issues_table = Base.classes.issues.__table__ - self.message_table = Base.classes.message.__table__ - self.issue_assignees_table = Base.classes.issue_assignees.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5432/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if 
message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'contributors': - self.contributors_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass + # These 3 are included in every tuple the worker inserts (data collection info) + self.tool_source = 'Contributor Worker' + self.tool_version = '0.0.1' + self.data_source = 'Augur Commit Data' def contributors_model(self, entry_info, repo_id): + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's + self.logger.info("Querying starting ids info...\n") + + self.cntrb_id_inc = self.get_max_id('contributors', 'cntrb_id') + # Get and insert all users (emails) found by the facade worker self.insert_facade_contributors(entry_info, repo_id) # Get and insert all users github considers to be contributors for this repo - query_github_contributors(self, entry_info, repo_id) + self.query_github_contributors(entry_info, repo_id) - logging.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) + self.logger.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -242,7 +103,7 @@ def contributors_model(self, entry_info, repo_id): commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, \ params={'repo_id': repo_id}).to_json(orient="records")) - logging.info("We found {} distinct emails to search for in this repo (repo_id = {})".format( + self.logger.info("We found {} distinct emails to search for in this repo (repo_id = {})\n".format( len(commit_cntrbs), repo_id)) # For every unique commit contributor info combination... 
@@ -283,7 +144,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(times_used_tuple)) self.results_counter += 1 - logging.info("Updated cntrb_created_at and cntrb_last_used columns for existing " + self.logger.info("Updated cntrb_created_at and cntrb_last_used columns for existing " "tuple in the contributors table with email: {}\n".format(contributor['commit_email'])) # If cntrb_full_name column is not filled, go ahead and fill it bc we have that info @@ -297,7 +158,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(name_col)) - logging.info("Inserted cntrb_full_name column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_full_name column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) # If cntrb_canonical column is not filled, go ahead and fill it w main email bc @@ -312,7 +173,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(canonical_col)) - logging.info("Inserted cntrb_canonical column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_canonical column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) @@ -347,20 +208,20 @@ def contributors_model(self, entry_info, repo_id): url = 'https://api.github.com/search/users?q={}+in:email'.format( cmt_cntrb['email']) - logging.info("Hitting endpoint: " + url + " ...\n") + self.logger.info("Hitting endpoint: " + url + " ...\n") r = requests.get(url=url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) results = r.json() # If no matches or bad response, continue with other contributors if 'total_count' not in results: - logging.info("Search query returned an empty response, moving on...\n") + self.logger.info("Search query returned an empty response, moving on...\n") continue if results['total_count'] == 0: - logging.info("Search query did not return any results, moving on...\n") + self.logger.info("Search query did not return any results, moving on...\n") continue - logging.info("When searching for a contributor with info {}, we found the following users: {}\n".format( + self.logger.info("When searching for a contributor with info {}, we found the following users: {}\n".format( cmt_cntrb, results)) # Grab first result and make sure it has the highest match score @@ -370,9 +231,9 @@ def contributors_model(self, entry_info, repo_id): match = item cntrb_url = ("https://api.github.com/users/" + match['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) contributor = r.json() # Fill in all github information @@ -407,11 +268,12 @@ def contributors_model(self, entry_info, repo_id): } result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==self.cntrb_id_inc).values(cntrb_gh_info)) - logging.info("Updated existing tuple in the contributors table with github info after " + self.logger.info("Updated existing tuple in the 
contributors table with github info after " "a successful search query on a facade commit's author : {} {}\n".format(contributor, cntrb_gh_info)) # Dupe check + self.logger.info('Checking dupes.\n') dupe_cntrb_sql = s.sql.text(""" SELECT contributors.* FROM contributors inner join ( @@ -424,10 +286,23 @@ def contributors_model(self, entry_info, repo_id): dupe_cntrbs = pd.read_sql(dupe_cntrb_sql, self.db, params={}) - # Turn this column from nan to None - dupe_cntrbs['gh_user_id'] = dupe_cntrbs['gh_user_id'].where(pd.notnull(dupe_cntrbs['gh_user_id']), None) + self.logger.info(f'There are {len(dupe_cntrbs)} duplicates.\n') + + # Turn these columns from nan/nat to None + dupe_cntrbs['gh_user_id'] = dupe_cntrbs['gh_user_id'].where( + pd.notnull(dupe_cntrbs['gh_user_id']), None) + dupe_cntrbs['cntrb_last_used'] = dupe_cntrbs['cntrb_last_used'].astype( + object).where(dupe_cntrbs['cntrb_last_used'].notnull(), None) + dupe_cntrbs['cntrb_last_used'] = dupe_cntrbs['cntrb_last_used'].astype( + object).where(dupe_cntrbs['cntrb_last_used'].notnull(), None) for i, cntrb_existing in dupe_cntrbs.iterrows(): + + self.logger.info(f'Processing dupe: {cntrb_existing}.\n') + if i == 0: + self.logger.info('skipping first\n') + continue + cntrb_new = cntrb_existing.copy() del cntrb_new['cntrb_id'] del cntrb_new['data_collection_date'] @@ -447,22 +322,29 @@ def contributors_model(self, entry_info, repo_id): dupe_ids = pd.read_sql(dupe_ids_sql, self.db, params={'pk': pk, \ 'email': cntrb_new['cntrb_email']})['cntrb_id'].values.tolist() - self.map_new_id(self, dupe_ids, pk) + self.map_new_id(dupe_ids, pk) delete_dupe_ids_sql = s.sql.text(""" DELETE FROM contributors WHERE cntrb_id <> {} - AND cntrb_email = '{}' + AND cntrb_email = '{}'; """.format(pk, cntrb_new['cntrb_email'])) - self.db.execute(delete_dupe_ids_sql) + self.logger.info(f'Trying to delete dupes with sql: {delete_dupe_ids_sql}') + + try: + result = self.db.execute(delete_dupe_ids_sql) + except Exception as e: + self.logger.info(f'Deleting dupes failed with error: {e}') + + self.logger.info('Deleted duplicates.\n') # Register this task as completed - register_task_completion(self, entry_info, repo_id, "contributors") + self.register_task_completion(entry_info, repo_id, "contributors") def insert_facade_contributors(self, entry_info, repo_id): - logging.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) + self.logger.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -498,7 +380,7 @@ def insert_facade_contributors(self, entry_info, repo_id): """) commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, params={'repo_id': repo_id}).to_json(orient="records")) - logging.info("We found {} distinct contributors needing insertion (repo_id = {})".format( + self.logger.info("We found {} distinct contributors needing insertion (repo_id = {})".format( len(commit_cntrbs), repo_id)) for cntrb in commit_cntrbs: @@ -511,10 +393,10 @@ def insert_facade_contributors(self, entry_info, repo_id): 'cntrb_full_name': cntrb['name'] } result = self.db.execute(self.contributors_table.insert().values(cntrb_tuple)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) 
self.results_counter += 1 - logging.info("Inserted contributor: {}\n".format(cntrb['email'])) + self.logger.info("Inserted contributor: {}\n".format(cntrb['email'])) def handle_alias(self, tuple): cntrb_email = tuple['cntrb_email'] # canonical @@ -522,7 +404,7 @@ def handle_alias(self, tuple): cntrb_id = tuple['cntrb_id'] # Check existing contributors table tuple - existing_tuples = retrieve_tuple(self, {'cntrb_email': tuple['commit_email']}, ['contributors']) + existing_tuples = self.retrieve_tuple({'cntrb_email': tuple['commit_email']}, ['contributors']) if len(existing_tuples) == 0: """ Insert alias tuple into the contributor table """ @@ -543,15 +425,15 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc elif len(existing_tuples) > 1: # fix all dupe references to dupe cntrb ids before we delete them - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") - logging.info("For cntrb_email: {}".format(tuple['commit_email'])) + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") + self.logger.info("For cntrb_email: {}".format(tuple['commit_email'])) """ Insert alias tuple into the contributor table """ @@ -576,7 +458,7 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc @@ -610,14 +492,14 @@ def handle_alias(self, tuple): try: # Delete all dupes result = self.db.execute(deleteSQL) - logging.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) + self.logger.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) except Exception as e: - logging.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) + self.logger.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) else: #then there would be exactly 1 existing tuple, so that id is the one we want alias_id = existing_tuples[0]['cntrb_id'] - logging.info('Checking canonicals match.\n') + self.logger.info('Checking canonicals match.\n') alias_sql = s.sql.text(""" SELECT * FROM contributors @@ -636,14 +518,14 @@ def handle_alias(self, tuple): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_canonical==canonical_id_result.iloc[0]['cntrb_canonical'] ).values(canonical_col)) - logging.info("Updated cntrb_canonical column for existing tuple in the contributors " + self.logger.info("Updated cntrb_canonical column for existing tuple in the contributors " "table with email: {}\n".format(tuple['cntrb_email'])) # Now check existing alias table tuple - 
existing_tuples = retrieve_tuple(self, {'alias_email': commit_email}, ['contributors_aliases']) + existing_tuples = self.retrieve_tuple({'alias_email': commit_email}, ['contributors_aliases']) if len(existing_tuples) == 0: - logging.info("Finding cntrb_id for canonical email: {}".format(cntrb_email)) + self.logger.info("Finding cntrb_id for canonical email: {}".format(cntrb_email)) canonical_id_sql = s.sql.text(""" SELECT cntrb_id as canonical_id from contributors @@ -652,7 +534,7 @@ def handle_alias(self, tuple): canonical_id_result = json.loads(pd.read_sql(canonical_id_sql, self.db, params={'email': cntrb_email}).to_json( orient="records")) if len(canonical_id_result) > 1: - logging.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) + self.logger.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) alias_tuple = { 'cntrb_id': canonical_id_result[0]['canonical_id'], 'cntrb_a_id': alias_id, @@ -665,9 +547,9 @@ def handle_alias(self, tuple): } result = self.db.execute(self.contributors_aliases_table.insert().values(alias_tuple)) self.results_counter += 1 - logging.info("Inserted alias with email: {}\n".format(commit_email)) + self.logger.info("Inserted alias with email: {}\n".format(commit_email)) if len(existing_tuples) > 1: - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " "table AND NEED TO ADD DELETION LOGIC: {}\n".format(existing_tuples)) def map_new_id(self, dupe_ids, new_id): @@ -693,48 +575,49 @@ def map_new_id(self, dupe_ids, new_id): alias_result = self.db.execute(self.contributors_aliases_table.update().where( self.contributors_aliases_table.c.cntrb_a_id.in_(dupe_ids)).values(alias_update_col)) - logging.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) except Exception as e: - logging.info(f'Alias re-map already done... error: {e}') + self.logger.info(f'Alias re-map already done... 
error: {e}') issue_events_result = self.db.execute(self.issue_events_table.update().where( self.issue_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_events_result = self.db.execute(self.pull_request_events_table.update().where( self.pull_request_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issues_cntrb_result = self.db.execute(self.issues_table.update().where( self.issues_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issues_reporter_result = self.db.execute(self.issues_table.update().where( self.issues_table.c.reporter_id.in_(dupe_ids)).values(reporter_col)) - logging.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issue_assignee_result = self.db.execute(self.issue_assignees_table.update().where( self.issue_assignees_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_assignee_result = self.db.execute(self.pull_request_assignees_table.update().where( self.pull_request_assignees_table.c.contrib_id.in_(dupe_ids)).values(pr_assignee_col)) - logging.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) message_result = self.db.execute(self.message_table.update().where( self.message_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_reviewers_result = self.db.execute(self.pull_request_reviewers_table.update().where( self.pull_request_reviewers_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated 
cntrb_id column for tuple in the pull_request_reviewers table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_reviewers table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_meta_result = self.db.execute(self.pull_request_meta_table.update().where( self.pull_request_meta_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_repo_result = self.db.execute(self.pull_request_repo_table.update().where( self.pull_request_repo_table.c.pr_cntrb_id.in_(dupe_ids)).values(pr_repo_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_repo table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_repo table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info('Done mapping new id.\n') diff --git a/workers/contributor_worker/contributor_worker/__init__.py b/workers/contributor_worker/contributor_worker/__init__.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.0.1' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/contributor_worker/contributor_worker/runtime.py b/workers/contributor_worker/contributor_worker/runtime.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/runtime.py +++ /dev/null @@ -1,110 +0,0 @@ -from flask import Flask, jsonify, request -from contributor_worker.worker import ContributorWorker -import click, os, json, logging, requests -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}\n".format(str(request.json))) - app.contributor_worker.task = request.json - - #set task - return jsonify({"success": "sucess"}) - - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": app.contributor_worker._queue, - "tasks": [{ - "given": list(app.contributor_worker._queue) - }] - }) - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.contributor_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', 
default='localhost', help='Host') [email protected]('--port', default=51252, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'contributor_worker', None, {}) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.contributor_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "anomaly_days": worker_info['anomaly_days'] if 'anomaly_days' in worker_info else 2, - "training_days": worker_info['training_days'] if 'training_days' in worker_info else 365, - "confidence_interval": worker_info['confidence_interval'] if 'confidence_interval' in worker_info else .95, - "contamination": worker_info['contamination'] if 'contamination' in worker_info else 0.041, - 'metrics': worker_info['metrics'] if 'metrics' in worker_info else {"issues-new": "issues", - "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"} - } - - #create instance of the worker - app.contributor_worker = ContributorWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - print("Starting Flask App on host {} with port {} with pid: ".format(broker_host, worker_port) + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - print("Killing Flask App: {} and telling broker that this worker is disconnected.".format(str(os.getpid()))) - try: - logging.info("Sending disconnected message to broker... 
@ -> {} with info: {}\n".format('http://{}:{}/api/unstable/workers'.format( - config['broker_host'], config['broker_port']), config)) - requests.post('http://{}:{}/api/unstable/workers/remove'.format( - config['broker_host'], config['broker_port']), json=config) #hello message - except Exception as e: - logging.info("Ran into error: {}".format(e)) - logging.info("Broker's port is busy, worker will not be able to accept tasks, " - "please restart Augur if you want this worker to attempt connection again.") - diff --git a/workers/contributor_worker/runtime.py b/workers/contributor_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/contributor_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.contributor_worker.contributor_worker import ContributorWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ContributorWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/contributor_worker/setup.py b/workers/contributor_worker/setup.py --- a/workers/contributor_worker/setup.py +++ b/workers/contributor_worker/setup.py @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'contributor_worker_start=contributor_worker.runtime:main', + 'contributor_worker_start=workers.contributor_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/facade_worker/facade_worker/facade00mainprogram.py b/workers/facade_worker/facade_worker/facade00mainprogram.py --- a/workers/facade_worker/facade_worker/facade00mainprogram.py +++ b/workers/facade_worker/facade_worker/facade00mainprogram.py @@ -26,20 +26,8 @@ # repos. It also rebuilds analysis data, checks any changed affiliations and # aliases, and caches data for display. 
-import pymysql -import sys -import platform -import imp -import time -import datetime -import html.parser -import subprocess -import os -import getopt -import xlsxwriter -import configparser +import pymysql, sys, platform, imp, time, datetime, html.parser, subprocess, os, getopt, xlsxwriter, configparser, logging from multiprocessing import Process, Queue - from facade_worker.facade01config import Config#increment_db, update_db, migrate_database_config, database_connection, get_setting, update_status, log_activity from facade_worker.facade02utilitymethods import update_repo_log, trim_commit, store_working_author, trim_author from facade_worker.facade03analyzecommit import analyze_commit @@ -48,55 +36,45 @@ from facade_worker.facade06analyze import analysis from facade_worker.facade07rebuildcache import nuke_affiliations, fill_empty_affiliations, invalidate_caches, rebuild_unknown_affiliation_and_web_caches -from workers.standard_methods import read_config +from workers.util import read_config +from workers.worker_base import Worker + +html = html.parser.HTMLParser() -import logging +class FacadeWorker(Worker): + def __init__(self, config={}, task=None): + worker_type = "facade_worker" -# if platform.python_implementation() == 'PyPy': -# import pymysql -# else: -# import MySQLdb -# ## End Imports + # Define what this worker can be given and know how to interpret + given = [['repo_group']] + models = ['commits'] -html = html.parser.HTMLParser() + # Define the tables needed to insert, update, or delete on + data_tables = [] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Facade-specific config + self.cfg = Config(self.logger) + + # Define data collection info + self.tool_source = 'Facade Worker' + self.tool_version = '0.0.1' + self.data_source = 'Git Log' -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class FacadeWorker: - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(os.getpid())) - - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.cfg = Config() - - ### The real program starts here ### + def initialize_database_connections(self): # Set up the database - db_user = self.config['user'] - db_pass = self.config['password'] - db_name = self.config['database'] - db_host = self.config['host'] - db_port = self.config['port'] - db_user_people = self.config['user'] - db_pass_people = self.config['password'] - db_name_people = self.config['database'] - db_host_people = self.config['host'] - db_port_people = self.config['port'] + db_user = self.config['user_database'] + db_pass = self.config['password_database'] + db_name = self.config['name_database'] + db_host = self.config['host_database'] + db_port = self.config['port_database'] # Open a general-purpose connection - db,cursor = self.cfg.database_connection( + self.db, self.cursor = self.cfg.database_connection( db_host, 
db_user, db_pass, @@ -104,157 +82,68 @@ def __init__(self, config, task=None): db_port, False, False) # Open a connection for the people database - db_people,cursor_people = self.cfg.database_connection( - db_host_people, - db_user_people, - db_pass_people, - db_name_people, - db_port_people, True, False) + self.db_people,self.cursor_people = self.cfg.database_connection( + db_host, + db_user, + db_pass, + db_name, + db_port, True, False) # Check if the database is current and update it if necessary try: - current_db = int(self.cfg.get_setting('database_version')) + self.current_db = int(self.cfg.get_setting('database_version')) except: # Catch databases which existed before database versioning - current_db = -1 - - #WHAT IS THE UPSTREAM_DB??? - # if current_db < upstream_db: - - # print(("Current database version: %s\nUpstream database version %s\n" % - # (current_db, upstream_db))) - - # self.cfg.update_db(current_db); + self.current_db = -1 - self.commit_model() - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - rg_id = value['given']['repo_group_id'] - - """ Query all repos """ - # repoUrlSQL = s.sql.text(""" - # SELECT repo_id,repo_group_id,repo_git FROM repo WHERE repo_group_id = '{}' - # """.format(rg_id)) - # rs = pd.read_sql(repoUrlSQL, self.db, params={}) - try: - if value['job_type'] == "UPDATE": - self._queue.put(CollectorTask(message_type='TASK', entry_info=value)) - elif value['job_type'] == "MAINTAIN": - self._maintain_queue.put(CollectorTask(message_type='TASK', entry_info=value)) - - except Exception as e: - logging.info("error: {}".format(e)) - - self._task = CollectorTask(message_type='TASK', entry_info={"task": value, "repo_id": repo_id}) - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...") - if self._child is None: - self._child = Process(target=self.collect, args=()) - self._child.start() - def collect(self): """ Function to process each entry in the worker's task queue Determines what action to take based off the message type """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() while True: - time.sleep(0.5) if not self._queue.empty(): - message = self._queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "UPDATE" + message = self._queue.get() # Get the task off our MP queue else: - if not self._maintain_queue.empty(): - message = self._maintain_queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "MAINTAIN" - else: - break - - if message.type == 'EXIT': + break + self.logger.info("Popped off message: {}\n".format(str(message))) + + if message['job_type'] == 'STOP': break - if message.type != 'TASK': - raise ValueError(f'{message.type} is not a recognized task type') - - if message.type == 'TASK': - try: - git_url = message.entry_info['task']['given']['git_url'] - self.query_issues({'git_url': git_url, 'repo_id': message.entry_info['repo_id']}) - except 
Exception as e: - logging.info("Worker ran into an error for task: {}\n".format(message.entry_info['task'])) - logging.info("Error encountered: " + repr(e) + "\n") - logging.info("Notifying broker and logging task failure in database...\n") - message.entry_info['task']['worker_id'] = self.config['id'] - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=message.entry_info['task']) - # Add to history table - task_history = { - "repo_id": message.entry_info['repo_id'], - "worker": self.config['id'], - "job_model": message.entry_info['task']['models'][0], - "oauth_id": self.config['zombie_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error for: " + str(message.entry_info['task']) + "\n") - - # Update job process table - updated_job = { - "since_id_str": message.entry_info['repo_id'], - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==message.entry_info['task']['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + message.entry_info['task']['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - pass - - def commit_model(self): + # If task is not a valid job type + if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': + raise ValueError('{} is not a recognized task type'.format(message['job_type'])) + pass + try: + self.commits_model(message) + except Exception as e: + self.logger.error(e) + raise(e) + break + + def commits_model(self, message): # Figure out what we need to do - limited_run = read_config("Facade", name="limited_run", default=0) - delete_marked_repos = read_config("Facade", name="delete_marked_repos", default=0) - pull_repos = read_config("Facade", name="pull_repos", default=0) - clone_repos = read_config("Facade", name="clone_repos", default=1) - check_updates = read_config("Facade", name="check_updates", default=0) - force_updates = read_config("Facade", name="force_updates", default=0) - run_analysis = read_config("Facade", name="run_analysis", default=0) - force_analysis = read_config("Facade", name="force_analysis", default=0) - nuke_stored_affiliations = read_config("Facade", name="nuke_stored_affiliations", default=0) - fix_affiliations = read_config("Facade", name="fix_affiliations", default=1) - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - rebuild_caches = read_config("Facade", name="rebuild_caches", default=1) #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], + limited_run = self.augur_config.get_value("Facade", "limited_run") + delete_marked_repos = self.augur_config.get_value("Facade", "delete_marked_repos") + pull_repos = self.augur_config.get_value("Facade", "pull_repos") + clone_repos = self.augur_config.get_value("Facade", "clone_repos") + check_updates = self.augur_config.get_value("Facade", "check_updates") + force_updates = self.augur_config.get_value("Facade", "force_updates") + run_analysis = self.augur_config.get_value("Facade", "run_analysis") + force_analysis = self.augur_config.get_value("Facade", "force_analysis") + nuke_stored_affiliations = 
self.augur_config.get_value("Facade", "nuke_stored_affiliations") + fix_affiliations = self.augur_config.get_value("Facade", "fix_affiliations") + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + rebuild_caches = self.augur_config.get_value("Facade", "rebuild_caches") #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], # '%Y-%m-%d %I:%M:%S.%f') - datetime.datetime.now()).total_seconds()) // 3600 > int(self.cfg.get_setting( # 'update_frequency')) else 0 - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - create_xlsx_summary_files = read_config("Facade", name="create_xlsx_summary_files", default=0) - multithreaded = read_config("Facade", name="multithreaded", default=1) + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + create_xlsx_summary_files = self.augur_config.get_value("Facade", "create_xlsx_summary_files") + multithreaded = self.augur_config.get_value("Facade", "multithreaded") opts,args = getopt.getopt(sys.argv[1:],'hdpcuUaAmnfIrx') for opt in opts: @@ -355,9 +244,9 @@ def commit_model(self): if len(repo_base_directory) == 0: self.cfg.log_activity('Error','No base directory. It is unsafe to continue.') - update_status('Failed: No base directory') + self.cfg.update_status('Failed: No base directory') sys.exit(1) - + # Begin working start_time = time.time() diff --git a/workers/facade_worker/facade_worker/facade01config.py b/workers/facade_worker/facade_worker/facade01config.py --- a/workers/facade_worker/facade_worker/facade01config.py +++ b/workers/facade_worker/facade_worker/facade01config.py @@ -39,15 +39,15 @@ import json import logging -from workers.standard_methods import read_config - +from workers.util import read_config class Config: - def __init__(self): + def __init__(self, logger): self.upstream_db = 7 self.cursor = None self.cursor_people = None + self.logger = logger self.db = None self.db_people = None @@ -199,7 +199,7 @@ def log_activity(self, level, status): # "Debug", then just print it and don't save it in the database. 
log_options = ('Error','Quiet','Info','Verbose','Debug') - logging.info("* %s\n" % status) + self.logger.info("* %s\n" % status) if self.log_level == 'Debug' and level == 'Debug': return @@ -209,7 +209,7 @@ def log_activity(self, level, status): self.cursor.execute(query, (level, status)) self.db.commit() except Exception as e: - logging.info('Error encountered: {}\n'.format(e)) + self.logger.info('Error encountered: {}\n'.format(e)) # Set up the database db_user = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur') diff --git a/workers/facade_worker/facade_worker/facade03analyzecommit.py b/workers/facade_worker/facade_worker/facade03analyzecommit.py --- a/workers/facade_worker/facade_worker/facade03analyzecommit.py +++ b/workers/facade_worker/facade_worker/facade03analyzecommit.py @@ -38,7 +38,7 @@ import configparser import traceback -from workers.standard_methods import read_config +from workers.util import read_config def analyze_commit(cfg, repo_id, repo_loc, commit, multithreaded): diff --git a/workers/facade_worker/facade_worker/facade07rebuildcache.py b/workers/facade_worker/facade_worker/facade07rebuildcache.py --- a/workers/facade_worker/facade_worker/facade07rebuildcache.py +++ b/workers/facade_worker/facade_worker/facade07rebuildcache.py @@ -156,7 +156,6 @@ def discover_null_affiliations(attribution,email): cfg.log_activity('Debug','Found domain match for %s' % email) - # try: for match in matches: update = ("UPDATE commits " "SET cmt_%s_affiliation = %%s " @@ -164,7 +163,6 @@ def discover_null_affiliations(attribution,email): "AND cmt_%s_affiliation IS NULL " "AND cmt_%s_date::date >= %%s::date" % (attribution, attribution, attribution, attribution)) - #"AND cmt_%s_date >= TO_TIMESTAMP(%%s, 'YYYY-MM-DD')" % cfg.log_activity('Info', 'attr: {} \nmatch:{}\nsql: {}'.format(attribution, match, update)) @@ -175,15 +173,6 @@ def discover_null_affiliations(attribution,email): cfg.log_activity('Info', 'Error encountered: {}'.format(e)) cfg.log_activity('Info', 'Affiliation insertion failed for %s ' % email) - # except Exception as e: - # cfg.log_activity('Info', '1st Error encountered: {}'.format(e)) - # cfg.log_activity('Info', 'Attribution matching failed for %s ' % email) - # except Exception as e: - # logging.info('2nd Error encountered: {}'.format(e)) - # cfg.log_activity('Info', 'Attribution matching failed and exception logging failed') - # else: - # cfg.log_activity('Info', 'Attribution matching failed and exception logging failed and the exception to the exception failed.') - def discover_alias(email): # Match aliases with their canonical email diff --git a/workers/facade_worker/facade_worker/runtime.py b/workers/facade_worker/facade_worker/runtime.py --- a/workers/facade_worker/facade_worker/runtime.py +++ b/workers/facade_worker/facade_worker/runtime.py @@ -1,102 +1,23 @@ from flask import Flask, jsonify, request, Response import click, os, json, requests, logging -from facade_worker.facade00mainprogram import FacadeWorker -from workers.standard_methods import read_config +from workers.facade_worker.facade_worker.facade00mainprogram import FacadeWorker +from workers.util import create_server, WorkerGunicornApplication -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint 
that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(request.json)) - app.facade_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.facade_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51258, help='Port') -def main(augur_url, host, port): +def main(): """ Declares singular worker and creates the server and flask app that it will be running on """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'facade_worker', None, None) - worker_port = worker_info['port'] if 'port' in worker_info else port - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - config = { - "id": "com.augurlabs.core.facade_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } + app = Flask(__name__) + app.worker = FacadeWorker() - #create instance of the worker - app.facade_worker = FacadeWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") + create_server(app) + WorkerGunicornApplication(app).run() - app.run(debug=app.debug, host=host, port=worker_port) + if app.worker._child is not None: + app.worker._child.terminate() try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) except: pass - logging.info("Killing Flask App: " + str(os.getpid())) + 
os.kill(os.getpid(), 9) - diff --git a/workers/facade_worker/setup.py b/workers/facade_worker/setup.py --- a/workers/facade_worker/setup.py +++ b/workers/facade_worker/setup.py @@ -30,7 +30,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'facade_worker_start=facade_worker.runtime:main', + 'facade_worker_start=workers.facade_worker.facade_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/github_worker/github_worker/worker.py b/workers/github_worker/github_worker.py similarity index 56% rename from workers/github_worker/github_worker/worker.py rename to workers/github_worker/github_worker.py --- a/workers/github_worker/github_worker/worker.py +++ b/workers/github_worker/github_worker.py @@ -2,217 +2,61 @@ from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData import requests, time, logging, json, os from datetime import datetime -from sqlalchemy.ext.declarative import declarative_base -from workers.standard_methods import * +from workers.worker_base import Worker -class GitHubWorker: +class GitHubWorker(Worker): """ Worker that collects data from the Github API and stores it in our database task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - - self._task = task # task currently being worked on (dict) - self._child = None # process of currently running task (multiprocessing process) - self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes) - self.db = None # sql alchemy db session + def __init__(self, config={}): - # These 3 are included in every tuple the worker inserts (data collection info) - self.tool_source = 'GitHub API Worker' - self.tool_version = '0.0.3' # See __init__.py - self.data_source = 'GitHub API' - - self.results_counter = 0 # count of tuples inserted in the database (to store stats for each task in op tables) - self.finishing_task = True # if we are finishing a previous task, pagination works differenty - - self.specs = { - "id": self.config['id'], # what the broker knows this worker as - "location": self.config['location'], # host + port worker is running on (so broker can send tasks here) - "qualifications": [ - { - "given": [["github_url"]], # type of repo this worker can be given as a task - "models":["issues"] # models this worker can fill for a repo as a task - } - ], - "config": [self.config] - } - - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - # Create an sqlalchemy engine for both database schemas - logging.info("Making database connections... 
{}".format(DB_STR)) - db_schema = 'augur_data' - self.db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(db_schema)}) + worker_type = 'github_worker' - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + given = [['github_url']] + models = ['issues'] - metadata = MetaData() - helper_metadata = MetaData() - - # Reflect only the tables we will use for each schema's metadata object - metadata.reflect(self.db, only=['contributors', 'issues', 'issue_labels', 'message', + data_tables = ['contributors', 'issues', 'issue_labels', 'message', 'issue_message_ref', 'issue_events','issue_assignees','contributors_aliases', 'pull_request_assignees', 'pull_request_events', 'pull_request_reviewers', 'pull_request_meta', - 'pull_request_repo']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - # So we can access all our tables when inserting, updating, etc - self.contributors_table = Base.classes.contributors.__table__ - self.issues_table = Base.classes.issues.__table__ - self.issue_labels_table = Base.classes.issue_labels.__table__ - self.issue_events_table = Base.classes.issue_events.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.message_table = Base.classes.message.__table__ - self.issues_message_ref_table = Base.classes.issue_message_ref.__table__ - self.issue_assignees_table = Base.classes.issue_assignees.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.contributors_aliases_table = Base.classes.contributors_aliases.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's - logging.info("Querying starting ids info...\n") - - self.issue_id_inc = get_max_id(self, 'issues', 'issue_id') - - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') + 'pull_request_repo'] + operations_tables = ['worker_history', 'worker_job'] - self.msg_id_inc = get_max_id(self, 'message', 'msg_id') - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) + # These 3 are included in every tuple the worker inserts (data collection info) + self.tool_source = 'GitHub API Worker' + self.tool_version = '0.0.3' # See __init__.py + self.data_source = 'GitHub API' - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5433/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def 
task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - # If the task has one of our "valid" job types - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - # Setting that causes paginating through ALL pages, not just unknown ones - # This setting is set by the housekeeper and is attached to the task before it gets sent here - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - - self._task = value - self.run() + self.finishing_task = True # if we are finishing a previous task, pagination works differenty + self.platform_id = 25150 # GitHub - def cancel(self): - """ Delete/cancel current task - """ - self._task = None + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - # Spawn a subprocess to handle message reading and performing the tasks - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['github_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'issues': - self.issues_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass def issues_model(self, entry_info, repo_id): """ Data collection function Query the GitHub API for issues """ + + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's + self.logger.info("Querying starting ids info...\n") + + self.issue_id_inc = self.get_max_id('issues', 'issue_id') + + self.msg_id_inc = self.get_max_id('message', 'msg_id') github_url = entry_info['given']['github_url'] - logging.info("Beginning filling the issues model for repo: " + github_url + "\n") - record_model_process(self, repo_id, 'issues') + self.logger.info("Beginning filling the issues model for repo: " + github_url + "\n") # Contributors are part of this model, and finding all for the repo saves us # from having to add them as we discover committers in the issue process - query_github_contributors(self, entry_info, repo_id) + 
self.query_github_contributors(entry_info, repo_id) # Extract the owner/repo for the endpoint path = urlparse(github_url) @@ -238,14 +82,14 @@ def issues_model(self, entry_info, repo_id): duplicate_col_map = {'gh_issue_id': 'id'} #list to hold issues needing insertion - issues = paginate(self, issues_url, duplicate_col_map, update_col_map, table, table_pkey, + issues = self.paginate(issues_url, duplicate_col_map, update_col_map, table, table_pkey, 'WHERE repo_id = {}'.format(repo_id)) # Discover and remove duplicates before we start inserting - logging.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") + self.logger.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") for issue_dict in issues: - logging.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") + self.logger.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") # Add the FK repo_id to the dict being inserted issue_dict['repo_id'] = repo_id @@ -253,17 +97,17 @@ def issues_model(self, entry_info, repo_id): # Figure out if this issue is a PR # still unsure about this key value pair/what it means pr_id = None - if "pull_request" in issue_dict: - logging.info("Issue is a PR\n") + if 'pull_request' in issue_dict: + self.logger.info("Issue is a PR\n") # Right now we are just storing our issue id as the PR id if it is one pr_id = self.issue_id_inc else: - logging.info("Issue is not a PR\n") + self.logger.info("Issue is not a PR\n") # Begin on the actual issue... issue = { "repo_id": issue_dict['repo_id'], - "reporter_id": find_id_from_login(self, issue_dict['user']['login']), + "reporter_id": self.find_id_from_login(issue_dict['user']['login']), "pull_request": pr_id, "pull_request_id": pr_id, "created_at": issue_dict['created_at'], @@ -292,20 +136,20 @@ def issues_model(self, entry_info, repo_id): if issue_dict['flag'] == 'need_update': result = self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( + self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( issue_dict['id'])) self.issue_id_inc = issue_dict['pkey'] elif issue_dict['flag'] == 'need_insertion': try: result = self.db.execute(self.issues_table.insert().values(issue)) - logging.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) self.results_counter += 1 self.issue_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + + self.logger.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + " and title of: {} and gh_issue_num of: {}\n".format(issue_dict['title'],issue_dict['number'])) except Exception as e: - logging.info("When inserting an issue, ran into the following error: {}\n".format(e)) - logging.info(issue) + self.logger.info("When inserting an issue, ran into the following error: {}\n".format(e)) + self.logger.info(issue) continue # Check if the assignee key's value is already recorded in the assignees key's value @@ -316,13 +160,13 @@ def issues_model(self, entry_info, repo_id): # Handles case if there are no assignees if collected_assignees[0] is not None: - logging.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + 
"\n") + self.logger.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") for assignee_dict in collected_assignees: if type(assignee_dict) != dict: continue assignee = { "issue_id": self.issue_id_inc, - "cntrb_id": find_id_from_login(self, assignee_dict['login']), + "cntrb_id": self.find_id_from_login(assignee_dict['login']), "tool_source": self.tool_source, "tool_version": self.tool_version, "data_source": self.data_source, @@ -331,13 +175,13 @@ def issues_model(self, entry_info, repo_id): } # Commit insertion to the assignee table result = self.db.execute(self.issue_assignees_table.insert().values(assignee)) - logging.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + + self.logger.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + " with login/cntrb_id: " + assignee_dict['login'] + " " + str(assignee['cntrb_id']) + "\n") else: - logging.info("Issue does not have any assignees\n") + self.logger.info("Issue does not have any assignees\n") # Insert the issue labels to the issue_labels table for label_dict in issue_dict['labels']: @@ -357,10 +201,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_labels_table.insert().values(label)) - logging.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue label with text: " + label_dict['name'] + "\n") + self.logger.info("Inserted issue label with text: " + label_dict['name'] + "\n") #### Messages/comments and events insertion @@ -375,19 +219,19 @@ def issues_model(self, entry_info, repo_id): duplicate_col_map = {'msg_timestamp': 'created_at'} #list to hold contributors needing insertion or update - issue_comments = paginate(self, comments_url, duplicate_col_map, update_col_map, table, table_pkey, + issue_comments = self.paginate(comments_url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="WHERE msg_id IN (SELECT msg_id FROM issue_message_ref WHERE issue_id = {})".format( self.issue_id_inc)) - logging.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) + self.logger.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) for comment in issue_comments: try: - commenter_cntrb_id = find_id_from_login(self, comment['user']['login']) + commenter_cntrb_id = self.find_id_from_login(comment['user']['login']) except: commenter_cntrb_id = None issue_comment = { - "pltfrm_id": 25150, + "pltfrm_id": self.platform_id, "msg_text": comment['body'], "msg_timestamp": comment['created_at'], "cntrb_id": commenter_cntrb_id, @@ -397,13 +241,13 @@ def issues_model(self, entry_info, repo_id): } try: result = self.db.execute(self.message_table.insert().values(issue_comment)) - logging.info("Primary key inserted into the message table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the message table: {}".format(result.inserted_primary_key)) self.results_counter += 1 self.msg_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue comment with id: {}\n".format(self.msg_id_inc)) + 
self.logger.info("Inserted issue comment with id: {}\n".format(self.msg_id_inc)) except Exception as e: - logging.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) + self.logger.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) ### ISSUE MESSAGE REF TABLE ### @@ -417,8 +261,8 @@ def issues_model(self, entry_info, repo_id): "issue_msg_ref_src_node_id": comment['node_id'] } - result = self.db.execute(self.issues_message_ref_table.insert().values(issue_message_ref)) - logging.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) + result = self.db.execute(self.issue_message_ref_table.insert().values(issue_message_ref)) + self.logger.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) self.results_counter += 1 # Base of the url for event endpoints @@ -434,7 +278,7 @@ def issues_model(self, entry_info, repo_id): pseudo_key_gh = 'url' pseudo_key_augur = 'node_url' table = 'issue_events' - event_table_values = get_table_values(self, [pseudo_key_augur], [table], "WHERE issue_id = {}".format(self.issue_id_inc)) + event_table_values = self.get_table_values([pseudo_key_augur], [table], "WHERE issue_id = {}".format(self.issue_id_inc)) # Paginate backwards through all the events but get first page in order # to determine if there are multiple pages and if the 1st page covers all @@ -442,29 +286,29 @@ def issues_model(self, entry_info, repo_id): multiple_pages = False while True: - logging.info("Hitting endpoint: " + events_url.format(i) + " ...\n") + self.logger.info("Hitting endpoint: " + events_url.format(i) + " ...\n") r = requests.get(url=events_url.format(i), headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) # Find last page so we can decrement from there if 'last' in r.links and not multiple_pages and not self.finishing_task: param = r.links['last']['url'][-6:] i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") multiple_pages = True elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") + self.logger.info("Only 1 page of request\n") elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ... " + self.logger.info("Finishing a previous task, paginating forwards ... " "excess rate limit requests will be made\n") j = r.json() # Checking contents of requests with what we already have in the db - new_events = check_duplicates(j, event_table_values, pseudo_key_gh) + new_events = self.check_duplicates(j, event_table_values, pseudo_key_gh) if len(new_events) == 0 and multiple_pages and 'last' in r.links: if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown events, breaking from pagination.\n") + self.logger.info("No more pages with unknown events, breaking from pagination.\n") break elif len(new_events) != 0: to_add = [obj for obj in new_events if obj not in issue_events] @@ -474,29 +318,29 @@ def issues_model(self, entry_info, repo_id): # Since we already wouldve checked the first page... 
break if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") + self.logger.info("No more pages to check, breaking from pagination.\n") break - logging.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") + self.logger.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") # If the issue is closed, then we search for the closing event and store the user's id cntrb_id = None if 'closed_at' in issue_dict: for event in issue_events: if str(event['event']) != "closed": - logging.info("not closed, continuing") + self.logger.info("not closed, continuing") continue if not event['actor']: continue - cntrb_id = find_id_from_login(self, event['actor']['login']) + cntrb_id = self.find_id_from_login(event['actor']['login']) if cntrb_id is not None: break # Need to hit this single contributor endpoint to get extra created at data... cntrb_url = ("https://api.github.com/users/" + event['actor']['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) contributor = r.json() company = None @@ -543,20 +387,17 @@ def issues_model(self, entry_info, repo_id): # Commit insertion to table result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format( + self.logger.info("Primary key inserted into the contributors table: {}".format( result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - # Increment our global track of the cntrb id for the possibility of it being used as a FK - self.cntrb_id_inc = int(result.inserted_primary_key[0]) + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") for event in issue_events: if event['actor'] is not None: - event['cntrb_id'] = find_id_from_login(self, event['actor']['login']) + event['cntrb_id'] = self.find_id_from_login(event['actor']['login']) if event['cntrb_id'] is None: - logging.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") + self.logger.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") continue # event['cntrb_id'] = None else: @@ -578,10 +419,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_events_table.insert().values(issue_event)) - logging.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) + self.logger.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) if cntrb_id is not None: update_closing_cntrb = { @@ -589,11 +430,11 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( + self.logger.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( issue_dict['id'])) self.issue_id_inc += 1 #Register this task as completed - 
register_task_completion(self, entry_info, repo_id, "issues") + self.register_task_completion(entry_info, repo_id, "issues") diff --git a/workers/github_worker/github_worker/__init__.py b/workers/github_worker/github_worker/__init__.py deleted file mode 100644 --- a/workers/github_worker/github_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/github_worker/github_worker/runtime.py b/workers/github_worker/github_worker/runtime.py deleted file mode 100644 --- a/workers/github_worker/github_worker/runtime.py +++ /dev/null @@ -1,107 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from github_worker.worker import GitHubWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.github_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.github_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51236, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'github_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: # for multiple instances of workers - try: # trying each port for an already-alive worker until a free port is found - print("New github worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": 
"com.augurlabs.core.github_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - app.github_worker = GitHubWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.github_worker._child is not None: - app.github_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/github_worker/runtime.py b/workers/github_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/github_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.github_worker.github_worker import GitHubWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/github_worker/setup.py b/workers/github_worker/setup.py --- a/workers/github_worker/setup.py +++ b/workers/github_worker/setup.py @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'github_worker_start=github_worker.runtime:main', + 'github_worker_start=workers.github_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/insight_worker/insight_worker/__init__.py b/workers/insight_worker/__init__.py similarity index 100% rename from workers/insight_worker/insight_worker/__init__.py rename to workers/insight_worker/__init__.py diff --git a/workers/insight_worker/insight_worker/worker.py b/workers/insight_worker/insight_worker.py similarity index 80% rename from workers/insight_worker/insight_worker/worker.py rename to workers/insight_worker/insight_worker.py --- a/workers/insight_worker/insight_worker/worker.py +++ b/workers/insight_worker/insight_worker.py @@ -10,179 +10,55 @@ import scipy.stats import datetime from sklearn.ensemble import IsolationForest -from workers.standard_methods import * #init_oauths, get_table_values, get_max_id, register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process, paginate +from workers.worker_base import Worker 
import warnings warnings.filterwarnings('ignore') -class InsightWorker: +class InsightWorker(Worker): """ Worker that detects anomalies on a select few of our metrics task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self._task = task - self._child = None - self._queue = Queue() - self.db = None + def __init__(self, config={}): + + worker_type = "insight_worker" + + given = [['git_url']] + models = ['insights'] + + data_tables = ['chaoss_metric_status', 'repo_insights', 'repo_insights_records'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({ + 'api_host': self.augur_config.get_value('Server', 'host'), + 'api_port': self.augur_config.get_value('Server', 'port') + }) + + # These 3 are included in every tuple the worker inserts (data collection info) self.tool_source = 'Insight Worker' self.tool_version = '0.0.3' # See __init__.py self.data_source = 'Augur API' + self.refresh = True self.send_insights = True - self.finishing_task = False self.anomaly_days = self.config['anomaly_days'] self.training_days = self.config['training_days'] self.contamination = self.config['contamination'] self.confidence = self.config['confidence_interval'] / 100 self.metrics = self.config['metrics'] - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["insights"] - } - ], - "config": [self.config] - } - - self.results_counter = 0 - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - # produce our own MetaData object - metadata = MetaData() - helper_metadata = MetaData() - - # we can reflect it ourselves from a database, using options - # such as 'only' to limit what tables we look at... - metadata.reflect(self.db, only=['chaoss_metric_status', 'repo_insights', 'repo_insights_records']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job']) - - # we can then produce a set of mappings from this MetaData. - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - # calling prepare() just sets up mapped classes and relationships. 
- Base.prepare() - HelperBase.prepare() - - # mapped classes are ready - self.chaoss_metric_status_table = Base.classes['chaoss_metric_status'].__table__ - self.repo_insights_table = Base.classes['repo_insights'].__table__ - self.repo_insights_records_table = Base.classes['repo_insights_records'].__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5432/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'insights': - self.insights_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - def insights_model(self, entry_info, repo_id): logging.info("Discovering insights for task with entry info: {}\n".format(entry_info)) - record_model_process(self, repo_id, 'insights') """ Collect data """ base_url = 'http://{}:{}/api/unstable/repo-groups/9999/repos/{}/'.format( - self.config['broker_host'], self.config['broker_port'], repo_id) + self.config['api_host'], self.config['api_port'], repo_id) # Dataframe to hold all endpoint results # Subtract configurable amount of time @@ -218,7 +94,7 @@ def insights_model(self, entry_info, repo_id): # If none of the endpoints returned data if df.size == 0: logging.info("None of the provided 
endpoints provided data for this repository. Anomaly detection is 'done'.\n") - register_task_completion(self, entry_info, repo_id, "insights") + self.register_task_completion(entry_info, repo_id, "insights") return """ Deletion of old insights """ @@ -258,7 +134,7 @@ def insights_model(self, entry_info, repo_id): result = self.db.execute(delete_points_SQL, repo_id=repo_id, min_date=min_date) # get table values to check for dupes later on - insight_table_values = get_table_values(self, ['*'], ['repo_insights_records'], where_clause="WHERE repo_id = {}".format(repo_id)) + insight_table_values = self.get_table_values(['*'], ['repo_insights_records'], where_clause="WHERE repo_id = {}".format(repo_id)) to_model_columns = df.columns[0:len(self.metrics)+1] @@ -415,7 +291,7 @@ def classify_anomalies(df,metric): logging.info("error occurred while storing datapoint: {}\n".format(repr(e))) break - register_task_completion(self, entry_info, repo_id, "insights") + self.register_task_completion(entry_info, repo_id, "insights") def confidence_interval_insights(self, entry_info): """ Anomaly detection method based on confidence intervals @@ -423,7 +299,6 @@ def confidence_interval_insights(self, entry_info): # Update table of endpoints before we query them all logging.info("Discovering insights for task with entry info: {}".format(entry_info)) - record_model_process(self, repo_id, 'insights') # Set the endpoints we want to discover insights for endpoints = [{'cm_info': "issues-new"}, {'cm_info': "code-changes"}, {'cm_info': "code-changes-lines"}, @@ -445,10 +320,10 @@ def confidence_interval_insights(self, entry_info): # If we are discovering insights for a group vs repo, the base url will change if 'repo_group_id' in entry_info and 'repo_id' not in entry_info: base_url = 'http://{}:{}/api/unstable/repo-groups/{}/'.format( - self.config['broker_host'],self.config['broker_port'], entry_info['repo_group_id']) + self.config['api_host'],self.config['api_port'], entry_info['repo_group_id']) else: base_url = 'http://{}:{}/api/unstable/repo-groups/9999/repos/{}/'.format( - self.config['broker_host'],self.config['broker_port'], repo_id) + self.config['api_host'],self.config['api_port'], repo_id) # Hit and discover insights for every endpoint we care about for endpoint in endpoints: @@ -610,50 +485,6 @@ def is_unique_key(key): self.register_task_completion(entry_info, "insights") - def register_task_completion(self, entry_info, model): - # Task to send back to broker - task_completed = { - 'worker_id': self.config['id'], - 'job_type': entry_info['job_type'], - 'repo_id': repo_id, - 'git_url': entry_info['git_url'] - } - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.config['zombie_id'], - "timestamp": datetime.datetime.now(), - "status": "Success", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job completion for: " + str(task_completed) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where( - self.job_table.c.job_model==model).values(updated_job)) - logging.info("Update job process for model: " + model + "\n") - - # Notify broker of completion - logging.info("Telling broker we 
completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - - # Reset results counter for next task - self.results_counter = 0 - def send_insight(self, insight, units_from_mean): try: repoSQL = s.sql.text(""" @@ -821,9 +652,9 @@ def confidence_interval(self, data, timeperiod='week', confidence=.95): def update_metrics(self): logging.info("Preparing to update metrics ...\n\n" + "Hitting endpoint: http://{}:{}/api/unstable/metrics/status ...\n".format( - self.config['broker_host'],self.config['broker_port'])) + self.config['api_host'],self.config['api_port'])) r = requests.get(url='http://{}:{}/api/unstable/metrics/status'.format( - self.config['broker_host'],self.config['broker_port'])) + self.config['api_host'],self.config['api_port'])) data = r.json() active_metrics = [metric for metric in data if metric['backend_status'] == 'implemented'] diff --git a/workers/insight_worker/insight_worker/runtime.py b/workers/insight_worker/insight_worker/runtime.py deleted file mode 100644 --- a/workers/insight_worker/insight_worker/runtime.py +++ /dev/null @@ -1,110 +0,0 @@ -from flask import Flask, jsonify, request -from insight_worker.worker import InsightWorker -import click, os, json, logging, requests -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}\n".format(str(request.json))) - app.insight_worker.task = request.json - - #set task - return jsonify({"success": "sucess"}) - - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": app.insight_worker._queue, - "tasks": [{ - "given": list(app.insight_worker._queue) - }] - }) - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.insight_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51252, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'insight_worker', None, {}) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if 
r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.insight_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "anomaly_days": worker_info['anomaly_days'] if 'anomaly_days' in worker_info else 2, - "training_days": worker_info['training_days'] if 'training_days' in worker_info else 365, - "confidence_interval": worker_info['confidence_interval'] if 'confidence_interval' in worker_info else .95, - "contamination": worker_info['contamination'] if 'contamination' in worker_info else 0.041, - 'metrics': worker_info['metrics'] if 'metrics' in worker_info else {"issues-new": "issues", - "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"} - } - - #create instance of the worker - app.insight_worker = InsightWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - print("Starting Flask App on host {} with port {} with pid: ".format(broker_host, worker_port) + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - print("Killing Flask App: {} and telling broker that this worker is disconnected.".format(str(os.getpid()))) - try: - logging.info("Sending disconnected message to broker... 
@ -> {} with info: {}\n".format('http://{}:{}/api/unstable/workers'.format( - config['broker_host'], config['broker_port']), config)) - requests.post('http://{}:{}/api/unstable/workers/remove'.format( - config['broker_host'], config['broker_port']), json=config) #hello message - except Exception as e: - logging.info("Ran into error: {}".format(e)) - logging.info("Broker's port is busy, worker will not be able to accept tasks, " - "please restart Augur if you want this worker to attempt connection again.") - diff --git a/workers/insight_worker/runtime.py b/workers/insight_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/insight_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.insight_worker.insight_worker import InsightWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = InsightWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/insight_worker/setup.py b/workers/insight_worker/setup.py --- a/workers/insight_worker/setup.py +++ b/workers/insight_worker/setup.py @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'insight_worker_start=insight_worker.runtime:main', + 'insight_worker_start=workers.insight_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/linux_badge_worker/__init__.py b/workers/linux_badge_worker/__init__.py new file mode 100644 diff --git a/workers/linux_badge_worker/linux_badge_worker.py b/workers/linux_badge_worker/linux_badge_worker.py new file mode 100644 --- /dev/null +++ b/workers/linux_badge_worker/linux_badge_worker.py @@ -0,0 +1,63 @@ +import os +from datetime import datetime +import logging +import requests +import json +from urllib.parse import quote +from multiprocessing import Process, Queue + +import pandas as pd +import sqlalchemy as s +from sqlalchemy.ext.automap import automap_base +from sqlalchemy import MetaData +from workers.worker_base import Worker + +class LinuxBadgeWorker(Worker): + """ Worker that collects repo badging data from CII + config: database credentials, broker information, and ID + """ + def __init__(self, config={}): + + worker_type = "linux_badge_worker" + + given = [['git_url']] + models = ['badges'] + + data_tables = ['repo_badging'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({"endpoint": "https://bestpractices.coreinfrastructure.org/projects.json?pq="}) + self.tool_source = 'Linux Badge Worker' + self.tool_version = '0.1.0' + self.data_source = 'CII Badging API' + + + def badges_model(self, entry_info, repo_id): + """ Data collection and storage method + Query the CII API and store the result in the DB for the badges model + """ + git_url = entry_info['given']['git_url'] + self.logger.info("Collecting data for {}".format(git_url)) + extension = quote(git_url[0:-4]) + + url = self.config['endpoint'] + extension + self.logger.info("Hitting CII endpoint: " + url + " ...") + 
data = requests.get(url=url).json() + + if data != []: + self.logger.info("Inserting badging data for " + git_url) + self.db.execute(self.repo_badging_table.insert()\ + .values(repo_id=repo_id, + data=data, + tool_source=self.tool_source, + tool_version=self.tool_version, + data_source=self.data_source)) + + self.results_counter += 1 + else: + self.logger.info("No CII data found for {}\n".format(git_url)) + + self.register_task_completion(entry_info, repo_id, "badges") diff --git a/workers/linux_badge_worker/linux_badge_worker/__init__.py b/workers/linux_badge_worker/linux_badge_worker/__init__.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""linux_badge_worker - Augur worker that collects CII badging data""" - -__tool_source__ = 'Linux Badge Worker' -__tool_version__ = '0.1.0' -__data_source__ = 'CII Badging API' - -__author__ = 'Augur Team <[email protected]>' -__all__ = [] - diff --git a/workers/linux_badge_worker/linux_badge_worker/runtime.py b/workers/linux_badge_worker/linux_badge_worker/runtime.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/runtime.py +++ /dev/null @@ -1,107 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from linux_badge_worker.worker import BadgeWorker -from workers.standard_methods import read_config - -def create_server(app): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.linux_badge_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.linux_badge_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51235, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'linux_badge_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if 
r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.linux_badge_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json?pq=", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - app.linux_badge_worker = BadgeWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - - if app.linux_badge_worker._child is not None: - app.linux_badge_worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/linux_badge_worker/worker.py b/workers/linux_badge_worker/linux_badge_worker/worker.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/worker.py +++ /dev/null @@ -1,240 +0,0 @@ -import os -from datetime import datetime -import logging -import requests -import json -from urllib.parse import quote -from multiprocessing import Process, Queue - -from linux_badge_worker import __data_source__, __tool_source__, __tool_version__ -import pandas as pd -import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData -from workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class BadgeWorker: - """ Worker that collects repo badging data from CII - config: database credentials, broker information, and ID - """ - def __init__(self, config, task=None): - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.config = config - - self.db = None - self.repo_badging_table = None - - self._task = task - self._queue = Queue() - self._child = None - - self.history_id = None - self.finishing_task = False - self.working_on = None - self.results_counter = 0 - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["badges"] - } - ], - "config": [self.config] - } - - self._db_str = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], - self.config['password'], - self.config['host'], - 
self.config['port'], - self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self._db_str, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self._db_str, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - logging.info("Database connection established...") - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['repo_badging']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - self.repo_badging_table = Base.classes.repo_badging.__table__ - logging.info("ORM setup complete...") - - # Organize different api keys/oauths available - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauth_sql = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(0)) - - for oauth in [{'oauth_id': 0, 'access_token': 0}] + json.loads(pd.read_sql(oauth_sql, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) \ - - datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - - # Send broker hello message - connect_to_broker(self) - logging.info("Connected to the broker...\n") - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "key": "", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['github_api_key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - else: - self.finishing_task = False - logging.info("Focused 
task is OFF\n") - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - - self._task = value - self.run() - - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def badges_model(self, entry_info, repo_id): - """ Data collection and storage method - Query the CII API and store the result in the DB for the badges model - """ - git_url = entry_info['given']['git_url'] - logging.info("Collecting data for {}".format(git_url)) - extension = quote(git_url[0:-4]) - - url = self.config['endpoint'] + extension - logging.info("Hitting CII endpoint: " + url + " ...") - data = requests.get(url=url).json() - - if data != []: - logging.info("Inserting badging data for " + git_url) - self.db.execute(self.repo_badging_table.insert()\ - .values(repo_id=repo_id, - data=data, - tool_source=__tool_source__, - tool_version=__tool_version__, - data_source=__data_source__)) - - self.results_counter += 1 - else: - logging.info("No CII data found for {}\n".format(git_url)) - - register_task_completion(self, entry_info, repo_id, "badges") - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - """ Query all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'badges': - self.badges_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() diff --git a/workers/linux_badge_worker/runtime.py b/workers/linux_badge_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/linux_badge_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.linux_badge_worker.linux_badge_worker import LinuxBadgeWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = LinuxBadgeWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/setup.py b/workers/linux_badge_worker/setup.py --- a/workers/linux_badge_worker/setup.py +++ b/workers/linux_badge_worker/setup.py @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'linux_badge_worker_start=linux_badge_worker.runtime:main', 
+ 'linux_badge_worker_start=workers.linux_badge_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/metric_status_worker/metric_status_worker/__init__.py b/workers/metric_status_worker/metric_status_worker/__init__.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/metric_status_worker/metric_status_worker/runtime.py b/workers/metric_status_worker/metric_status_worker/runtime.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/runtime.py +++ /dev/null @@ -1,108 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, logging, requests, json -from metric_status_worker.worker import MetricStatusWorker -import os -import json -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.metric_status_worker.task = request.json - - #set task - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "success" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.metric_status_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51263, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'metric_status_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.metric_status_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 
'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - app.metric_status_worker = MetricStatusWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=port) - if app.metric_status_worker._child is not None: - app.metric_status_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/metric_status_worker/metric_status_worker/worker.py b/workers/metric_status_worker/metric_status_worker/worker.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/worker.py +++ /dev/null @@ -1,719 +0,0 @@ -import base64 -import logging -import os -import re -import sys -import json -import time -from abc import ABC -from datetime import datetime -from multiprocessing import Process, Queue -from urllib.parse import urlparse - -import pandas as pd -import requests -import sqlalchemy as s -from github import Github -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - - -class MetricStatusWorker: - def __init__(self, config, task=None): - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.db = None - self.table = None - self.API_KEY = self.config['key'] - self.tool_source = 'Metric Status Worker' - self.tool_version = '0.0.1' - self.data_source = 'GitHub API' - self.results_counter = 0 - self.working_on = None - - - # url = 'https://api.github.com' - # response = requests.get(url, headers=self.headers) - # self.rate_limit = int(response.headers['X-RateLimit-Remaining']) - - specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["chaoss_metric_status"] - } - ], - "config": [self.config] - } - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - logging.info("Making database connections...") - - dbschema = 'augur_data' - self.db = 
s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['chaoss_metric_status']) - # helper_metadata.reflect(self.helper_db) - - Base = automap_base(metadata=metadata) - - Base.prepare() - - self.chaoss_metric_status_table = Base.classes['chaoss_metric_status'].__table__ - - try: - requests.post('http://{}:{}/api/unstable/workers'.format( - self.config['broker_host'],self.config['broker_port']), json=specs) - except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker') - sys.exit('Cannot connect to the broker! Quitting...') - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced """ - return self._task - - @task.setter - def task(self, value): - try: - if value['job_type'] == 'UPDATE': - self._queue.put(CollectorTask('TASK', {})) - elif value['job_type'] == 'MAINTAIN': - self._maintain_queue.put(CollectorTask('TASK', {})) - - if 'focused_task' in value: - if value['focused_task'] == 1: - self.finishing_task = True - except Exception as e: - logging.error("Error: {},".format(str(e))) - - self._task = CollectorTask(message_type='TASK', entry_info={}) - self.run() - - def cancel(self): - """ Delete/cancel current task """ - self._task = None - - def run(self): - logging.info("Running...") - if self._child is None: - self._child = Process(target=self.collect, args=()) - self._child.start() - requests.post("http://{}:{}/api/unstable/add_pids".format( - self.config['broker_host'],self.config['broker_port']), json={'pids': [self._child.pid, os.getpid()]}) - - def collect(self): - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = 'UPDATE' - elif not self._maintain_queue.empty(): - message = self._maintain_queue.get() - logging.info("Popped off message: {}".format(str(message.entry_info))) - self.working_on = "MAINTAIN" - else: - break - - - if message.type == 'EXIT': - break - if message.type != 'TASK': - raise ValueError( - f'{message.type} is not a recognized task type') - - if message.type == 'TASK': - self.update_metrics(message.entry_info) - - def update_metrics(self, entry_info): - """ Data colletction function - Query the github api for metric status - """ - status = MetricsStatus(self.API_KEY) - status.create_metrics_status() - metrics = status.metrics_status - - # convert to dict - dict_metrics = [] - for metric in metrics: - metric_info = { - 'cm_group': metric['group'], - 'cm_source': metric['data_source'], - 'cm_type': metric['metric_type'], - 'cm_backend_status': metric['backend_status'], - 'cm_frontend_status': metric['frontend_status'], - 'cm_api_endpoint_repo': metric['endpoint_repo'], - 'cm_api_endpoint_rg': metric['endpoint_group'], - 'cm_defined': metric['is_defined'], - 'cm_name': metric['display_name'], - 'cm_working_group': metric['group'], - 'cm_info': metric['tag'], - 'cm_working_group_focus_area': metric['focus_area'], - 'tool_source': self.tool_source, - 'tool_version': self.tool_version, - 'data_source': self.data_source, - } - dict_metrics.append(metric_info) - - need_insertion = self.filter_duplicates({'cm_api_endpoint_repo': "cm_api_endpoint_repo", 'cm_backend_status':'cm_api_endpoint_rg'}, ['chaoss_metric_status'], - dict_metrics) - logging.info("Count of contributors needing insertion: " + str(len(need_insertion)) + "\n") - for metric in need_insertion: - result = 
self.db.execute(self.chaoss_metric_status_table.insert().values(metric)) - logging.info("Primary key inserted into the metrics table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - - self.register_task_completion() - - - # def filter_duplicates(self, og_data): - # need_insertion = [] - # colSQL = s.sql.text(""" - # SELECT * FROM chaoss_metric_status - # """) - # values = pd.read_sql(colSQL, self.db) - # for obj in og_data: - # location = values.loc[ (values['cm_name']==obj['cm_name'] ) & ( values['cm_working_group']==obj[ - # 'cm_working_group']) & ()] - # if not location.empty: - # logging.info("value of tuple exists: " + str(obj['cm_name'])) - # else: - # need_insertion.append(obj) - # - # logging.info("While filtering duplicates, we reduced the data size from " + str(len(og_data)) + - # " to " + str(len(need_insertion)) + "\n") - # - # return need_insertion - - def filter_duplicates(self, cols, tables, og_data): - need_insertion = [] - - table_str = tables[0] - del tables[0] - for table in tables: - table_str += ", " + table - for col in cols.keys(): - colSQL = s.sql.text(""" - SELECT {} FROM {} - """.format(col, table_str)) - values = pd.read_sql(colSQL, self.db, params={}) - - for obj in og_data: - if values.isin([obj[cols[col]]]).any().any(): - logging.info("value of tuple exists: " + str(obj[cols[col]]) + "\n") - elif obj not in need_insertion: - need_insertion.append(obj) - logging.info("While filtering duplicates, we reduced the data size from " + str(len(og_data)) + - " to " + str(len(need_insertion)) + "\n") - return need_insertion - - def update_exist_metrics(self, metrics): - need_update = [] - need_insert = [] - - for metric in metrics: - result = self.db.execute(self.chaoss_metric_status_table.update().where((self.chaoss_metric_status_table.c.cm_name == metric['cm_name'])&(self.chaoss_metric_status_table.c.cm_group == metric['cm_group']) & ((self.chaoss_metric_status_table.c.cm_api_endpoint_repo != metric['cm_api_endpoint_repo']) | (self.chaoss_metric_status_table.c.cm_api_endpoint_rg != metric['cm_api_endpoint_rg'])|(self.chaoss_metric_status_table.c.cm_source != metric['cm_source'])) - ).values(metric)) - - if result.rowcount: - logging.info("Update Metric {}-{}".format(metric['cm_group'], metric['cm_name'])) - - def register_task_completion(self): - task_completed = { - 'worker_id': self.config['id'], - 'job_type': self.working_on, - } - - logging.info("Telling broker we completed task: " + str(task_completed) + "\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - self.results_counter = 0 - - - - -class FrontendStatusExtractor(object): - - def __init__(self): - pass - self.api_text = open(os.path.abspath(os.path.dirname(os.path.dirname(os.getcwd()))) + - "/frontend/src/AugurAPI.ts", 'r').read() - self.attributes = re.findall( - r'(?:(GitEndpoint|Endpoint|Timeseries|addRepoMetric|addRepoGroupMetric)\()\'(.*)\', \'(.*)\'', - self.api_text) - self.timeseries = [ - attribute for attribute in self.attributes if attribute[0] == "Timeseries"] - self.endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "Endpoint"] - self.git_endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "GitEndpoint"] - self.repo_metrics = [ - attribute for attribute in self.attributes if attribute[0] == 'addRepoMetric'] - self.group_metric = [ - attribute for 
attribute in self.attributes if attribute[0] == 'addRepoMetric'] - - def determine_frontend_status(self, metric): - metric.frontend_status = 'unimplemented' - attribute = None - - if metric.metric_type == "timeseries": - attribute = next((attribute for attribute in self.timeseries if - "/api/unstable/<owner>/<repo>/timeseries/{}".format(attribute[2]) == metric.endpoint_repo), - None) - - elif metric.metric_type == "metric": - attribute = next((attribute for attribute in self.endpoints if - "/api/unstable/<owner>/<repo>/{}".format(attribute[2]) == metric.endpoint_repo), None) - if not attribute: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/repos/<repo_id>/{}".format( - attribute[2]) == metric.endpoint_repo), None) - if not attribute and metric.endpoint_group: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/{}".format(attribute[2]) == metric.endpoint_group), None) - - elif metric.metric_type == "git": - attribute = next((attribute for attribute in self.git_endpoints if - "/api/unstable/git/{}".format(attribute[2]) == metric.endpoint_repo), None) - - if attribute is not None: - metric.frontend_status = 'implemented' - metric.chart_mapping = attribute[1] - else: - metric.frontend_status = 'unimplemented' - - -class Metric(ABC): - - def __init__(self): - self.ID = None - self.tag = None - self.display_name = None - self.group = None - self.backend_status = 'unimplemented' - self.frontend_status = 'unimplemented' - self.chart_mapping = None - self.data_source = None - self.metric_type = None - self.documentation_url = None - self.is_defined = False - self.focus_area = None - self.endpoint_group = None - self.endpoint_repo = None - - -class GroupedMetric(Metric): - - def __init__(self, display_name, group, tag, focus_area): - Metric.__init__(self) - self.display_name = display_name - self.tag = tag - self.ID = re.sub(r'-$|\*', '', 'none' + '-' + self.tag) - self.group = group - self.focus_area = focus_area - - -class ImplementedMetric(Metric): - - def __init__(self, metadata, frontend_status_extractor): - Metric.__init__(self) - - self.ID = metadata['ID'] - self.tag = metadata['tag'] - self.display_name = metadata['metric_name'] - self.backend_status = 'implemented' - self.data_source = metadata['source'] - self.group = "experimental" - self.endpoint_group = None - self.endpoint_repo = None - - - if 'metric_type' in metadata: - self.metric_type = metadata['metric_type'] - else: - self.metric_type = 'metric' - - if 'endpoint' in metadata: - if 'group_endpoint' in metadata: - self.endpoint_group = metadata['group_endpoint'] - if 'repo_endpoint' in metadata: - self.endpoint_repo = metadata['repo_endpoint'] - else: - self.endpoint_repo = metadata['endpoint'] - frontend_status_extractor.determine_frontend_status(self) - - -class MetricsStatus(object): - wg_evolution = { - "repo": "chaoss/wg-evolution", - "focus_area": "focus_areas", - "name": 'evolution' - } - - wg_diversity_inclusion = { - "repo": "chaoss/wg-diversity-inclusion", - "focus_area": "focus-areas", - "name": "diversity-inclusion" - } - - wg_value = { - "repo": "chaoss/wg-value", - "focus_area": 'focus-areas', - "name": "value" - } - - wg_common = { - "repo": "chaoss/wg-common", - "focus_area": "focus-areas", - "name": "common" - } - - wg_risk = { - "repo": "chaoss/wg-risk", - "focus_area": "focus-areas", - "name": "risk" - } - - def __init__(self, githubapi): - self.__githubapi = githubapi - self.github = 
Github(self.__githubapi) - - # TODO: don't hardcode this - self.groups = { - "evolution": "Evolution", - "diversity-inclusion": "Diversity and Inclusion metrics", - "value": "Value", - "risk": "Risk", - "common": "Common", - "experimental": "Experimental", - "all": "All" - } - - self.implemented_metrics = [] - - self.evo_metrics = [] - self.di_metrics = [] - self.risk_metrics = [] - self.value_metrics = [] - self.common_metrics = [] - self.experimental_metrics = [] - - self.metrics_by_group = [] - - self.metrics_status = [] - - self.data_sources = [] - self.metric_types = [] - self.tags = {} - self.metadata = [] - - def create_metrics_status(self): - - self.build_implemented_metrics() - - self.evo_metrics = self.create_grouped_metrics( - self.wg_evolution, "evolution") - self.risk_metrics = self.create_grouped_metrics(self.wg_risk, "risk") - self.common_metrics = self.create_grouped_metrics( - self.wg_common, 'common') - self.di_metrics = self.create_grouped_metrics( - self.wg_diversity_inclusion, 'diversity-inclusion') - self.value_metrics = self.create_grouped_metrics( - self.wg_value, 'value') - - self.metrics_by_group = [self.evo_metrics, self.risk_metrics, - self.common_metrics, self.di_metrics, self.value_metrics] - - self.create_experimental_metrics() - self.metrics_by_group.append(self.experimental_metrics) - # - self.copy_implemented_metrics() - - self.find_defined_metrics() - - self.build_metrics_status() - - # self.build_metadata() - - def build_implemented_metrics(self): - frontend_status_extractor = FrontendStatusExtractor() - - r = requests.get( - url='http://{}:{}/api/unstable/batch/metadata'.format( - self.config['broker_host'],self.config['broker_port'])) - data = json.loads(r.text) - - for metric in data: - if "ID" in metric.keys(): - self.implemented_metrics.append( - ImplementedMetric(metric, frontend_status_extractor)) - - def create_grouped_metrics(self, group, group_name): - metrics = self.find_metrics_from_focus_area( - group['repo'], group['focus_area']) - - remote_metrics = [] - for metric in metrics: - remote_metrics.append(GroupedMetric(metric.display_name, group['name'], metric.tag, - metric.focus_area)) - - return remote_metrics - - def find_metrics_from_focus_area(self, repo_name, focus_area_path): - focus_areas = self.github.get_repo( - repo_name).get_dir_contents(focus_area_path) - metrics = [] - for area in focus_areas: - # get focus area name from filename - # focus_area_name = re.sub('.md','',re.sub('-', ' ',area.name)) - focus_area_name = None - focus_area_name_splited = [a.capitalize() for a in re.sub( - '.md', '', re.sub('[_]|[-]', ' ', area.name)).split()] - focus_area_name = ' '.join(focus_area_name_splited) - - # extract structure :focus_area_name/readme.md - if area.type == 'dir': - tmp = self.github.get_repo( - repo_name).get_dir_contents(area.path) - readme = [a for a in tmp if 'readme' in a.name.lower()] - if len(readme) == 0: - continue - else: - area = readme[0] - elif 'readme' in area.name.lower() or 'changelog' in area.name.lower(): - continue - - # decode content; github api return encoded content - decoded_content = base64.b64decode(area.content).decode('utf-8') - metric_name_tag = self.parse_table( - decoded_content) or self.parse_list(decoded_content) - - for name, tag in metric_name_tag.items(): - add_metric = Metric() - add_metric.display_name = name - add_metric.tag = tag - add_metric.focus_area = focus_area_name - - metrics.append(add_metric) - - if metric_name_tag is None: - continue - - return metrics - - def parse_table(self, 
md_content): - # group 0 is header, group 2 is |---|--|, and group 3 is table content - tables = re.findall( - r'^(\|?[^\n]+\|[^\n]+\|?\r?\n)((?:\|?\s*:?[-]+\s*:?)+\|?)(\n(?:\|?[^\n]+\|[^\n]+\|?\r?\n?)*)?$', md_content, - re.MULTILINE) - - if not tables: - return None - - box = [] - metrics_name_tag = {} - for table in tables: - # get metric name by 'metric_name' index in column - metric_index, length_in_row = self.get_metric_index_in_table_row( - table[0]) - table_content = [x.strip() - for x in table[2].replace('\n', '|').split('|')] - # remove two empty str - table_content.pop(0) - table_content.pop() - - raw_metrics = [table_content[a] for a in range( - metric_index, len(table_content), length_in_row)] - - for raw_metric in raw_metrics: - metric_name, metric_link = self.is_has_link( - raw_metric, md_content) - metric_name = re.sub('[\[]|[\]]', '', metric_name) - if not metric_link: - metric_link = re.sub(' ', '-', metric_name).lower() - metrics_name_tag[metric_name] = self.link_to_tag( - metric_name, str(metric_link)) - - return metrics_name_tag - - def get_metric_index_in_table_row(self, row): - header_names = [x.strip().lower() for x in row.split('|')] - # print(header_names) - index = None - if 'metric' in header_names: - index = header_names.index('metric') - elif 'name' in header_names: - index = header_names.index('name') - - return index, len(header_names) - - def parse_list(self, md_content): - matched_lists = re.findall(r'[-]\s+(.+)\n', md_content) - metric_names = {} - # print(matched_lists) - for matched in matched_lists: - # print(matched) - metirc_name = re.sub(r'.+:\s', '', matched) - metirc_name, metric_link = self.is_has_link( - metirc_name, md_content) - metirc_name = re.sub('[\[]|[\]]', '', metirc_name) - metric_names[metirc_name] = self.link_to_tag( - metirc_name, metric_link) - return metric_names - - def is_has_link(self, s, md_content): - # remove leading whitespace if exist - s = s.strip() - pattern_inline = re.compile(r'\[([^\[\]]+)\]\(([^)]+)') - match = pattern_inline.match(s) - - if match: - return match.group(1), match.group(2) - - pattern_ref = re.compile(r'\[([^\[\]]+)\]\[([^]]+)') - match2 = pattern_ref.match(s) - - if match2: - link = match2.group(2) - p = re.compile(r'\n\[' + link + r'\]:\s+(.+)\n') - res = p.search(md_content, re.DOTALL) - if res: - return match2.group(1), res.group(1) - else: - return s, None - - def link_to_tag(self, name, s): - - # generate tag if undefined metric - if not s: - return re.sub(' ', '-', name.lower()) - - pattern = re.compile(r'\/?([a-zA-Z_-]+)(\.md)?$') - m = pattern.search(s) - if m: - return re.sub('_', '-', re.sub('.md', '', m.group(1).lower())) - else: - return re.sub(' ', '-', re.sub('\(s\)', 's', name)) - - def create_experimental_metrics(self): - tags = [] - for group in self.metrics_by_group: - for metric in group: - tags.append(metric.tag) - - self.experimental_metrics = [ - metric for metric in self.implemented_metrics if metric.tag not in tags] - - def copy_implemented_metrics(self): - # takes implemented metrics and copies their data to the appropriate metric object - # I am so very sorry - # TODO: burn this into the ground - for group in enumerate(self.metrics_by_group): - if group[1] is not self.experimental_metrics: - for grouped_metric in group[1]: - defined_implemented_metrics = [ - metric for metric in self.implemented_metrics if grouped_metric.tag == metric.tag] - if defined_implemented_metrics != []: - for metric in defined_implemented_metrics: - metric.group = group[1][0].group - metric.focus_area 
= grouped_metric.focus_area - group[1].append(metric) - self.implemented_metrics.remove(metric) - grouped_metric.ID = 'n/a' - self.metrics_by_group[group[0]] = [ - metric for metric in group[1] if metric.ID != 'n/a'] - - def find_defined_metrics(self): - # return map {tag: html_url} - repo_names = [self.wg_common['repo'], self.wg_evolution['repo'], - self.wg_diversity_inclusion['repo'], self.wg_risk['repo'], self.wg_value['repo']] - - md_files = {} - - for repo_name in repo_names: - repo = self.github.get_repo(repo_name) - contents = repo.get_contents("") - - while len(contents) > 1: - file_content = contents.pop(0) - if file_content.type == "dir": - contents.extend(repo.get_contents(file_content.path)) - elif '.md' in file_content.name: - name = re.sub( - '_', '-', re.sub('.md', '', file_content.name)) - md_files[name.lower()] = file_content.html_url - - for group in self.metrics_by_group: - for metric in group: - if metric.tag in md_files.keys(): - metric.is_defined = True - metric.documentation_url = md_files[metric.tag] - - def build_metrics_status(self): - for group in self.metrics_by_group: - for metric in group: - self.metrics_status.append(metric.__dict__) - - def build_metadata(self): - self.get_metric_sources() - self.get_metric_types() - self.get_metric_tags() - - self.metadata = { - "remotes": { - "diversity_inclusion_urls": self.diversity_inclusion_urls, - "growth_maturity_decline_urls": self.growth_maturity_decline_urls, - "risk_urls": self.risk_urls, - "value_urls": self.value_urls, - "activity_repo_urls": self.activity_urls - }, - "groups": self.groups, - "data_sources": self.data_sources, - "metric_types": self.metric_types, - "tags": self.tags - } - - def get_metric_sources(self): - for data_source in [metric['data_source'] for metric in self.metrics_status]: - data_source = data_source.lower() - if data_source not in self.data_sources and data_source != "none": - self.data_sources.append(data_source) - self.data_sources.append("all") - - def get_metric_types(self): - for metric_type in [metric['metric_type'] for metric in self.metrics_status]: - metric_type = metric_type.lower() - if metric_type not in self.metric_types and metric_type != "none": - self.metric_types.append(metric_type) - self.metric_types.append("all") - - def get_metric_tags(self): - for tag in [(metric['tag'], metric['group']) for metric in self.metrics_status]: - # tag[0] = tag[0].lower() - if tag[0] not in [tag[0] for tag in self.tags] and tag[0] != "none": - self.tags[tag[0]] = tag[1] \ No newline at end of file diff --git a/workers/pull_request_worker/pull_request_worker/__init__.py b/workers/pull_request_worker/__init__.py similarity index 100% rename from workers/pull_request_worker/pull_request_worker/__init__.py rename to workers/pull_request_worker/__init__.py diff --git a/workers/pull_request_worker/pull_request_worker/worker.py b/workers/pull_request_worker/pull_request_worker.py similarity index 62% rename from workers/pull_request_worker/pull_request_worker/worker.py rename to workers/pull_request_worker/pull_request_worker.py --- a/workers/pull_request_worker/pull_request_worker/worker.py +++ b/workers/pull_request_worker/pull_request_worker.py @@ -1,225 +1,42 @@ import ast, json, logging, os, sys, time, traceback, requests from datetime import datetime from multiprocessing import Process, Queue -from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base -from workers.standard_methods 
import * from sqlalchemy.sql.expression import bindparam +from workers.worker_base import Worker -class GHPullRequestWorker: +class GitHubPullRequestWorker(Worker): """ Worker that collects Pull Request related data from the Github API and stores it in our database. :param task: most recent task the broker added to the worker's queue :param config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.working_on = None - self.config = config - LOG_FORMAT = '%(levelname)s:[%(name)s]: %(message)s' - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO, format=LOG_FORMAT) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self.db = None - self.table = None - self.API_KEY = self.config['key'] - self.tool_source = 'GitHub Pull Request Worker' - self.tool_version = '0.0.1' # See __init__.py - self.data_source = 'GitHub API' - self.results_counter = 0 - self.headers = {'Authorization': f'token {self.API_KEY}'} - self.history_id = None - self.finishing_task = True - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [['github_url']], - "models":['pull_requests', 'pull_request_commits', 'pull_request_files'] - } - ], - "config": [self.config] - } - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], - self.config['port'], self.config['database'] - ) - - #Database connections - logging.info("Making database connections...\n") - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) + def __init__(self, config={}): - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + worker_type = "pull_request_worker" - metadata = MetaData() - helper_metadata = MetaData() + # Define what this worker can be given and know how to interpret + given = [['github_url']] + models = ['pull_requests', 'pull_request_commits', 'pull_request_files'] - metadata.reflect(self.db, only=['contributors', 'pull_requests', + # Define the tables needed to insert, update, or delete on + data_tables = ['contributors', 'pull_requests', 'pull_request_assignees', 'pull_request_events', 'pull_request_labels', 'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo', 'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits', - 'pull_request_files']) - - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.contributors_table = Base.classes.contributors.__table__ - self.pull_requests_table = Base.classes.pull_requests.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.pull_request_labels_table = Base.classes.pull_request_labels.__table__ - self.pull_request_message_ref_table = Base.classes.pull_request_message_ref.__table__ - 
self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_teams_table = Base.classes.pull_request_teams.__table__ - self.message_table = Base.classes.message.__table__ - self.pull_request_commits_table = Base.classes.pull_request_commits.__table__ - self.pull_request_files_table = Base.classes.pull_request_files.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - logging.info("Querying starting ids info...\n") + 'pull_request_files'] + operations_tables = ['worker_history', 'worker_job'] - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - self.pr_id_inc = get_max_id(self, 'pull_requests', 'pull_request_id') - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - self.msg_id_inc = get_max_id(self, 'message', 'msg_id') - self.pr_msg_ref_id_inc = get_max_id(self, 'pull_request_message_ref', 'pr_msg_ref_id') - self.label_id_inc = get_max_id(self, 'pull_request_labels', 'pr_label_id') - self.event_id_inc = get_max_id(self, 'pull_request_events', 'pr_event_id') - self.reviewer_id_inc = get_max_id(self, 'pull_request_reviewers', 'pr_reviewer_map_id') - self.assignee_id_inc = get_max_id(self, 'pull_request_assignees', 'pr_assignee_map_id') - self.pr_meta_id_inc = get_max_id(self, 'pull_request_meta', 'pr_repo_meta_id') - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - # self.pull_requests_graphql({ - # 'job_type': 'MAINTAIN', - # 'models': ['pull_request_files'], - # 'display_name': 'pull_request_files model for url: https://github.com/zephyrproject-rtos/actions_sandbox.git', - # 'given': { - # 'github_url': 'https://github.com/zephyrproject-rtos/actions_sandbox.git' - # } - # }, 25201) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - github_url = value['given']['github_url'] - - repo_url_SQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(github_url)) - rs = pd.read_sql(repo_url_SQL, self.db, params={}) - - try: - repo_id = int(rs.iloc[0]['repo_id']) - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - if 'focused_task' in value: - if value['focused_task'] == 1: - self.finishing_task = True - - except Exception as e: - logging.error(f"error: {e}, or that repo is not in our database: {value}\n") - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child 
= Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query all repos with repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['github_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'pull_requests': - self.pull_requests_model(message, repo_id) - elif message['models'][0] == 'pull_request_commits': - self.pull_request_commits_model(message, repo_id) - elif message['models'][0] == 'pull_request_files': - self.pull_requests_graphql(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + # Define data collection info + self.tool_source = 'GitHub Pull Request Worker' + self.tool_version = '0.0.1' # See __init__.py + self.data_source = 'GitHub API' + def graphql_paginate(self, query, data_subjects, before_parameters=None): """ Paginate a GitHub GraphQL query backwards @@ -227,7 +44,7 @@ def graphql_paginate(self, query, data_subjects, before_parameters=None): :rtype: A Pandas DataFrame, contains all data contained in the pages """ - logging.info(f'Start paginate with params: \n{data_subjects} ' + self.logger.info(f'Start paginate with params: \n{data_subjects} ' f'\n{before_parameters}') def all_items(dictionary): @@ -262,7 +79,7 @@ def find_root_of_subject(data, key_subject): for data_subject, nest in data_subjects.items(): - logging.info(f'Beginning paginate process for field {data_subject} ' + self.logger.info(f'Beginning paginate process for field {data_subject} ' f'for query: {query}') page_count = 0 @@ -274,13 +91,13 @@ def find_root_of_subject(data, key_subject): success = False for attempt in range(num_attempts): - logging.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' + self.logger.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' f'page number {page_count}\n') response = requests.post(base_url, json={'query': query.format( **before_parameters)}, headers=self.headers) - update_gh_rate_limit(self, response) + self.update_gh_rate_limit(response) try: data = response.json() @@ -288,9 +105,9 @@ def find_root_of_subject(data, key_subject): data = json.loads(json.dumps(response.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) + self.logger.info("Error!: {}".format(data['errors'])) if data['errors'][0]['type'] == 'RATE_LIMITED': - update_gh_rate_limit(self, response) + self.update_gh_rate_limit(response) num_attempts -= 1 continue @@ -302,18 +119,18 @@ def find_root_of_subject(data, key_subject): data = root['edges'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github 
repo was not found or does not exist for endpoint: {}\n".format(base_url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url)) break if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': num_attempts -= 1 - update_gh_rate_limit(self, response, temporarily_disable=True) + self.update_gh_rate_limit(response, temporarily_disable=True) if data['message'] == 'Bad credentials': - update_gh_rate_limit(self, response, bad_credentials=True) + self.update_gh_rate_limit(response, bad_credentials=True) if not success: - logging.info('GraphQL query failed: {}'.format(query)) + self.logger.info('GraphQL query failed: {}'.format(query)) continue before_parameters.update({ @@ -323,7 +140,7 @@ def find_root_of_subject(data, key_subject): tuples += data - logging.info(f'Paged through {page_count} pages and ' + self.logger.info(f'Paged through {page_count} pages and ' f'collected {len(tuples)} data points\n') if not nest: @@ -333,9 +150,9 @@ def find_root_of_subject(data, key_subject): before_parameters=before_parameters) - def pull_requests_graphql(self, task_info, repo_id): + def pull_request_files_model(self, task_info, repo_id): - owner, repo = get_owner_repo(task_info['given']['github_url']) + owner, repo = self.get_owner_repo(task_info['given']['github_url']) # query existing PRs and the respective url we will append the commits url to pr_number_sql = s.sql.text(""" @@ -349,7 +166,7 @@ def pull_requests_graphql(self, task_info, repo_id): for index, pull_request in enumerate(pr_numbers.itertuples()): - logging.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') + self.logger.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') query = """ {{ @@ -394,26 +211,24 @@ def pull_requests_graphql(self, task_info, repo_id): WHERE pull_request_files.pull_request_id = pull_requests.pull_request_id AND repo_id = :repo_id """) - logging.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') + self.logger.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') table_values = pd.read_sql(table_values_sql, self.db, params={'repo_id': repo_id}) # Compare queried values against table values for dupes/updates if len(pr_file_rows) > 0: table_columns = pr_file_rows[0].keys() else: - logging.info(f'No rows need insertion for repo {repo_id}\n') - register_task_completion(self, task_info, repo_id, 'pull_request_files') + self.logger.info(f'No rows need insertion for repo {repo_id}\n') + self.register_task_completion(task_info, repo_id, 'pull_request_files') + return # Compare queried values against table values for dupes/updates pr_file_rows_df = pd.DataFrame(pr_file_rows) pr_file_rows_df = pr_file_rows_df.dropna(subset=['pull_request_id']) - pr_file_rows_df['need_update'] = 0 dupe_columns = ['pull_request_id', 'pr_file_path'] update_columns = ['pr_file_additions', 'pr_file_deletions'] - logging.info(f'{pr_file_rows_df}') - logging.info(f'{table_values}') need_insertion = pr_file_rows_df.merge(table_values, suffixes=('','_table'), how='outer', indicator=True, on=dupe_columns).loc[ lambda x : x['_merge']=='left_only'][table_columns] @@ -430,7 +245,7 @@ def pull_requests_graphql(self, task_info, repo_id): pr_file_insert_rows = need_insertion.to_dict('records') pr_file_update_rows = need_updates.to_dict('records') - logging.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' + 
self.logger.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' f'{len(need_updates)} updates.\n') if len(pr_file_update_rows) > 0: @@ -447,7 +262,7 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) if len(pr_file_insert_rows) > 0: @@ -460,14 +275,22 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) - register_task_completion(self, task_info, repo_id, 'pull_request_files') + self.register_task_completion(task_info, repo_id, 'pull_request_files') def pull_request_commits_model(self, task_info, repo_id): """ Queries the commits related to each pull request already inserted in the db """ + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + + # query existing PRs and the respective url we will append the commits url to pr_url_sql = s.sql.text(""" SELECT DISTINCT pr_url, pull_requests.pull_request_id @@ -484,7 +307,7 @@ def pull_request_commits_model(self, task_info, repo_id): update_col_map = {} # Use helper paginate function to iterate the commits url and check for dupes - pr_commits = paginate(self, commits_url, duplicate_col_map, update_col_map, table, table_pkey, + pr_commits = self.paginate(commits_url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="where pull_request_id = {}".format(pull_request.pull_request_id)) for pr_commit in pr_commits: # post-pagination, iterate results @@ -500,9 +323,9 @@ def pull_request_commits_model(self, task_info, repo_id): 'data_source': 'GitHub API', } result = self.db.execute(self.pull_request_commits_table.insert().values(pr_commit_row)) - logging.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") - register_task_completion(self, task_info, repo_id, 'pull_request_commits') + self.register_task_completion(task_info, repo_id, 'pull_request_commits') def pull_requests_model(self, entry_info, repo_id): """Pull Request data collection function. Query GitHub API for PhubRs. 
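# ---------------------------------------------------------------------------
# Illustrative sketch of the duplicate-filtering pagination pattern used in
# the hunks above and below, where the old module-level paginate(self, ...)
# helper becomes self.paginate(...) on the Worker base class (imported from
# workers.worker_base) and is driven by a duplicate_col_map such as
# {'pr_src_id': 'id'}. Everything here is a hypothetical stand-in, not the
# actual Worker.paginate signature: it only shows the idea of walking a paged
# GitHub-style endpoint and keeping records whose source id is not already
# stored.

import requests

def paginate_new_records(url_template, existing_values, source_field, headers=None):
    """Return records from a paged endpoint whose `source_field` value is not
    already present in `existing_values` (a set of values previously stored)."""
    new_records = []
    page = 1
    while True:
        response = requests.get(url_template.format(page), headers=headers)
        records = response.json()
        if not isinstance(records, list) or len(records) == 0:
            break  # an empty page (or a non-list error payload) ends the walk
        for record in records:
            if record.get(source_field) not in existing_values:
                new_records.append(record)
        page += 1
    return new_records

# Hypothetical usage mirroring the pull request loop in this worker:
#   stored_src_ids = {101, 102}
#   prs_url = 'https://api.github.com/repos/owner/repo/pulls?state=all&page={}'
#   fresh_prs = paginate_new_records(prs_url, stored_src_ids, 'id')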
@@ -510,11 +333,18 @@ def pull_requests_model(self, entry_info, repo_id): :param entry_info: A dictionary consisiting of 'git_url' and 'repo_id' :type entry_info: dict """ + + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + github_url = entry_info['given']['github_url'] - logging.info('Beginning collection of Pull Requests...\n') - logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') - record_model_process(self, repo_id, 'pull_requests') + self.logger.info('Beginning collection of Pull Requests...\n') + self.logger.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') owner, repo = self.get_owner_repo(github_url) @@ -530,12 +360,12 @@ def pull_requests_model(self, entry_info, repo_id): duplicate_col_map = {'pr_src_id': 'id'} #list to hold pull requests needing insertion - prs = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, + prs = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey, where_clause='WHERE repo_id = {}'.format(repo_id), value_update_col_map={'pr_augur_contributor_id': float('nan')}) # Discover and remove duplicates before we start inserting - logging.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") + self.logger.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") for pr_dict in prs: @@ -553,7 +383,7 @@ def pull_requests_model(self, entry_info, repo_id): 'pr_src_state': pr_dict['state'], 'pr_src_locked': pr_dict['locked'], 'pr_src_title': pr_dict['title'], - 'pr_augur_contributor_id': find_id_from_login(self, pr_dict['user']['login']), + 'pr_augur_contributor_id': self.find_id_from_login(pr_dict['user']['login']), 'pr_body': pr_dict['body'], 'pr_created_at': pr_dict['created_at'], 'pr_updated_at': pr_dict['updated_at'], @@ -581,21 +411,21 @@ def pull_requests_model(self, entry_info, repo_id): } if pr_dict['flag'] == 'need_insertion': - logging.info(f'PR {pr_dict["id"]} needs to be inserted\n') + self.logger.info(f'PR {pr_dict["id"]} needs to be inserted\n') result = self.db.execute(self.pull_requests_table.insert().values(pr)) - logging.info(f"Added Pull Request: {result.inserted_primary_key}") + self.logger.info(f"Added Pull Request: {result.inserted_primary_key}") self.pr_id_inc = int(result.inserted_primary_key[0]) elif pr_dict['flag'] == 'need_update': result = self.db.execute(self.pull_requests_table.update().where( self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr)) - logging.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( + self.logger.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( pr_dict['id'])) self.pr_id_inc = pr_dict['pkey'] else: - logging.info("PR does not need to be inserted. Fetching its id from DB") + self.logger.info("PR does not need to be inserted. 
Fetching its id from DB") pr_id_sql = s.sql.text(""" SELECT pull_request_id FROM pull_requests WHERE pr_src_id={} @@ -609,16 +439,16 @@ def pull_requests_model(self, entry_info, repo_id): self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc) self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc) - logging.info(f"Inserted PR data for {owner}/{repo}") + self.logger.info(f"Inserted PR data for {owner}/{repo}") self.results_counter += 1 - register_task_completion(self, entry_info, repo_id, 'pull_requests') + self.register_task_completion(entry_info, repo_id, 'pull_requests') def query_labels(self, labels, pr_id): - logging.info('Querying PR Labels\n') + self.logger.info('Querying PR Labels\n') if len(labels) == 0: - logging.info('No new labels to add\n') + self.logger.info('No new labels to add\n') return table = 'pull_request_labels' @@ -629,12 +459,12 @@ def query_labels(self, labels, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - pr_labels_table_values = get_table_values(self, cols_query, [table]) + pr_labels_table_values = self.get_table_values(cols_query, [table]) - new_labels = assign_tuple_action(self, labels, pr_labels_table_values, update_col_map, duplicate_col_map, + new_labels = self.assign_tuple_action(labels, pr_labels_table_values, update_col_map, duplicate_col_map, table_pkey) - logging.info(f'Found {len(new_labels)} labels\n') + self.logger.info(f'Found {len(new_labels)} labels\n') for label_dict in new_labels: @@ -653,14 +483,13 @@ def query_labels(self, labels, pr_id): if label_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_labels_table.insert().values(label)) - logging.info(f"Added PR Label: {result.inserted_primary_key}\n") - logging.info(f"Inserted PR Labels data for PR with id {pr_id}\n") + self.logger.info(f"Added PR Label: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted PR Labels data for PR with id {pr_id}\n") self.results_counter += 1 - self.label_id_inc = int(result.inserted_primary_key[0]) def query_pr_events(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Events\n') + self.logger.info('Querying PR Events\n') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/events?per_page=100&page={}') @@ -674,14 +503,14 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): duplicate_col_map = {'issue_event_src_id': 'id'} #list to hold contributors needing insertion or update - pr_events = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey) + pr_events = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") + self.logger.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") for pr_event_dict in pr_events: if pr_event_dict['actor']: - cntrb_id = find_id_from_login(self, pr_event_dict['actor']['login']) + cntrb_id = self.find_id_from_login(pr_event_dict['actor']['login']) else: cntrb_id = 1 @@ -700,18 +529,17 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.pull_request_events_table.insert().values(pr_event)) - logging.info(f"Added PR Event: {result.inserted_primary_key}\n") + self.logger.info(f"Added PR Event: {result.inserted_primary_key}\n") self.results_counter += 1 - self.event_id_inc = int(result.inserted_primary_key[0]) - 
logging.info(f"Inserted PR Events data for PR with id {pr_id}\n") + self.logger.info(f"Inserted PR Events data for PR with id {pr_id}\n") def query_reviewers(self, reviewers, pr_id): - logging.info('Querying Reviewers') + self.logger.info('Querying Reviewers') if reviewers is None or len(reviewers) == 0: - logging.info('No reviewers to add') + self.logger.info('No reviewers to add') return table = 'pull_request_reviewers' @@ -722,15 +550,15 @@ def query_reviewers(self, reviewers, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - reviewers_table_values = get_table_values(self, cols_query, [table]) + reviewers_table_values = self.get_table_values(cols_query, [table]) - new_reviewers = assign_tuple_action(self, reviewers, reviewers_table_values, update_col_map, duplicate_col_map, + new_reviewers = self.assign_tuple_action(reviewers, reviewers_table_values, update_col_map, duplicate_col_map, table_pkey) for reviewers_dict in new_reviewers: if 'login' in reviewers_dict: - cntrb_id = find_id_from_login(self, reviewers_dict['login']) + cntrb_id = self.find_id_from_login(reviewers_dict['login']) else: cntrb_id = 1 @@ -744,18 +572,17 @@ def query_reviewers(self, reviewers, pr_id): if reviewers_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_reviewers_table.insert().values(reviewer)) - logging.info(f"Added PR Reviewer {result.inserted_primary_key}") + self.logger.info(f"Added PR Reviewer {result.inserted_primary_key}") - self.reviewer_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 - logging.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") + self.logger.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") def query_assignee(self, assignees, pr_id): - logging.info('Querying Assignees') + self.logger.info('Querying Assignees') if assignees is None or len(assignees) == 0: - logging.info('No assignees to add') + self.logger.info('No assignees to add') return table = 'pull_request_assignees' @@ -766,15 +593,15 @@ def query_assignee(self, assignees, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - assignee_table_values = get_table_values(self, cols_query, [table]) + assignee_table_values = self.get_table_values(cols_query, [table]) - assignees = assign_tuple_action(self, assignees, assignee_table_values, update_col_map, duplicate_col_map, + assignees = self.assign_tuple_action(assignees, assignee_table_values, update_col_map, duplicate_col_map, table_pkey) for assignee_dict in assignees: if 'login' in assignee_dict: - cntrb_id = find_id_from_login(self, assignee_dict['login']) + cntrb_id = self.find_id_from_login(assignee_dict['login']) else: cntrb_id = 1 @@ -788,15 +615,14 @@ def query_assignee(self, assignees, pr_id): if assignee_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_assignees_table.insert().values(assignee)) - logging.info(f'Added PR Assignee {result.inserted_primary_key}') + self.logger.info(f'Added PR Assignee {result.inserted_primary_key}') - self.assignee_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 - logging.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') def query_pr_meta(self, head, base, pr_id): - logging.info('Querying PR Meta') + 
self.logger.info('Querying PR Meta') table = 'pull_request_meta' duplicate_col_map = {'pr_sha': 'sha'} @@ -808,12 +634,12 @@ def query_pr_meta(self, head, base, pr_id): update_keys += list(value_update_col_map.keys()) cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - meta_table_values = get_table_values(self, cols_query, [table]) + meta_table_values = self.get_table_values(cols_query, [table]) pr_meta_dict = { - 'head': assign_tuple_action(self, [head], meta_table_values, update_col_map, duplicate_col_map, + 'head': self.assign_tuple_action([head], meta_table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map=value_update_col_map)[0], - 'base': assign_tuple_action(self, [base], meta_table_values, update_col_map, duplicate_col_map, + 'base': self.assign_tuple_action([base], meta_table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map=value_update_col_map)[0] } @@ -824,7 +650,7 @@ def query_pr_meta(self, head, base, pr_id): 'pr_src_meta_label': pr_meta_data['label'], 'pr_src_meta_ref': pr_meta_data['ref'], 'pr_sha': pr_meta_data['sha'], - 'cntrb_id': find_id_from_login(self, pr_meta_data['user']['login']) if pr_meta_data['user'] \ + 'cntrb_id': self.find_id_from_login(pr_meta_data['user']['login']) if pr_meta_data['user'] \ and 'login' in pr_meta_data['user'] else None, 'tool_source': self.tool_source, 'tool_version': self.tool_version, @@ -836,13 +662,12 @@ def query_pr_meta(self, head, base, pr_id): self.pull_request_meta_table.c.pr_sha==pr_meta['pr_sha'] and self.pull_request_meta_table.c.pr_head_or_base==pr_side ).values(pr_meta)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( - issue_dict['id'])) - self.issue_id_inc = issue_dict['pkey'] + # self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format(issue_dict['id'])) + self.pr_meta_id_inc = pr_meta_data['pkey'] elif pr_meta_data['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_meta_table.insert().values(pr_meta)) - logging.info(f'Added PR Head {result.inserted_primary_key}') + self.logger.info(f'Added PR Head {result.inserted_primary_key}') self.pr_meta_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 @@ -857,12 +682,12 @@ def query_pr_meta(self, head, base, pr_id): if pr_meta_data['repo']: self.query_pr_repo(pr_meta_data['repo'], pr_side, self.pr_meta_id_inc) else: - logging.info('No new PR Head data to add') + self.logger.info('No new PR Head data to add') - logging.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Comments') + self.logger.info('Querying PR Comments') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/comments?per_page=100&page={}') @@ -876,14 +701,14 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): duplicate_col_map = {'pr_message_ref_src_comment_id': 'id'} #list to hold contributors needing insertion or update - pr_messages = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey) + pr_messages = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") + self.logger.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") for 
pr_msg_dict in pr_messages: if pr_msg_dict['user'] and 'login' in pr_msg_dict['user']: - cntrb_id = find_id_from_login(self, pr_msg_dict['user']['login']) + cntrb_id = self.find_id_from_login(pr_msg_dict['user']['login']) else: cntrb_id = 1 @@ -901,12 +726,11 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.message_table.insert().values(msg)) - logging.info(f'Added PR Comment {result.inserted_primary_key}') - self.msg_id_inc = int(result.inserted_primary_key[0]) + self.logger.info(f'Added PR Comment {result.inserted_primary_key}') pr_msg_ref = { 'pull_request_id': pr_id, - 'msg_id': self.msg_id_inc, + 'msg_id': int(result.inserted_primary_key[0]), 'pr_message_ref_src_comment_id': pr_msg_dict['id'], 'pr_message_ref_src_node_id': pr_msg_dict['node_id'], 'tool_source': self.tool_source, @@ -917,15 +741,14 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): result = self.db.execute( self.pull_request_message_ref_table.insert().values(pr_msg_ref) ) - logging.info(f'Added PR Message Ref {result.inserted_primary_key}') - self.pr_msg_ref_id_inc = int(result.inserted_primary_key[0]) + self.logger.info(f'Added PR Message Ref {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR Message data for PR with id {pr_id}') + self.logger.info(f'Finished adding PR Message data for PR with id {pr_id}') def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): - logging.info(f'Querying PR {pr_repo_type} repo') + self.logger.info(f'Querying PR {pr_repo_type} repo') table = 'pull_request_repo' duplicate_col_map = {'pr_src_repo_id': 'id'} @@ -935,13 +758,13 @@ def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - pr_repo_table_values = get_table_values(self, cols_query, [table]) + pr_repo_table_values = self.get_table_values(cols_query, [table]) - new_pr_repo = assign_tuple_action(self, [pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, + new_pr_repo = self.assign_tuple_action([pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, table_pkey)[0] if new_pr_repo['owner'] and 'login' in new_pr_repo['owner']: - cntrb_id = find_id_from_login(self, new_pr_repo['owner']['login']) + cntrb_id = self.find_id_from_login(new_pr_repo['owner']['login']) else: cntrb_id = 1 @@ -962,20 +785,8 @@ def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): if new_pr_repo['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_repo_table.insert().values(pr_repo)) - logging.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') + self.logger.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') - - def get_owner_repo(self, github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - + self.logger.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') diff --git a/workers/pull_request_worker/pull_request_worker/runtime.py b/workers/pull_request_worker/pull_request_worker/runtime.py deleted file mode 100644 --- a/workers/pull_request_worker/pull_request_worker/runtime.py +++ /dev/null @@ -1,109 +0,0 @@ -import json, logging, os, click -import requests -from flask import Flask, 
Response, jsonify, request -from pull_request_worker.worker import GHPullRequestWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': # POST a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.gh_pr_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.gh_pr_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51239, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'pull_request_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - print("New pull request worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.pull_request_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - - app.gh_pr_worker = GHPullRequestWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + 
str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.gh_pr_worker._child is not None: - app.gh_pr_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/pull_request_worker/runtime.py b/workers/pull_request_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/pull_request_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.pull_request_worker.pull_request_worker import GitHubPullRequestWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubPullRequestWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/pull_request_worker/setup.py b/workers/pull_request_worker/setup.py --- a/workers/pull_request_worker/setup.py +++ b/workers/pull_request_worker/setup.py @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'pull_request_worker_start=pull_request_worker.runtime:main', + 'pull_request_worker_start=workers.pull_request_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/template_worker/template_worker/__init__.py b/workers/release_worker/__init__.py similarity index 50% rename from workers/template_worker/template_worker/__init__.py rename to workers/release_worker/__init__.py --- a/workers/template_worker/template_worker/__init__.py +++ b/workers/release_worker/__init__.py @@ -1,4 +1,4 @@ -"""gh_repo_info_worker - Augur Worker that collects GitHub Repo Info data""" +"""gh_release_worker - Augur Worker that collects GitHub Repo Info data""" __version__ = '0.0.0' __author__ = 'Augur Team <[email protected]>' diff --git a/workers/release_worker/release_worker.py b/workers/release_worker/release_worker.py new file mode 100644 --- /dev/null +++ b/workers/release_worker/release_worker.py @@ -0,0 +1,154 @@ +import logging, os, sys, time, requests, json +from datetime import datetime +from multiprocessing import Process, Queue +from urllib.parse import urlparse +import pandas as pd +import sqlalchemy as s +from sqlalchemy import MetaData +from sqlalchemy.ext.automap import automap_base +from workers.worker_base import Worker + +#TODO - fully edit to match releases +class ReleaseWorker(Worker): + def __init__(self, config={}): + + worker_type = "release_worker" + + # Define what this worker can be given and know how to interpret + given = [['github_url']] + models = ['releases'] + + # Define the tables needed to insert, update, or delete on + data_tables = ['releases'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Define data collection info + self.tool_source = 'Release Worker' + self.tool_version = '0.0.1' + self.data_source = 'GitHub API' + + def releases_model(self, task, repo_id): + + github_url = 
task['given']['github_url'] + + self.logger.info("Beginning filling the releases model for repo: " + github_url + "\n") + + owner, repo = self.get_owner_repo(github_url) + + url = 'https://api.github.com/graphql' + + query = """ + { + repository(owner:"%s", name:"%s"){ + id + releases(orderBy: {field: CREATED_AT, direction: ASC}, last: %d) { + edges { + node { + name + publishedAt + createdAt + description + id + isDraft + isPrerelease + tagName + url + updatedAt + author { + name + company + } + } + } + } + } + } + """ % (owner, repo, 10) + + # Hit the graphql endpoint and retry 3 times in case of failure + num_attempts = 0 + success = False + while num_attempts < 3: + self.logger.info("Hitting endpoint: {} ...\n".format(url)) + r = requests.post(url, json={'query': query}, headers=self.headers) + self.update_gh_rate_limit(r) + + try: + data = r.json() + except: + data = json.loads(json.dumps(r.text)) + + if 'errors' in data: + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': + self.update_gh_rate_limit(r) + continue + + if 'data' in data: + success = True + data = data['data']['repository'] + break + else: + self.logger.info("Request returned a non-data dict: {}\n".format(data)) + if data['message'] == 'Not Found': + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + break + if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': + self.update_gh_rate_limit(r, temporarily_disable=True) + continue + if data['message'] == 'Bad credentials': + self.update_gh_rate_limit(r, bad_credentials=True) + continue + num_attempts += 1 + if not success: + self.register_task_failure(task, repo_id, "Failed to hit endpoint: {}".format(url)) + return + + if 'repository' in data: + if 'releases' in data['repository']: + if 'edges' in data['repository']['releases']: + for n in data['repository']['releases']['edges']: + if 'node' in n: + release = n['node'] + insert_release(self, repo_id, owner, release) + self.logger.info("There's no release to insert. 
Current node is not available in releases: {}\n".format(n)) + self.logger.info("There are no releases to insert for current repository: {}\n".format(data)) + self.logger.info("Graphql response does not contain releases: {}\n".format(data)) + self.logger.info("Graphql response does not contain repository: {}\n".format(data)) + + def insert_release(self, repo_id, owner, release): + author = release['author']['name']+'_'+release['author']['company'] + # Put all data together in format of the table + self.logger.info(f'Inserting release for repo with id:{repo_id}, owner:{owner}, release name:{release["name"]}\n') + release_inf = { + 'release_id': release['id'], + 'repo_id': repo_id, + 'release_name': release['name'], + 'release_description': release['description'], + 'release_author': release['author'], + 'release_created_at': release['createdAt'], + 'release_published_at': release['publishedAt'], + 'release_updated_at': release['updatedAt'], + 'release_is_draft': release['isDraft'], + 'release_is_prerelease': release['isPrerelease'], + 'release_tag_name': release['tagName'], + 'release_url': release['url'], + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source + } + + result = self.db.execute(self.releases_table.insert().values(release_inf)) + self.logger.info(f"Primary Key inserted into releases table: {result.inserted_primary_key}\n") + self.results_counter += 1 + + self.logger.info(f"Inserted info for {owner}/{repo}/{release['name']}\n") + + #Register this task as completed + self.register_task_completion(task, release_id, "releases") + return + + diff --git a/workers/release_worker/runtime.py b/workers/release_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/release_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.release_worker.release_worker import ReleaseWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ReleaseWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/metric_status_worker/setup.py b/workers/release_worker/setup.py similarity index 85% rename from workers/metric_status_worker/setup.py rename to workers/release_worker/setup.py --- a/workers/metric_status_worker/setup.py +++ b/workers/release_worker/setup.py @@ -5,22 +5,20 @@ from setuptools import find_packages from setuptools import setup - def read(filename): filename = os.path.join(os.path.dirname(__file__), filename) text_type = type(u"") with io.open(filename, mode="r", encoding='utf-8') as fd: return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) - setup( - name="metric_status_worker", + name="release_worker", version="0.1.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", author_email="[email protected]", - description="Augur Worker that collects GitHub data", + description="Augur Worker that collects data about GitHub releases", packages=find_packages(exclude=('tests',)), install_requires=[ 'flask', @@ -30,7 +28,7 @@ def read(filename): ], 
entry_points={ 'console_scripts': [ - 'metric_status_worker_start=metric_status_worker.runtime:main', + 'release_worker_start=workers.release_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/repo_info_worker/__init__.py b/workers/repo_info_worker/__init__.py new file mode 100644 diff --git a/workers/repo_info_worker/repo_info_worker/worker.py b/workers/repo_info_worker/repo_info_worker.py similarity index 61% rename from workers/repo_info_worker/repo_info_worker/worker.py rename to workers/repo_info_worker/repo_info_worker.py --- a/workers/repo_info_worker/repo_info_worker/worker.py +++ b/workers/repo_info_worker/repo_info_worker.py @@ -1,15 +1,22 @@ import logging, os, sys, time, requests, json from datetime import datetime from multiprocessing import Process, Queue -from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base from workers.worker_base import Worker +# NOTE: This worker primarily inserts rows into the REPO_INFO table, which serves the primary purposes of +# 1. Displaying discrete metadata like "number of forks" and how they change over time +# 2. Validating other workers, like those related to pull requests, issues, and commits. Our totals should be at or very near the totals in the repo_info table. + +# This table also updates the REPO table in 2 cases: +# 1. Recognizing when a repository is a forked repository by updating the "forked_from" field and +# 2. Recognizing when a repository is archived, and recording the data we observed the change in status. + class RepoInfoWorker(Worker): - def __init__(self, config): + def __init__(self, config={}): + + worker_type = "repo_info_worker" # Define what this worker can be given and know how to interpret given = [['github_url']] @@ -20,7 +27,7 @@ def __init__(self, config): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) # Define data collection info self.tool_source = 'Repo Info Worker' @@ -31,7 +38,7 @@ def repo_info_model(self, task, repo_id): github_url = task['given']['github_url'] - logging.info("Beginning filling the repo_info model for repo: " + github_url + "\n") + self.logger.info("Beginning filling the repo_info model for repo: " + github_url + "\n") owner, repo = self.get_owner_repo(github_url) @@ -100,7 +107,7 @@ def repo_info_model(self, task, repo_id): num_attempts = 0 success = False while num_attempts < 3: - logging.info("Hitting endpoint: {} ...\n".format(url)) + self.logger.info("Hitting endpoint: {} ...\n".format(url)) r = requests.post(url, json={'query': query}, headers=self.headers) self.update_gh_rate_limit(r) @@ -110,8 +117,8 @@ def repo_info_model(self, task, repo_id): data = json.loads(json.dumps(r.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) - if data['errors']['message'] == 'API rate limit exceeded': + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': self.update_gh_rate_limit(r) continue @@ -120,9 +127,9 @@ def repo_info_model(self, task, repo_id): data = data['data']['repository'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - 
logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) break if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': self.update_gh_rate_limit(r, temporarily_disable=True) @@ -132,14 +139,22 @@ def repo_info_model(self, task, repo_id): continue num_attempts += 1 if not success: - self.register_task_failure(task, repo_id, "Failed to hit endpoint: {}".format(url)) + self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url)) return # Get committers count info that requires seperate endpoint committers_count = self.query_committers_count(owner, repo) + # Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table. + forked = self.is_forked(owner, repo) + archived = self.is_archived(owner, repo) + if archived is not False: + archived_date_collected = archived + archived = True + else: + archived_date_collected = None # Put all data together in format of the table - logging.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') + self.logger.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') rep_inf = { 'repo_id': repo_id, 'last_updated': data['updatedAt'] if 'updatedAt' in data else None, @@ -174,19 +189,22 @@ def repo_info_model(self, task, repo_id): 'tool_source': self.tool_source, 'tool_version': self.tool_version, 'data_source': self.data_source + # 'forked_from': forked, + # 'repo_archived': archived, + # 'repo_archived_date_collected': archived_date_collected } result = self.db.execute(self.repo_info_table.insert().values(rep_inf)) - logging.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") + self.logger.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") self.results_counter += 1 - logging.info(f"Inserted info for {owner}/{repo}\n") + self.logger.info(f"Inserted info for {owner}/{repo}\n") - #Register this task as completed - self.register_task_completion(task, repo_id, "repo_info") + # Register this task as completed + self.register_task_completion(self.task, repo_id, "repo_info") def query_committers_count(self, owner, repo): - logging.info('Querying committers count\n') + self.logger.info('Querying committers count\n') url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100' committers = 0 @@ -205,3 +223,63 @@ def query_committers_count(self, owner, repo): return committers + def is_forked(self, owner, repo): #/repos/:owner/:repo parent + logging.info('Querying parent info to verify if the repo is forked\n') + url = f'https://api.github.com/repos/{owner}/{repo}' + + r = requests.get(url, headers=self.headers) + self.update_gh_rate_limit(r) + + data = self.get_repo_data(url, r) + + if 'fork' in data: + if 'parent' in data: + return data['parent']['full_name'] + return 'Parent not available' + + return False + + def is_archived(self, owner, repo): + logging.info('Querying committers count\n') + url = f'https://api.github.com/repos/{owner}/{repo}' + + r = requests.get(url, headers=self.headers) + self.update_gh_rate_limit(r) + + data = self.get_repo_data(url, r) + + if 'archived' in data: + if data['archived']: + if 'updated_at' in data: + return data['updated_at'] + return 'Date not available' + 
return False + + return False + + def get_repo_data(self, url, response): + success = False + try: + data = response.json() + except: + data = json.loads(json.dumps(response.text)) + + if 'errors' in data: + logging.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': + self.update_gh_rate_limit(response) + + if 'id' in data: + success = True + else: + logging.info("Request returned a non-data dict: {}\n".format(data)) + if data['message'] == 'Not Found': + logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': + self.update_gh_rate_limit(r, temporarily_disable=True) + if data['message'] == 'Bad credentials': + self.update_gh_rate_limit(r, bad_credentials=True) + if not success: + self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url)) + + return data diff --git a/workers/repo_info_worker/repo_info_worker/__init__.py b/workers/repo_info_worker/repo_info_worker/__init__.py deleted file mode 100644 --- a/workers/repo_info_worker/repo_info_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""gh_repo_info_worker - Augur Worker that collects GitHub Repo Info data""" - -__version__ = '0.0.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/repo_info_worker/repo_info_worker/runtime.py b/workers/repo_info_worker/repo_info_worker/runtime.py deleted file mode 100644 --- a/workers/repo_info_worker/repo_info_worker/runtime.py +++ /dev/null @@ -1,55 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from repo_info_worker.worker import RepoInfoWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'repo_info_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.repo_info_worker.{}".format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - 'gh_api_key': read_config('Database', 'key', 'AUGUR_GITHUB_API_KEY', 'key') - } - - #create instance of the worker - app.gh_repo_info_worker = RepoInfoWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - if app.gh_repo_info_worker._child is not 
None: - app.gh_repo_info_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/repo_info_worker/runtime.py b/workers/repo_info_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/repo_info_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.repo_info_worker.repo_info_worker import RepoInfoWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = RepoInfoWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/repo_info_worker/setup.py b/workers/repo_info_worker/setup.py --- a/workers/repo_info_worker/setup.py +++ b/workers/repo_info_worker/setup.py @@ -19,16 +19,15 @@ def read(filename): author="Augurlabs", author_email="[email protected]", description="Augur Worker that collects general data about a repo on GitHub", - packages=find_packages(exclude=('tests',)), + packages=find_packages(), install_requires=[ 'flask', 'requests', - 'psycopg2-binary', - 'click' + 'psycopg2-binary' ], entry_points={ 'console_scripts': [ - 'repo_info_worker_start=repo_info_worker.runtime:main', + 'repo_info_worker_start=workers.repo_info_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/standard_methods.py b/workers/standard_methods.py deleted file mode 100644 --- a/workers/standard_methods.py +++ /dev/null @@ -1,712 +0,0 @@ -""" Helper methods constant across all workers """ -import requests, datetime, time, traceback, json, os, sys, math -import sqlalchemy as s -import pandas as pd -import os -import sys, logging -from urllib.parse import urlparse - -def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): - """ map objects => { *our db col* : *gh json key*} """ - need_insertion_count = 0 - need_update_count = 0 - for i, obj in enumerate(new_data): - if type(obj) != dict: - logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) - continue - - obj['flag'] = 'none' # default of no action needed - existing_tuple = None - for db_dupe_key in list(duplicate_col_map.keys()): - - if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): - if table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'): - existing_tuple = table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] - continue - - logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) - obj['flag'] = 'need_insertion' - need_insertion_count += 1 - break - - if obj['flag'] == 'need_insertion': - logging.info('Already determined that current tuple needs insertion, skipping checking updates. 
' - 'Moving to next tuple.\n') - continue - - # If we need to check the values of the existing tuple to determine if an update is needed - for augur_col, value_check in value_update_col_map.items(): - not_nan_check = not (pd.isna(value_check) and pd.isna(existing_tuple[augur_col])) if value_check is not None else True - if existing_tuple[augur_col] != value_check and not_nan_check: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - if obj['flag'] == 'need_update': - logging.info('Already determined that current tuple needs update, skipping checking further updates. ' - 'Moving to next tuple.\n') - continue - - # Now check the existing tuple's values against the response values to determine if an update is needed - for col in update_col_map.keys(): - if update_col_map[col] not in obj: - continue - if obj[update_col_map[col]] == existing_tuple[col]: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + - "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count)) - return new_data - -def check_duplicates(new_data, table_values, key): - need_insertion = [] - for obj in new_data: - if type(obj) == dict: - if not table_values.isin([obj[key]]).any().any(): - need_insertion.append(obj) - # else: - # logging.info("Tuple with github's {} key value already".format(key) + - # "exists in our db: {}\n".format(str(obj[key]))) - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + - "was reduced to {} tuples.\n".format(str(len(need_insertion)))) - return need_insertion - -def connect_to_broker(self): - connected = False - for i in range(5): - try: - logging.info("attempt {}\n".format(i)) - if i > 0: - time.sleep(10) - requests.post('http://{}:{}/api/unstable/workers'.format( - self.config['broker_host'],self.config['broker_port']), json=self.specs) - logging.info("Connection to the broker was successful\n") - connected = True - break - except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker. Trying again...\n') - if not connected: - sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n') - -def dump_queue(queue): - """ - Empties all pending items in a queue and returns them in a list. 
- """ - result = [] - queue.put("STOP") - for i in iter(queue.get, 'STOP'): - result.append(i) - # time.sleep(.1) - return result - -def find_id_from_login(self, login): - idSQL = s.sql.text(""" - SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' - """.format(login)) - rs = pd.read_sql(idSQL, self.db, params={}) - data_list = [list(row) for row in rs.itertuples(index=False)] - try: - return data_list[0][0] - except: - logging.info("contributor needs to be added...") - - cntrb_url = ("https://api.github.com/users/" + login) - logging.info("Hitting endpoint: {} ...\n".format(cntrb_url)) - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'] if 'login' in contributor else None, - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, - "cntrb_canonical": None, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - return find_id_from_login(self, login) - -def get_owner_repo(github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - -def get_max_id(self, table, column, default=25150, operations_table=False): - maxIdSQL = s.sql.text(""" - SELECT max({0}.{1}) AS {1} - FROM {0} - """.format(table, column)) - db = self.db if not operations_table else self.helper_db - rs = pd.read_sql(maxIdSQL, db, params={}) - if rs.iloc[0][column] is not None: - max_id = int(rs.iloc[0][column]) + 1 - logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) - else: - max_id = default - logging.info("Could not find max id for {} column in the {} table... 
using default set to: \ - {}\n".format(column, table, max_id)) - return max_id - -def get_table_values(self, cols, tables, where_clause=""): - table_str = tables[0] - del tables[0] - - col_str = cols[0] - del cols[0] - - for table in tables: - table_str += ", " + table - for col in cols: - col_str += ", " + col - - tableValuesSQL = s.sql.text(""" - SELECT {} FROM {} {} - """.format(col_str, table_str, where_clause)) - logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL)) - values = pd.read_sql(tableValuesSQL, self.db, params={}) - return values - -def init_oauths(self): - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauthSQL = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(self.config['key'])) - for oauth in [{'oauth_id': 0, 'access_token': self.config['key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}\n".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - -def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}): - # Paginate backwards through all the tuples but get first page in order - # to determine if there are multiple pages and if the 1st page covers all - update_keys = list(update_col_map.keys()) if update_col_map else [] - update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] - cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - table_values = get_table_values(self, cols_query, [table], where_clause) - - i = 1 - multiple_pages = False - tuples = [] - while True: - num_attempts = 0 - success = False - while num_attempts < 3: - logging.info("Hitting endpoint: " + url.format(i) + " ...\n") - r = requests.get(url=url.format(i), headers=self.headers) - update_gh_rate_limit(self, r) - logging.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) - - try: - j = r.json() - except: - j = json.loads(json.dumps(r.text)) - - if type(j) != dict and type(j) != str: - success = True - break - elif type(j) == dict: - logging.info("Request returned a dict: {}\n".format(j)) - if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) - break - if j['message'] == 'You have 
triggered an abuse detection mechanism. Please wait a few minutes before you try again.': - num_attempts -= 1 - update_gh_rate_limit(self, r, temporarily_disable=True) - if j['message'] == 'Bad credentials': - update_gh_rate_limit(self, r, bad_credentials=True) - elif type(j) == str: - logging.info("J was string: {}\n".format(j)) - if '<!DOCTYPE html>' in j: - logging.info("HTML was returned, trying again...\n") - elif len(j) == 0: - logging.info("Empty string, trying again...\n") - else: - try: - j = json.loads(j) - success = True - break - except: - pass - num_attempts += 1 - if not success: - break - - # Find last page so we can decrement from there - if 'last' in r.links and not multiple_pages and not self.finishing_task: - param = r.links['last']['url'][-6:] - i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") - multiple_pages = True - elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") - elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ..." - " excess rate limit requests will be made\n") - - if len(j) == 0: - logging.info("Response was empty, breaking from pagination.\n") - break - - # Checking contents of requests with what we already have in the db - j = assign_tuple_action(self, j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) - if not j: - logging.info("Assigning tuple action failed, moving to next page.\n") - i = i + 1 if self.finishing_task else i - 1 - continue - try: - to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none'] - except Exception as e: - logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) - i = i + 1 if self.finishing_task else i - 1 - continue - if len(to_add) == 0 and multiple_pages and 'last' in r.links: - logging.info("{}".format(r.links['last'])) - if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown tuples, breaking from pagination.\n") - break - tuples += to_add - - i = i + 1 if self.finishing_task else i - 1 - - # Since we already wouldve checked the first page... 
break - if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") - break - - return tuples - -def query_github_contributors(self, entry_info, repo_id): - - """ Data collection function - Query the GitHub API for contributors - """ - logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n") - - github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] - - # Extract owner/repo from the url for the endpoint - path = urlparse(github_url) - split = path[2].split('/') - - owner = split[1] - name = split[2] - - # Handles git url case by removing the extension - if ".git" in name: - name = name[:-4] - - # Set the base of the url and place to hold contributors to insert - contributors_url = ("https://api.github.com/repos/" + owner + "/" + - name + "/contributors?per_page=100&page={}") - - # Get contributors that we already have stored - # Set our duplicate and update column map keys (something other than PK) to - # check dupicates/needed column updates with - table = 'contributors' - table_pkey = 'cntrb_id' - update_col_map = {'cntrb_email': 'email'} - duplicate_col_map = {'cntrb_login': 'login'} - - #list to hold contributors needing insertion or update - contributors = paginate(self, contributors_url, duplicate_col_map, update_col_map, table, table_pkey) - - logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") - - for repo_contributor in contributors: - try: - # Need to hit this single contributor endpoint to get extra data including... - # `created at` - # i think that's it - cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - canonical_email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'], - "cntrb_created_at": contributor['created_at'], - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - # "cntrb_type": , dont have a use for this as of now ... 
let it default to null - "cntrb_canonical": canonical_email, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - - # Commit insertion to table - if repo_contributor['flag'] == 'need_update': - result = self.db.execute(self.contributors_table.update().where( - self.history_table.c.cntrb_email==email).values(cntrb)) - logging.info("Updated tuple in the contributors table with existing email: {}".format(email)) - self.cntrb_id_inc = repo_contributor['pkey'] - elif repo_contributor['flag'] == 'need_insertion': - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) - self.results_counter += 1 - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - # Increment our global track of the cntrb id for the possibility of it being used as a FK - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - except Exception as e: - logging.info("Caught exception: {}".format(e)) - logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) - continue - -def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): - """ - Read a variable in specified section of the config file, unless provided an environment variable - - :param section: location of given variable - :param name: name of variable - """ - config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path) - _config_file_name = 'augur.config.json' - _config_bad = False - _already_exported = {} - _runtime_location = 'runtime/' - _default_config = {} - _config_file = None - - try: - _config_file = open(config_file_path, 'r+') - except: - print('Couldn\'t open {}'.format(_config_file_name)) - - # Load the config file - try: - config_text = _config_file.read() - _config = json.loads(config_text) - except json.decoder.JSONDecodeError as e: - if not _config_bad: - _using_config_file = False - print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: {}'.format(config_file_path, str(e))) - _config = _default_config - - value = None - if environment_variable is not None: - value = os.getenv(environment_variable) - if value is None: - try: - if name is not None: - value = _config[section][name] - else: - value = _config[section] - except Exception as e: - value = default - if not section in _config: - _config[section] = {} - - return value - - -def record_model_process(self, repo_id, model): - - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Stopped", - "total_results": self.results_counter - } - if self.finishing_task: - result = self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - self.history_id += 1 - else: - result = self.helper_db.execute(self.history_table.insert().values(task_history)) - logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) - self.history_id = int(result.inserted_primary_key[0]) - -def register_task_completion(self, task, repo_id, model): - # Task to send back to broker - task_completed = { - 'worker_id': self.config['id'], - 'job_type': "MAINTAIN", - 'repo_id': repo_id, - 'job_model': model - } - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] if 'git_url' in task['given'] else "INVALID_GIVEN" - if key == 'INVALID_GIVEN': - register_task_failure(self, task, repo_id, "INVALID_GIVEN: not github nor git url") - return - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Success", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job completion for: " + str(task_completed) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where( - self.job_table.c.job_model==model).values(updated_job)) - logging.info("Updated job process for model: " + model + "\n") - - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - - # Reset results counter for next task - self.results_counter = 0 - -def register_task_failure(self, task, repo_id, e): - - logging.info("Worker ran into an error for task: {}\n".format(task)) - logging.info("Printing traceback...\n") - tb = traceback.format_exc() - logging.info(tb) - - logging.info(f'This task inserted {self.results_counter} tuples before failure.\n') - logging.info("Notifying broker and logging task failure in database...\n") - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - url = task['given'][key] - - """ Query 
all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(url)) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - task['worker_id'] = self.config['id'] - try: - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=task) - except requests.exceptions.ConnectionError: - logging.error('Could not send task failure message to the broker\n') - except Exception: - logging.exception('An error occured while informing broker about task failure\n') - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": task['models'][0], - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error in the history table for: " + str(task) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==task['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + task['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - -def retrieve_tuple(self, key_values, tables): - table_str = tables[0] - del tables[0] - - key_values_items = list(key_values.items()) - for col, value in [key_values_items[0]]: - where_str = col + " = '" + value + "'" - del key_values_items[0] - - for col, value in key_values_items: - where_str += ' AND ' + col + " = '" + value + "'" - for table in tables: - table_str += ", " + table - - retrieveTupleSQL = s.sql.text(""" - SELECT * FROM {} WHERE {} - """.format(table_str, where_str)) - values = json.loads(pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")) - return values - -def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): - # Try to get rate limit from request headers, sometimes it does not work (GH's issue) - # In that case we just decrement from last recieved header count - if bad_credentials and len(self.oauths) > 1: - logging.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) - del self.oauths[0] - - if temporarily_disable: - logging.info("Github thinks we are abusing their api. 
Preventing use of this key until it resets...\n") - self.oauths[0]['rate_limit'] = 0 - else: - try: - self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - logging.info("Recieved rate limit from headers\n") - except: - self.oauths[0]['rate_limit'] -= 1 - logging.info("Headers did not work, had to decrement\n") - logging.info("Updated rate limit, you have: " + - str(self.oauths[0]['rate_limit']) + " requests remaining.\n") - if self.oauths[0]['rate_limit'] <= 0: - try: - reset_time = response.headers['X-RateLimit-Reset'] - except Exception as e: - logging.info("Could not get reset time from headers because of error: {}".format(e)) - logging.info('Headers: {}'.format(response.headers)) - reset_time = 3600 - time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() - logging.info("Rate limit exceeded, checking for other available keys to use.\n") - - # We will be finding oauth with the highest rate limit left out of our list of oauths - new_oauth = self.oauths[0] - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] - for oauth in other_oauths: - logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - response = requests.get(url=url, headers=self.headers) - oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() - - # Update oauth to switch to if a higher limit is found - if oauth['rate_limit'] > new_oauth['rate_limit']: - logging.info("Higher rate limit found in oauth: {}\n".format(oauth)) - new_oauth = oauth - elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: - logging.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) - new_oauth = oauth - - if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: - logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) - time.sleep(new_oauth['seconds_to_reset']) - - # Make new oauth the 0th element in self.oauths so we know which one is in use - index = self.oauths.index(new_oauth) - self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] - logging.info("Using oauth: {}\n".format(self.oauths[0])) - - # Change headers to be using the new oauth's key - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} diff --git a/workers/template_worker/__init__.py b/workers/template_worker/__init__.py new file mode 100644 diff --git a/workers/template_worker/runtime.py b/workers/template_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/template_worker/runtime.py @@ -0,0 +1,23 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.template_worker.template_worker import TemplateWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ + Creates the Flask app and data collection worker, then starts the Gunicorn server + """ + app = Flask(__name__) + app.worker = TemplateWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + 
app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/template_worker/setup.py b/workers/template_worker/setup.py --- a/workers/template_worker/setup.py +++ b/workers/template_worker/setup.py @@ -19,16 +19,15 @@ def read(filename): author="Augur Team", author_email="[email protected]", description="Template worker to be used as an example", - packages=find_packages(exclude=('tests',)), + packages=find_packages(), install_requires=[ 'flask', 'requests', - 'psycopg2-binary', - 'click' + 'psycopg2-binary' ], entry_points={ 'console_scripts': [ - 'template_worker_start=template_worker.runtime:main', + 'template_worker_start=workers.template_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/template_worker/template_worker/worker.py b/workers/template_worker/template_worker.py similarity index 76% rename from workers/template_worker/template_worker/worker.py rename to workers/template_worker/template_worker.py --- a/workers/template_worker/template_worker/worker.py +++ b/workers/template_worker/template_worker.py @@ -6,12 +6,16 @@ from workers.worker_base import Worker class TemplateWorker(Worker): - def __init__(self, config): + def __init__(self, config={}): - # Define what this worker can be given and know how to interpret + # Define the worker's type, which will be used for self identification. + # Should be unique among all workers and is the same key used to define + # this worker's settings in the configuration file. + worker_type = "template_worker" + # Define what this worker can be given and know how to interpret # given is usually either [['github_url']] or [['git_url']] (depending if your - # worker is exclusive to repos that are on the GitHub platform) + # worker is exclusive to repos that are on the GitHub platform) given = [[]] # The name the housekeeper/broker use to distinguish the data model this worker can fill @@ -28,7 +32,14 @@ def __init__(self, config): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Do any additional configuration after the general initialization has been run + self.config.update(config) + + # If you need to do some preliminary interactions with the database, these MUST go + # in the model method. The database connection is instantiated only inside of each + # data collection process # Define data collection info self.tool_source = 'Fake Template Worker' @@ -54,8 +65,11 @@ def fake_data_model(self, task, repo_id): } :param repo_id: the collect() method queries the repo_id given the git/github url and passes it along to make things easier. An int such as: 27869 + """ + # Any initial database instructions, like finding the last tuple inserted or generate the next ID value + # Collection and insertion of data happens here # ... 
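The template's fake_data_model above leaves its body as placeholder comments. The following is a minimal sketch, not part of the patch itself, of how such a model method could be filled in. It assumes a hypothetical fake_data table is listed in data_tables (so the base class attaches self.fake_data_table); it only uses helpers that the worker_base.Worker class changed later in this patch provides (get_max_id, register_task_completion, self.db, self.logger, results_counter), and the table, column, and model names are illustrative.

def fake_data_model(self, task, repo_id):
    # Any initial database instructions, e.g. pick the next primary key value to insert
    fake_data_id = self.get_max_id('fake_data', 'fake_data_id')

    # Collection and insertion of data happens here (a single placeholder row)
    record = {
        'fake_data_id': fake_data_id,
        'repo_id': repo_id,
        'value': 'example',
        'tool_source': self.tool_source,
        'tool_version': self.tool_version,
        'data_source': self.data_source
    }
    result = self.db.execute(self.fake_data_table.insert().values(record))
    self.logger.info(f"Inserted fake data row: {result.inserted_primary_key}")
    self.results_counter += 1

    # Notify the broker/housekeeper that this task finished successfully
    self.register_task_completion(task, repo_id, 'fake_data')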
diff --git a/workers/template_worker/template_worker/runtime.py b/workers/template_worker/template_worker/runtime.py deleted file mode 100644 --- a/workers/template_worker/template_worker/runtime.py +++ /dev/null @@ -1,58 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from template_worker.worker import TemplateWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'repo_info_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.template_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port) - } - - #create instance of the worker - app.template_worker = TemplateWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - - if app.template_worker._child is not None: - app.template_worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - - - diff --git a/workers/util.py b/workers/util.py --- a/workers/util.py +++ b/workers/util.py @@ -1,5 +1,6 @@ import os, json, requests, logging from flask import Flask, Response, jsonify, request +import gunicorn.app.base def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): """ @@ -47,7 +48,7 @@ def read_config(section, name=None, environment_variable=None, default=None, con return value -def create_server(app, worker): +def create_server(app, worker=None): """ Consists of AUGWOP endpoints for the broker to communicate to this worker Can post a new task to be added to the workers queue Can retrieve current status of the worker @@ -83,4 +84,28 @@ def heartbeat(): def augwop_config(): """ Retrieve worker's config """ - return app.worker.config \ No newline at end of file + return app.worker.config + +class WorkerGunicornApplication(gunicorn.app.base.BaseApplication): + + def __init__(self, app): + self.options = { + 'bind': '%s:%s' % (app.worker.config["host"], app.worker.config["port"]), + 'workers': 1, + 'errorlog': app.worker.config['server_logfile'], + 'accesslog': 
app.worker.config['server_logfile'], + 'loglevel': app.worker.config['log_level'], + 'capture_output': app.worker.config['capture_output'] + } + + self.application = app + super().__init__() + + def load_config(self): + config = {key: value for key, value in self.options.items() + if key in self.cfg.settings and value is not None} + for key, value in config.items(): + self.cfg.set(key.lower(), value) + + def load(self): + return self.application diff --git a/workers/value_worker/__init__.py b/workers/value_worker/__init__.py new file mode 100644 diff --git a/workers/value_worker/runtime.py b/workers/value_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/value_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.value_worker.value_worker import ValueWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ValueWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) \ No newline at end of file diff --git a/workers/value_worker/setup.py b/workers/value_worker/setup.py --- a/workers/value_worker/setup.py +++ b/workers/value_worker/setup.py @@ -5,33 +5,23 @@ from setuptools import find_packages from setuptools import setup - -def read(filename): - filename = os.path.join(os.path.dirname(__file__), filename) - text_type = type(u"") - with io.open(filename, mode="r", encoding='utf-8') as fd: - return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) - - setup( name="value_worker", version="0.1.0", url="https://github.com/chaoss/augur", license='MIT', - author="Augurlabs", author_email="[email protected]", - description="Augur Worker that gathers value data", - long_description=read("README.md"), - packages=find_packages(exclude=('tests',)), - - install_requires=['flask', 'requests', 'psycopg2-binary', 'click'], - + install_requires=[ + 'flask', + 'requests', + 'psycopg2-binary', + ], entry_points={ 'console_scripts': [ - 'value_worker_start=value_worker.runtime:main', + 'value_worker_start=workers.value_worker.runtime:main', ], }, diff --git a/workers/value_worker/value_worker.py b/workers/value_worker/value_worker.py new file mode 100644 --- /dev/null +++ b/workers/value_worker/value_worker.py @@ -0,0 +1,94 @@ +import os, subprocess +from datetime import datetime +import logging +import requests +import json +from urllib.parse import quote +from multiprocessing import Process, Queue + +import pandas as pd +import sqlalchemy as s +from sqlalchemy.ext.automap import automap_base +from sqlalchemy import MetaData +from workers.worker_base import Worker + +class ValueWorker(Worker): + def __init__(self, config={}): + + worker_type = "value_worker" + + # Define what this worker can be given and know how to interpret + given = [['git_url']] + models = ['value'] + + # Define the tables needed to insert, update, or delete on + data_tables = ['repo_labor'] + operations_tables = ['worker_history', 'worker_job'] + + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + 
self.config.update({
+ 'repo_directory': self.augur_config.get_value('Workers', 'facade_worker')['repo_directory']
+ })
+
+ self.tool_source = 'Value Worker'
+ self.tool_version = '0.1.0'
+ self.data_source = 'SCC'
+
+ def value_model(self, entry_info, repo_id):
+ """ Data collection and storage method
+ """
+ self.logger.info(entry_info)
+ self.logger.info(repo_id)
+
+ repo_path_sql = s.sql.text("""
+ SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path
+ FROM repo
+ WHERE repo_id = :repo_id
+ """)
+
+ relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1]
+ absolute_repo_path = self.config['repo_directory'] + relative_repo_path
+
+ try:
+ self.generate_value_data(repo_id, absolute_repo_path)
+ except Exception as e:
+ self.logger.error(e)
+
+ self.register_task_completion(entry_info, repo_id, "value")
+
+ def generate_value_data(self, repo_id, path):
+ """Runs scc on repo and stores data in database
+
+ :param repo_id: Repository ID
+ :param path: Absolute path of the Repository
+ """
+ self.logger.info('Running `scc`....')
+ self.logger.info(f'Repo ID: {repo_id}, Path: {path}')
+
+ output = subprocess.check_output([self.config['scc_bin'], '-f', 'json', path])
+ records = json.loads(output.decode('utf8'))
+
+ for record in records:
+ for file in record['Files']:
+ repo_labor = {
+ 'repo_id': repo_id,
+ 'rl_analysis_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'),
+ 'programming_language': file['Language'],
+ 'file_path': file['Location'],
+ 'file_name': file['Filename'],
+ 'total_lines': file['Lines'],
+ 'code_lines': file['Code'],
+ 'comment_lines': file['Comment'],
+ 'blank_lines': file['Blank'],
+ 'code_complexity': file['Complexity'],
+ 'tool_source': self.tool_source,
+ 'tool_version': self.tool_version,
+ 'data_source': self.data_source,
+ 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
+ }
+
+ result = self.db.execute(self.repo_labor_table.insert().values(repo_labor))
+ self.logger.info(f"Added Repo Labor Data: {result.inserted_primary_key}")
diff --git a/workers/value_worker/value_worker/__init__.py b/workers/value_worker/value_worker/__init__.py
deleted file mode 100644
--- a/workers/value_worker/value_worker/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""value_worker - Augur Worker that collects value data"""
-
-__tool_source__ = 'Value Worker'
-__tool_version__ = '0.1.0'
-__data_source__ = 'SCC'
-
-__author__ = 'Augur Team <[email protected]>'
-__all__ = []
-
diff --git a/workers/value_worker/value_worker/runtime.py b/workers/value_worker/value_worker/runtime.py
deleted file mode 100644
--- a/workers/value_worker/value_worker/runtime.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import json
-import logging
-import os
-import subprocess
-import sys
-
-import click
-import requests
-from flask import Flask, Response, jsonify, request
-
-from value_worker.worker import ValueWorker
-
-from workers.standard_methods import read_config
-
-def create_server(app, gw):
- """ Consists of AUGWOP endpoints for the broker to communicate to this worker
- Can post a new task to be added to the workers queue
- Can retrieve current status of the worker
- Can retrieve the workers config object
- """
-
- @app.route("/AUGWOP/task", methods=['POST', 'GET'])
- def augwop_task():
- """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker
- """
- # POST a task to be added to the queue
- if request.method == 'POST':
- logging.info("Sending to work on task: 
{}".format(str(request.json))) - app.value_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.value_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51239, help='Port') [email protected]('--scc-bin', default=f'{os.environ["HOME"]}/go/bin/scc', help='scc binary') -def main(augur_url, host, port, scc_bin): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'value_worker', None, - { - "port": 37300, - "scc_bin": "/home/sean/go/bin/scc" - }) - - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.value_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - 'scc_bin': worker_info['scc_bin'], - 'repo_directory': read_config('Workers', 'facade_worker', None, None)['repo_directory'], - } - - # Create the worker that will be running on this server with specified config - app.value_worker = ValueWorker(config) - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - - app.run(debug=app.debug, host=host, port=worker_port) - if app.value_worker._child is not None: - app.value_worker._child.terminate() - try: - requests.post(f'http://{server["host"]}:{server["port"]}/api/unstable/workers/remove', json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/value_worker/value_worker/worker.py b/workers/value_worker/value_worker/worker.py deleted file mode 100644 --- a/workers/value_worker/value_worker/worker.py +++ /dev/null @@ -1,267 +0,0 @@ -import os, subprocess -from 
datetime import datetime -import logging -import requests -import json -from urllib.parse import quote -from multiprocessing import Process, Queue - -from value_worker import __data_source__, __tool_source__, __tool_version__ -import pandas as pd -import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData -from workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class ValueWorker: - def __init__(self, config, task=None): - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.config = config - - self.db = None - self.value_table = None - - self._task = task - self._queue = Queue() - self._child = None - - self.history_id = None - self.finishing_task = False - self.working_on = None - self.results_counter = 0 - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["value"] - } - ], - "config": [self.config] - } - - self._db_str = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], - self.config['password'], - self.config['host'], - self.config['port'], - self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self._db_str, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self._db_str, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - logging.info("Database connection established...") - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['repo_labor']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.repo_labor_table = Base.classes.repo_labor.__table__ - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - logging.info("ORM setup complete...") - - # Organize different api keys/oauths available - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauth_sql = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(0)) - - for oauth in [{'oauth_id': 0, 'access_token': 0}] + json.loads(pd.read_sql(oauth_sql, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': 
(datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) \ - - datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - - # Send broker hello message - connect_to_broker(self) - logging.info("Connected to the broker...\n") - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "key": "", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['github_api_key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - - self._task = value - self.run() - - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def value_model(self, entry_info, repo_id): - """ Data collection and storage method - """ - logging.info(entry_info) - logging.info(repo_id) - - repo_path_sql = s.sql.text(""" - SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path - FROM repo - WHERE repo_id = :repo_id - """) - - relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1] - absolute_repo_path = self.config['repo_directory'] + relative_repo_path - - try: - self.generate_value_data(repo_id, absolute_repo_path) - except Exception as e: - logging.error(e) - - register_task_completion(self, entry_info, repo_id, "value") - - def generate_value_data(self, repo_id, path): - """Runs scc on repo and stores data in database - - :param repo_id: Repository ID - :param path: Absolute path of the Repostiory - """ - logging.info('Running `scc`....') - logging.info(f'Repo ID: {repo_id}, Path: {path}') - - output = subprocess.check_output([self.config['scc_bin'], '-f', 'json', path]) - records = json.loads(output.decode('utf8')) - - for record in records: - for file in record['Files']: - repo_labor = { - 'repo_id': repo_id, - 'rl_analysis_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'), - 'programming_language': file['Language'], - 'file_path': file['Location'], - 'file_name': file['Filename'], - 'total_lines': file['Lines'], - 'code_lines': file['Code'], - 'comment_lines': file['Comment'], - 'blank_lines': file['Blank'], - 'code_complexity': file['Complexity'], - 'tool_source': __tool_source__, - 'tool_version': __tool_version__, - 'data_source': __data_source__, - 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') - } - - result = self.db.execute(self.repo_labor_table.insert().values(repo_labor)) - 
logging.info(f"Added Repo Labor Data: {result.inserted_primary_key}") - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - """ Query all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'value': - self.value_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() diff --git a/workers/worker_base.py b/workers/worker_base.py --- a/workers/worker_base.py +++ b/workers/worker_base.py @@ -1,22 +1,29 @@ """ Helper methods constant across all workers """ -import requests, datetime, time, traceback, json, os, sys, math +import requests, datetime, time, traceback, json, os, sys, math, logging +from logging import FileHandler, Formatter, StreamHandler from multiprocessing import Process, Queue import sqlalchemy as s import pandas as pd -import os -import sys, logging +from pathlib import Path from urllib.parse import urlparse -from workers.util import read_config from sqlalchemy import MetaData from sqlalchemy.ext.automap import automap_base +from augur.config import AugurConfig +from augur.logging import AugurLogging class Worker(): - def __init__(self, config={}, given=[], models=[], data_tables=[], operations_tables=[]): + ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[]): + + self.worker_type = worker_type self._task = None # task currently being worked on (dict) self._child = None # process of currently running task (multiprocessing process) self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes) + self.data_tables = data_tables + self.operations_tables = operations_tables + self._root_augur_dir = Worker.ROOT_AUGUR_DIR # count of tuples inserted in the database (to store stats for each task in op tables) self.results_counter = 0 @@ -25,23 +32,61 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta self.finishing_task = False # Update config with options that are general and not specific to any worker - self.config = config + self.augur_config = AugurConfig(self._root_augur_dir) + + self.config = { + 'worker_type': self.worker_type, + 'host': self.augur_config.get_value("Server", "host"), + 'gh_api_key': self.augur_config.get_value('Database', 'key'), + 'offline_mode': False + } + self.config.update(self.augur_config.get_section("Logging")) + + try: + worker_defaults = 
self.augur_config.get_default_config()['Workers'][self.config['worker_type']] + self.config.update(worker_defaults) + except KeyError as e: + logging.warn('Could not get default configuration for {}'.format(self.config['worker_type'])) + + worker_info = self.augur_config.get_value('Workers', self.config['worker_type']) + self.config.update(worker_info) + + worker_port = self.config['port'] + while True: + try: + r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format( + self.config['host'], worker_port)).json() + if 'status' in r: + if r['status'] == 'alive': + worker_port += 1 + except: + break + self.config.update({ - 'port_broker': read_config('Server', 'port', 'AUGUR_PORT', 5000), - 'host_broker': read_config('Server', 'host', 'AUGUR_HOST', '0.0.0.0'), - 'host_database': read_config('Database', 'host', 'AUGUR_DB_HOST', 'host'), - 'port_database': read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - 'user_database': read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - 'name_database': read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - 'password_database': read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password') + "port": worker_port, + "id": "workers.{}.{}".format(self.worker_type, worker_port), + "capture_output": False, + 'location': 'http://{}:{}'.format(self.config["host"], worker_port), + 'port_broker': self.augur_config.get_value('Server', 'port'), + 'host_broker': self.augur_config.get_value('Server', 'host'), + 'host_database': self.augur_config.get_value('Database', 'host'), + 'port_database': self.augur_config.get_value('Database', 'port'), + 'user_database': self.augur_config.get_value('Database', 'user'), + 'name_database': self.augur_config.get_value('Database', 'name'), + 'password_database': self.augur_config.get_value('Database', 'password') }) + self.config.update(config) + + # Initialize logging in the main process + self.initialize_logging() + + # Clear log contents from previous runs + open(self.config["server_logfile"], "w").close() + open(self.config["collection_logfile"], "w").close() - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format( - self.config['id'].split('.')[len(self.config['id'].split('.')) - 1] - ), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) + # Get configured collection logger + self.logger = logging.getLogger(self.config["id"]) + self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) self.given = given self.models = models @@ -56,28 +101,100 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta ], 'config': self.config } - + + # Send broker hello message + if self.config["offline_mode"] is False: + self.connect_to_broker() + + try: + self.tool_source + self.tool_version + self.data_source + except: + self.tool_source = 'Augur Worker Testing' + self.tool_version = '0.0.0' + self.data_source = 'Augur Worker Testing' + + def __repr__(self): + return f"{self.config['id']}" + + def initialize_logging(self): + self.config["log_level"] = self.config["log_level"].upper() + if self.config["debug"]: + self.config["log_level"] = "DEBUG" + + if self.config["verbose"]: + format_string = AugurLogging.verbose_format_string + else: + format_string = AugurLogging.simple_format_string + + formatter = Formatter(fmt=format_string) + error_formatter = Formatter(fmt=AugurLogging.error_format_string) 
+ + worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/" + Path(worker_dir).mkdir(exist_ok=True) + logfile_dir = worker_dir + f"/{self.worker_type}/" + Path(logfile_dir).mkdir(exist_ok=True) + + server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"]) + collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"]) + collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"]) + self.config.update({ + "logfile_dir": logfile_dir, + "server_logfile": server_logfile, + "collection_logfile": collection_logfile, + "collection_errorfile": collection_errorfile + }) + + collection_file_handler = FileHandler(filename=self.config["collection_logfile"], mode="a") + collection_file_handler.setFormatter(formatter) + collection_file_handler.setLevel(self.config["log_level"]) + + collection_errorfile_handler = FileHandler(filename=self.config["collection_errorfile"], mode="a") + collection_errorfile_handler.setFormatter(error_formatter) + collection_errorfile_handler.setLevel(logging.WARNING) + + logger = logging.getLogger(self.config["id"]) + logger.handlers = [] + logger.addHandler(collection_file_handler) + logger.addHandler(collection_errorfile_handler) + logger.setLevel(self.config["log_level"]) + logger.propagate = False + + if self.config["debug"]: + self.config["log_level"] = "DEBUG" + console_handler = StreamHandler() + console_handler.setFormatter(formatter) + console_handler.setLevel(self.config["log_level"]) + logger.addHandler(console_handler) + + if self.config["quiet"]: + logger.disabled = True + + self.logger = logger + + def initialize_database_connections(self): DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database'] ) # Create an sqlalchemy engine for both database schemas - logging.info("Making database connections... 
{}".format(DB_STR)) + self.logger.info("Making database connections") db_schema = 'augur_data' - self.db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, + self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(db_schema)}) helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, + self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(helper_schema)}) metadata = MetaData() helper_metadata = MetaData() # Reflect only the tables we will use for each schema's metadata object - metadata.reflect(self.db, only=data_tables) - helper_metadata.reflect(self.helper_db, only=operations_tables) + metadata.reflect(self.db, only=self.data_tables) + helper_metadata.reflect(self.helper_db, only=self.operations_tables) Base = automap_base(metadata=metadata) HelperBase = automap_base(metadata=helper_metadata) @@ -86,18 +203,18 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta HelperBase.prepare() # So we can access all our tables when inserting, updating, etc - for table in data_tables: + for table in self.data_tables: setattr(self, '{}_table'.format(table), Base.classes[table].__table__) try: - logging.info(HelperBase.classes.keys()) + self.logger.info(HelperBase.classes.keys()) except: pass - for table in operations_tables: + for table in self.operations_tables: try: setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__) except Exception as e: - logging.info("Error setting attribute for table: {} : {}".format(table, e)) + self.logger.error("Error setting attribute for table: {} : {}".format(table, e)) # Increment so we are ready to insert the 'next one' of each of these most recent ids self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 @@ -105,9 +222,8 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta # Organize different api keys/oauths available if 'gh_api_key' in self.config: self.init_oauths() - - # Send broker hello message - self.connect_to_broker() + else: + self.oauths = [{'oauth_id': 0}] @property def task(self): @@ -128,7 +244,7 @@ def task(self, value): # This setting is set by the housekeeper and is attached to the task before it gets sent here if 'focused_task' in value: if value['focused_task'] == 1: - logging.info("Focused task is ON\n") + self.logger.debug("Focused task is ON\n") self.finishing_task = True self._task = value @@ -143,21 +259,23 @@ def run(self): """ Kicks off the processing of the queue if it is not already being processed Gets run whenever a new task is added """ - logging.info("Running...\n") # Spawn a subprocess to handle message reading and performing the tasks self._child = Process(target=self.collect, args=()) self._child.start() - + def collect(self): """ Function to process each entry in the worker's task queue Determines what action to take based off the message type """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() while True: if not self._queue.empty(): message = self._queue.get() # Get the task off our MP queue else: break - logging.info("Popped off message: {}\n".format(str(message))) + self.logger.info("Popped off message: {}\n".format(str(message))) if message['job_type'] == 'STOP': break @@ -178,7 +296,7 
@@ def collect(self): model_method = getattr(self, '{}_model'.format(message['models'][0])) self.record_model_process(repo_id, 'repo_info') except Exception as e: - logging.info('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) + + self.logger.error('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) + 'must have name of {}_model'.format(message['models'][0])) self.register_task_failure(message, repo_id, e) break @@ -187,17 +305,51 @@ def collect(self): # and worker can move onto the next task without stopping try: model_method(message, repo_id) - except Exception as e: + except Exception as e: # this could be a custom exception, might make things easier self.register_task_failure(message, repo_id, e) - pass + break + + self.logger.debug('Closing database connections\n') + self.db.dispose() + self.helper_db.dispose() + self.logger.info("Collection process finished") def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): - """ map objects => { *our db col* : *gh json key*} """ + """ Include an extra key-value pair on each element of new_data that represents + the action that should be taken with this element (i.e. 'need_insertion') + + :param new_data: List of dictionaries, data to be assigned an action to + :param table_values: Pandas DataFrame, existing data in the database to check + what action should be taken on the new_data depending on the presence of + each element in this DataFrame + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. + Example: {'id': 'gh_issue_id'} + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. Example: {'id': 'gh_issue_id'} + :param table_pkey: String, the field name of the primary key of the table in + the database that we are checking the table_values for. + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. 
+ Example: {'cntrb_id': None} + :return: List of dictionaries, contains all the same elements of new_data, except + each element now has an extra key-value pair with the key being 'flag', and + the value being 'need_insertion', 'need_update', or 'none' + """ need_insertion_count = 0 need_update_count = 0 for i, obj in enumerate(new_data): if type(obj) != dict: - logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) + self.logger.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) continue obj['flag'] = 'none' # default of no action needed @@ -206,31 +358,37 @@ def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_ if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): continue - logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) + self.logger.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) obj['flag'] = 'need_insertion' need_insertion_count += 1 break if obj['flag'] == 'need_insertion': - logging.info('Already determined that current tuple needs insertion, skipping checking updates. ' + self.logger.info('Already determined that current tuple needs insertion, skipping checking updates. ' 'Moving to next tuple.\n') continue - existing_tuple = table_values[table_values[db_dupe_key].isin( + try: + existing_tuple = table_values[table_values[db_dupe_key].isin( [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] + except Exception as e: + self.logger.info('Special case assign_tuple_action error') + self.logger.info(f'Error: {e}') + self.logger.info(f'Related vars: {table_values}, ' + + f'{table_values[db_dupe_key].isin([obj[duplicate_col_map[db_dupe_key]]])}') # If we need to check the values of the existing tuple to determine if an update is needed for augur_col, value_check in value_update_col_map.items(): not_nan_check = not (math.isnan(value_check) and math.isnan(existing_tuple[augur_col])) if value_check is not None else True if existing_tuple[augur_col] != value_check and not_nan_check: continue - logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) + self.logger.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) obj['flag'] = 'need_update' obj['pkey'] = existing_tuple[table_pkey] need_update_count += 1 if obj['flag'] == 'need_update': - logging.info('Already determined that current tuple needs update, skipping checking further updates. ' + self.logger.info('Already determined that current tuple needs update, skipping checking further updates. 
'
 'Moving to next tuple.\n')
 continue
@@ -240,25 +398,34 @@ def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_
 continue
 if obj[update_col_map[col]] == existing_tuple[col]:
 continue
- logging.info("Found a tuple that needs an update for column: {}\n".format(col))
+ self.logger.info("Found a tuple that needs an update for column: {}\n".format(col))
 obj['flag'] = 'need_update'
 obj['pkey'] = existing_tuple[table_pkey]
 need_update_count += 1
- logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) +
+ self.logger.info("Page received has {} tuples, while filtering duplicates this ".format(len(new_data)) +
 "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count))
 return new_data
- def check_duplicates(new_data, table_values, key):
+ def check_duplicates(self, new_data, table_values, key):
+ """ Returns the items of the new_data json (list of dictionaries) that are not
+ present in the table_values df
+
+ :param new_data: List of dictionaries, new data to filter duplicates out of
+ :param table_values: Pandas DataFrame, existing data to check what data is already
+ present in the database
+ :param key: String, key of each dict in new_data whose value we are checking
+ duplicates with
+ :return: List of dictionaries, contains elements of new_data that are not already
+ present in the database
+ """
 need_insertion = []
 for obj in new_data:
- if type(obj) == dict:
- if not table_values.isin([obj[key]]).any().any():
- need_insertion.append(obj)
- # else:
- # logging.info("Tuple with github's {} key value already".format(key) +
- # "exists in our db: {}\n".format(str(obj[key])))
- logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) +
+ if type(obj) != dict:
+ continue
+ if not table_values.isin([obj[key]]).any().any():
+ need_insertion.append(obj)
+ self.logger.info("Page received has {} tuples, while filtering duplicates this ".format(str(len(new_data))) +
 "was reduced to {} tuples.\n".format(str(len(need_insertion))))
 return need_insertion
@@ -266,43 +433,39 @@ def connect_to_broker(self):
 connected = False
 for i in range(5):
 try:
- logging.info("attempt {}\n".format(i))
+ self.logger.debug("Connecting to broker, attempt {}\n".format(i))
 if i > 0:
 time.sleep(10)
 requests.post('http://{}:{}/api/unstable/workers'.format(
 self.config['host_broker'],self.config['port_broker']), json=self.specs)
- logging.info("Connection to the broker was successful\n")
+ self.logger.info("Connection to the broker was successful\n")
 connected = True
 break
 except requests.exceptions.ConnectionError:
- logging.error('Cannot connect to the broker. Trying again...\n')
+ self.logger.error('Cannot connect to the broker. Trying again...\n')
 if not connected:
 sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n')
- def dump_queue(queue):
- """
- Empties all pending items in a queue and returns them in a list.
- """
- result = []
- queue.put("STOP")
- for i in iter(queue.get, 'STOP'):
- result.append(i)
- # time.sleep(.1)
- return result
 def find_id_from_login(self, login):
+ """ Retrieves our contributor table primary key value for the contributor with
+ the given GitHub login credentials, if this contributor is not there, then
+ they get inserted. 
+ + :param login: String, the GitHub login username to find the primary key id for + :return: Integer, the id of the row in our database with the matching GitHub login + """ idSQL = s.sql.text(""" SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' - """.format(login)) + """.format(login)) rs = pd.read_sql(idSQL, self.db, params={}) data_list = [list(row) for row in rs.itertuples(index=False)] try: return data_list[0][0] except: - logging.info("contributor needs to be added...") + self.logger.info('contributor needs to be added...') - cntrb_url = ("https://api.github.com/users/" + login) - logging.info("Hitting endpoint: {} ...\n".format(cntrb_url)) + cntrb_url = ('https://api.github.com/users/' + login) + self.logger.info('Hitting endpoint: {} ...\n'.format(cntrb_url)) r = requests.get(url=cntrb_url, headers=self.headers) self.update_gh_rate_limit(r) contributor = r.json() @@ -318,45 +481,50 @@ def find_id_from_login(self, login): email = contributor['email'] cntrb = { - "cntrb_login": contributor['login'] if 'login' in contributor else None, - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, - "cntrb_canonical": None, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source + 'cntrb_login': contributor['login'] if 'login' in contributor else None, + 'cntrb_email': email, + 'cntrb_company': company, + 'cntrb_location': location, + 'cntrb_created_at': contributor['created_at'] if 'created_at' in contributor else None, + 'cntrb_canonical': None, + 'gh_user_id': contributor['id'], + 'gh_login': contributor['login'], + 'gh_url': contributor['url'], + 'gh_html_url': contributor['html_url'], + 'gh_node_id': contributor['node_id'], + 'gh_avatar_url': contributor['avatar_url'], + 'gh_gravatar_id': contributor['gravatar_id'], + 'gh_followers_url': contributor['followers_url'], + 'gh_following_url': contributor['following_url'], + 'gh_gists_url': contributor['gists_url'], + 'gh_starred_url': contributor['starred_url'], + 'gh_subscriptions_url': contributor['subscriptions_url'], + 'gh_organizations_url': contributor['organizations_url'], + 'gh_repos_url': contributor['repos_url'], + 'gh_events_url': contributor['events_url'], + 'gh_received_events_url': contributor['received_events_url'], + 'gh_type': contributor['type'], + 'gh_site_admin': contributor['site_admin'], + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source } result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: " + 
str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted contributor: " + contributor['login'] + "\n") + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") return self.find_id_from_login(login) - def get_owner_repo(self, github_url): - split = github_url.split('/') + def get_owner_repo(self, git_url): + """ Gets the owner and repository names of a repository from a git url + + :param git_url: String, the git url of a repository + :return: Tuple, includes the owner and repository names in that order + """ + split = git_url.split('/') owner = split[-2] repo = split[-1] @@ -367,6 +535,19 @@ def get_owner_repo(self, github_url): return owner, repo def get_max_id(self, table, column, default=25150, operations_table=False): + """ Gets the max value (usually used for id/pk's) of any Integer column + of any table + + :param table: String, the table that consists of the column you want to + query a max value for + :param column: String, the column that you want to query the max value for + :param default: Integer, if there are no values in the + specified column, the value of this parameter will be returned + :param operations_table: Boolean, if True, this signifies that the table/column + that is wanted to be queried is in the augur_operations schema rather than + the augur_data schema. Default False + :return: Integer, the max value of the specified column/table + """ maxIdSQL = s.sql.text(""" SELECT max({0}.{1}) AS {1} FROM {0} @@ -375,14 +556,24 @@ def get_max_id(self, table, column, default=25150, operations_table=False): rs = pd.read_sql(maxIdSQL, db, params={}) if rs.iloc[0][column] is not None: max_id = int(rs.iloc[0][column]) + 1 - logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) + self.logger.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) else: max_id = default - logging.info("Could not find max id for {} column in the {} table... using default set to: \ - {}\n".format(column, table, max_id)) + self.logger.warning('Could not find max id for {} column in the {} table... 
' + + 'using default set to: {}\n'.format(column, table, max_id)) return max_id def get_table_values(self, cols, tables, where_clause=""): + """ Can query all values of any column(s) from any table(s) + with an optional where clause + + :param cols: List of Strings, column(s) that user wants to query + :param tables: List of Strings, table(s) that user wants to query + :param where_clause: String, optional where clause to filter the values + queried + :return: Pandas DataFrame, contains all values queried in the columns, tables, and + optional where clause provided + """ table_str = tables[0] del tables[0] @@ -394,27 +585,33 @@ def get_table_values(self, cols, tables, where_clause=""): for col in cols: col_str += ", " + col - tableValuesSQL = s.sql.text(""" + table_values_sql = s.sql.text(""" SELECT {} FROM {} {} """.format(col_str, table_str, where_clause)) - logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL)) - values = pd.read_sql(tableValuesSQL, self.db, params={}) + self.logger.info('Getting table values with the following PSQL query: \n{}\n'.format( + table_values_sql)) + values = pd.read_sql(table_values_sql, self.db, params={}) return values def init_oauths(self): + """ Initialization required to have all GitHub tokens within access to GitHub workers + """ self.oauths = [] self.headers = None # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" + url = 'https://api.github.com/users/gabe-heim' # Make a list of api key in the config combined w keys stored in the database oauthSQL = s.sql.text(""" SELECT * FROM worker_oauth WHERE access_token <> '{}' """.format(self.config['gh_api_key'])) - for oauth in [{'oauth_id': 0, 'access_token': self.config['gh_api_key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): + + oauths = [{'oauth_id': 0, 'access_token': self.config['gh_api_key']}] + \ + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")) + for oauth in oauths: self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}\n".format(oauth)) + self.logger.debug("Getting rate limit info for oauth: {}\n".format(oauth)) response = requests.get(url=url, headers=self.headers) self.oauths.append({ 'oauth_id': oauth['oauth_id'], @@ -422,18 +619,52 @@ def init_oauths(self): 'rate_limit': int(response.headers['X-RateLimit-Remaining']), 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() }) - logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) + self.logger.debug("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") + self.logger.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") # First key to be used will be the one specified in the config (first element in # self.oauths array will always be the key in use) self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + self.logger.info("OAuth initialized") def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", 
value_update_col_map={}): - # Paginate backwards through all the tuples but get first page in order - # to determine if there are multiple pages and if the 1st page covers all + """ Paginate either backwards or forwards (depending on the value of the worker's + finishing_task attribute) through all the GitHub or GitLab api endpoint pages. + + :param url: String, the url of the API endpoint we are paginating through, expects + a curly brace string formatter within the string to format the Integer + representing the page number that is wanted to be returned + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. Example: {'id': 'gh_issue_id'} + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. + Example: {'id': 'gh_issue_id'} + :param table: String, the name of the table that holds the values to check for + duplicates/updates against + :param table_pkey: String, the field name of the primary key of the table in + the database that we are getting the values for to cross-reference to check + for duplicates. + :param where_clause: String, optional where clause to filter the values + that are queried when preparing the values that will be cross-referenced + for duplicates/updates + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. + Example: {'cntrb_id': None} + :return: List of dictionaries, all data points from the pages of the specified API endpoint + each with a 'flag' key-value pair representing the required action to take with that + data point (i.e. 
'need_insertion', 'need_update', 'none') + """ update_keys = list(update_col_map.keys()) if update_col_map else [] update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] @@ -446,10 +677,10 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh num_attempts = 0 success = False while num_attempts < 3: - logging.info("Hitting endpoint: " + url.format(i) + " ...\n") + self.logger.info(f'Hitting endpoint: {url.format(i)}...\n') r = requests.get(url=url.format(i), headers=self.headers) self.update_gh_rate_limit(r) - logging.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) + self.logger.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) try: j = r.json() @@ -460,9 +691,9 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh success = True break elif type(j) == dict: - logging.info("Request returned a dict: {}\n".format(j)) + self.logger.info("Request returned a dict: {}\n".format(j)) if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.warning("Github repo was not found or does not exist for endpoint: {}\n".format(url)) break if j['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': num_attempts -= 1 @@ -470,11 +701,11 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh if j['message'] == 'Bad credentials': self.update_gh_rate_limit(r, bad_credentials=True) elif type(j) == str: - logging.info("J was string: {}\n".format(j)) + self.logger.info(f'J was string: {j}\n') if '<!DOCTYPE html>' in j: - logging.info("HTML was returned, trying again...\n") + self.logger.info('HTML was returned, trying again...\n') elif len(j) == 0: - logging.info("Empty string, trying again...\n") + self.logger.warning('Empty string, trying again...\n') else: try: j = json.loads(j) @@ -490,34 +721,34 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh if 'last' in r.links and not multiple_pages and not self.finishing_task: param = r.links['last']['url'][-6:] i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") multiple_pages = True elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") + self.logger.info("Only 1 page of request\n") elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ..." + self.logger.info("Finishing a previous task, paginating forwards ..." 
" excess rate limit requests will be made\n") if len(j) == 0: - logging.info("Response was empty, breaking from pagination.\n") + self.logger.info("Response was empty, breaking from pagination.\n") break # Checking contents of requests with what we already have in the db j = self.assign_tuple_action(j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) if not j: - logging.info("Assigning tuple action failed, moving to next page.\n") + self.logger.error("Assigning tuple action failed, moving to next page.\n") i = i + 1 if self.finishing_task else i - 1 continue try: to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none'] except Exception as e: - logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) + self.logger.error("Failure accessing data of page: {}. Moving to next page.\n".format(e)) i = i + 1 if self.finishing_task else i - 1 continue if len(to_add) == 0 and multiple_pages and 'last' in r.links: - logging.info("{}".format(r.links['last'])) + self.logger.info("{}".format(r.links['last'])) if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown tuples, breaking from pagination.\n") + self.logger.info("No more pages with unknown tuples, breaking from pagination.\n") break tuples += to_add @@ -525,7 +756,7 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh # Since we already wouldve checked the first page... break if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") + self.logger.info("No more pages to check, breaking from pagination.\n") break return tuples @@ -535,24 +766,16 @@ def query_github_contributors(self, entry_info, repo_id): """ Data collection function Query the GitHub API for contributors """ - logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n") + self.logger.info(f'Querying contributors with given entry info: {entry_info}\n') github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] # Extract owner/repo from the url for the endpoint - path = urlparse(github_url) - split = path[2].split('/') - - owner = split[1] - name = split[2] - - # Handles git url case by removing the extension - if ".git" in name: - name = name[:-4] + owner, name = self.get_owner_repo(github_url) # Set the base of the url and place to hold contributors to insert - contributors_url = ("https://api.github.com/repos/" + owner + "/" + - name + "/contributors?per_page=100&page={}") + contributors_url = (f'https://api.github.com/repos/{owner}/{name}/' + + 'contributors?per_page=100&page={}') # Get contributors that we already have stored # Set our duplicate and update column map keys (something other than PK) to @@ -565,7 +788,7 @@ def query_github_contributors(self, entry_info, repo_id): #list to hold contributors needing insertion or update contributors = self.paginate(contributors_url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") + self.logger.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") for repo_contributor in contributors: try: @@ -573,7 +796,7 @@ def query_github_contributors(self, entry_info, repo_id): # `created at` # i think that's it cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) - 
logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) self.update_gh_rate_limit(r) contributor = r.json() @@ -624,70 +847,23 @@ def query_github_contributors(self, entry_info, repo_id): if repo_contributor['flag'] == 'need_update': result = self.db.execute(self.contributors_table.update().where( self.worker_history_table.c.cntrb_email==email).values(cntrb)) - logging.info("Updated tuple in the contributors table with existing email: {}".format(email)) + self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email)) self.cntrb_id_inc = repo_contributor['pkey'] elif repo_contributor['flag'] == 'need_insertion': result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: " + contributor['login'] + "\n") + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") # Increment our global track of the cntrb id for the possibility of it being used as a FK self.cntrb_id_inc = int(result.inserted_primary_key[0]) except Exception as e: - logging.info("Caught exception: {}".format(e)) - logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) + self.logger.error("Caught exception: {}".format(e)) + self.logger.error("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) continue - def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): - """ - Read a variable in specified section of the config file, unless provided an environment variable - - :param section: location of given variable - :param name: name of variable - """ - config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path) - _config_file_name = 'augur.config.json' - _config_bad = False - _already_exported = {} - _runtime_location = 'runtime/' - _default_config = {} - _config_file = None - - try: - _config_file = open(config_file_path, 'r+') - except: - print('Couldn\'t open {}'.format(_config_file_name)) - - # Load the config file - try: - config_text = _config_file.read() - _config = json.loads(config_text) - except json.decoder.JSONDecodeError as e: - if not _config_bad: - _using_config_file = False - print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: {}'.format(config_file_path, str(e))) - _config = _default_config - - value = None - if environment_variable is not None: - value = os.getenv(environment_variable) - if value is None: - try: - if name is not None: - value = _config[section][name] - else: - value = _config[section] - except Exception as e: - value = default - if not section in _config: - _config[section] = {} - - return value - - def record_model_process(self, repo_id, model): task_history = { @@ -705,7 +881,7 @@ def record_model_process(self, repo_id, model): self.history_id += 1 else: result = self.helper_db.execute(self.worker_history_table.insert().values(task_history)) - logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) + self.logger.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) self.history_id = int(result.inserted_primary_key[0]) def register_task_completion(self, task, repo_id, model): @@ -735,7 +911,7 @@ def register_task_completion(self, task, repo_id, model): self.helper_db.execute(self.worker_history_table.update().where( self.worker_history_table.c.history_id==self.history_id).values(task_history)) - logging.info("Recorded job completion for: " + str(task_completed) + "\n") + self.logger.info("Recorded job completion for: " + str(task_completed) + "\n") # Update job process table updated_job = { @@ -746,27 +922,29 @@ def register_task_completion(self, task, repo_id, model): } self.helper_db.execute(self.worker_job_table.update().where( self.worker_job_table.c.job_model==model).values(updated_job)) - logging.info("Updated job process for model: " + model + "\n") + self.logger.info("Updated job process for model: " + model + "\n") - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") + if self.config["offline_mode"] is False: + + # Notify broker of completion + self.logger.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + + "This task inserted: " + str(self.results_counter) + " tuples.\n") - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['host_broker'],self.config['port_broker']), json=task_completed) + requests.post('http://{}:{}/api/unstable/completed_task'.format( + self.config['host_broker'],self.config['port_broker']), json=task_completed) # Reset results counter for next task self.results_counter = 0 def register_task_failure(self, task, repo_id, e): - logging.info("Worker ran into an error for task: {}\n".format(task)) - logging.info("Printing traceback...\n") + self.logger.error("Worker ran into an error for task: {}\n".format(task)) + self.logger.error("Printing traceback...\n") tb = traceback.format_exc() - logging.info(tb) + self.logger.error(tb) - logging.info(f'This task inserted {self.results_counter} tuples before failure.\n') - logging.info("Notifying broker and logging task failure in database...\n") + self.logger.error(f'This task inserted {self.results_counter} tuples before failure.\n') + self.logger.error("Notifying broker and logging task failure in database...\n") key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" url = task['given'][key] @@ -781,9 +959,11 @@ def register_task_failure(self, task, repo_id, e): requests.post("http://{}:{}/api/unstable/task_error".format( self.config['host_broker'],self.config['port_broker']), json=task) except 
requests.exceptions.ConnectionError: - logging.error('Could not send task failure message to the broker\n') + self.logger.error('Could not send task failure message to the broker\n') + self.logger.error(e) except Exception: - logging.exception('An error occured while informing broker about task failure\n') + self.logger.error('An error occured while informing broker about task failure\n') + self.logger.error(e) # Add to history table task_history = { @@ -797,7 +977,7 @@ def register_task_failure(self, task, repo_id, e): } self.helper_db.execute(self.worker_history_table.update().where(self.worker_history_table.c.history_id==self.history_id).values(task_history)) - logging.info("Recorded job error in the history table for: " + str(task) + "\n") + self.logger.error("Recorded job error in the history table for: " + str(task) + "\n") # Update job process table updated_job = { @@ -807,7 +987,7 @@ def register_task_failure(self, task, repo_id, e): "analysis_state": 0 } self.helper_db.execute(self.worker_job_table.update().where(self.worker_job_table.c.job_model==task['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + task['models'][0] + "\n") + self.logger.info("Updated job process for model: " + task['models'][0] + "\n") # Reset results counter for next task self.results_counter = 0 @@ -836,29 +1016,29 @@ def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disa # Try to get rate limit from request headers, sometimes it does not work (GH's issue) # In that case we just decrement from last recieved header count if bad_credentials and len(self.oauths) > 1: - logging.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) + self.logger.warning("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) del self.oauths[0] if temporarily_disable: - logging.info("Github thinks we are abusing their api. Preventing use of this key until it resets...\n") + self.logger.debug("Github thinks we are abusing their api. 
Preventing use of this key until it resets...\n") self.oauths[0]['rate_limit'] = 0 else: try: self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - logging.info("Recieved rate limit from headers\n") + self.logger.info("Recieved rate limit from headers\n") except: self.oauths[0]['rate_limit'] -= 1 - logging.info("Headers did not work, had to decrement\n") - logging.info("Updated rate limit, you have: " + + self.logger.info("Headers did not work, had to decrement\n") + self.logger.info("Updated rate limit, you have: " + str(self.oauths[0]['rate_limit']) + " requests remaining.\n") if self.oauths[0]['rate_limit'] <= 0: try: reset_time = response.headers['X-RateLimit-Reset'] except Exception as e: - logging.info("Could not get reset time from headers because of error: {}".format(error)) + self.logger.error("Could not get reset time from headers because of error: {}".format(e)) reset_time = 3600 time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() - logging.info("Rate limit exceeded, checking for other available keys to use.\n") + self.logger.info("Rate limit exceeded, checking for other available keys to use.\n") # We will be finding oauth with the highest rate limit left out of our list of oauths new_oauth = self.oauths[0] @@ -867,7 +1047,7 @@ def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disa other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] for oauth in other_oauths: - logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) + self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) self.headers = {'Authorization': 'token %s' % oauth['access_token']} response = requests.get(url=url, headers=self.headers) oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) @@ -875,20 +1055,20 @@ def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disa # Update oauth to switch to if a higher limit is found if oauth['rate_limit'] > new_oauth['rate_limit']: - logging.info("Higher rate limit found in oauth: {}\n".format(oauth)) + self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth)) new_oauth = oauth elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: - logging.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) + self.logger.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) new_oauth = oauth if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: - logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) + self.logger.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) time.sleep(new_oauth['seconds_to_reset']) # Make new oauth the 0th element in self.oauths so we know which one is in use index = self.oauths.index(new_oauth) self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] - logging.info("Using oauth: {}\n".format(self.oauths[0])) + self.logger.info("Using oauth: {}\n".format(self.oauths[0])) # Change headers to be using the new oauth's key self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
diff --git a/test/api/test_experimental_routes.py b/test/api/test_experimental_routes.py deleted file mode 100644 --- a/test/api/test_experimental_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_insight_routes.py b/test/api/test_insight_routes.py deleted file mode 100644 --- a/test/api/test_insight_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_message_routes.py b/test/api/test_message_routes.py deleted file mode 100644 --- a/test/api/test_message_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_platform_routes.py b/test/api/test_platform_routes.py deleted file mode 100644 --- a/test/api/test_platform_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_util_routes.py b/test/api/test_util_routes.py deleted file mode 100644 --- a/test/api/test_util_routes.py +++ /dev/null @@ -1,31 +0,0 @@ -import requests -import pytest - [email protected](scope="session") -def metrics(): - pass - -def test_common(endpoint="http://localhost:5000/api/unstable/repos"): - response = requests.get(endpoint) - data = response.json() - assert response.status_code == 200 - assert len(data) >= 1 - -def test_get_all_repos(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repos') - -def test_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_repos_in_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_get_repo_for_dosocs(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/dosocs/repos') - -def test_aggregate_summary_by_repo(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/repos/25430/aggregate-summary') - -def test_aggregate_summary_by_group(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/aggregate-summary') - diff --git a/test/metrics/test_experimental_metrics.py b/test/metrics/test_experimental_metrics.py deleted file mode 100644 --- a/test/metrics/test_experimental_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_insight_metrics.py b/test/metrics/test_insight_metrics.py deleted file mode 100644 --- a/test/metrics/test_insight_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_message_metrics.py b/test/metrics/test_message_metrics.py deleted file mode 100644 --- a/test/metrics/test_message_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_platform_metrics.py 
b/test/metrics/test_platform_metrics.py deleted file mode 100644 --- a/test/metrics/test_platform_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_util_metrics.py b/test/metrics/test_util_metrics.py deleted file mode 100644 --- a/test/metrics/test_util_metrics.py +++ /dev/null @@ -1,14 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - -# def test_get_repos_for_dosocs(metrics): -# assert metrics.get_repos_for_dosocs().isin( -# ['/home/sean/git-repos/25430/github.com/rails/rails-dom-testing']).any().any() - diff --git a/augur/housekeeper/__init__.py b/tests/__init__.py similarity index 100% rename from augur/housekeeper/__init__.py rename to tests/__init__.py diff --git a/tests/test_application.py b/tests/test_application.py new file mode 100644 --- /dev/null +++ b/tests/test_application.py @@ -0,0 +1,20 @@ +import pytest +import augur.application +import sqlalchemy as s +import json + +from augur.application import Application + +def test_init_augur_regular(): + augur_app = Application(disable_logs=True) + assert augur_app is not None + +def test_connect_to_database(monkeypatch): + def mock_fail_connection(self): + raise(s.exc.OperationalError("fake", "error", "message")) + + monkeypatch.setattr(s.engine.Engine, "connect", mock_fail_connection) + monkeypatch.setenv("AUGUR_LOG_QUIET", "1") + + with pytest.raises(s.exc.OperationalError): + augur_app = Application(disable_logs=True) diff --git a/test/metrics/test_commit_metrics.py b/tests/test_metrics/test_commit_metrics.py similarity index 90% rename from test/metrics/test_commit_metrics.py rename to tests/test_metrics/test_commit_metrics.py --- a/test/metrics/test_commit_metrics.py +++ b/tests/test_metrics/test_commit_metrics.py @@ -2,12 +2,6 @@ import pytest [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_annual_commit_count_ranked_by_repo_in_repo_group(metrics): assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10).iloc[0].net > 0 assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10, 25430).iloc[0].net > 0 diff --git a/test/metrics/test_contributor_metrics.py b/tests/test_metrics/test_contributor_metrics.py similarity index 91% rename from test/metrics/test_contributor_metrics.py rename to tests/test_metrics/test_contributor_metrics.py --- a/test/metrics/test_contributor_metrics.py +++ b/tests/test_metrics/test_contributor_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_contributors(metrics): # repo group assert metrics.contributors(20).iloc[0]['total'] > 0 diff --git a/test/metrics/test_issue_metrics.py b/tests/test_metrics/test_issue_metrics.py similarity index 97% rename from test/metrics/test_issue_metrics.py rename to tests/test_metrics/test_issue_metrics.py --- a/test/metrics/test_issue_metrics.py +++ b/tests/test_metrics/test_issue_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def 
test_issues_new(metrics): #repo_id assert metrics.issues_new(10, 25430, period='year').iloc[0]['issues'] > 0 diff --git a/test/metrics/test_pull_request_metrics.py b/tests/test_metrics/test_pull_request_metrics.py similarity index 91% rename from test/metrics/test_pull_request_metrics.py rename to tests/test_metrics/test_pull_request_metrics.py --- a/test/metrics/test_pull_request_metrics.py +++ b/tests/test_metrics/test_pull_request_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_pull_requests_merge_contributor_new(metrics): # repo id assert metrics.pull_requests_merge_contributor_new(10, repo_id=25430, period='year').isin( diff --git a/test/metrics/test_repo_meta_metrics.py b/tests/test_metrics/test_repo_meta_metrics.py similarity index 96% rename from test/metrics/test_repo_meta_metrics.py rename to tests/test_metrics/test_repo_meta_metrics.py --- a/test/metrics/test_repo_meta_metrics.py +++ b/tests/test_metrics/test_repo_meta_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_code_changes(metrics): #repo_id assert metrics.code_changes(10, 25430, period='year').isin([pd.Timestamp('2019-01-01T00:00:00+00:00'), 2]).any().any() diff --git a/test/api/runner.py b/tests/test_routes/runner.py similarity index 84% rename from test/api/runner.py rename to tests/test_routes/runner.py --- a/test/api/runner.py +++ b/tests/test_routes/runner.py @@ -9,9 +9,10 @@ start = subprocess.Popen(["augur", "run", "--disable-housekeeper", "--skip-cleanup"], stdout=FNULL, stderr=subprocess.STDOUT) print("Waiting for the server to start...") time.sleep(5) -process = subprocess.run(["pytest", "-ra", "--tb=short", "-x", "test/metrics"]) + +process = subprocess.run(["pytest", "tests/test_routes/"]) time.sleep(2) + subprocess.Popen(["augur", "util", "kill"], stdout=FNULL, stderr=subprocess.STDOUT) print("Server successfully shutdown.") - sys.exit(process.returncode) diff --git a/test/api/test_commit_routes.py b/tests/test_routes/test_commit_routes.py similarity index 97% rename from test/api/test_commit_routes.py rename to tests/test_routes/test_commit_routes.py --- a/test/api/test_commit_routes.py +++ b/tests/test_routes/test_commit_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_annual_commit_count_ranked_by_new_repo_in_repo_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/annual-commit-count-ranked-by-new-repo-in-repo-group/') data = response.json() diff --git a/test/api/test_contributor_routes.py b/tests/test_routes/test_contributor_routes.py similarity index 95% rename from test/api/test_contributor_routes.py rename to tests/test_routes/test_contributor_routes.py --- a/test/api/test_contributor_routes.py +++ b/tests/test_routes/test_contributor_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_contributors_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/contributors') data = response.json() diff --git a/test/api/test_issue_routes.py b/tests/test_routes/test_issue_routes.py similarity index 99% rename from test/api/test_issue_routes.py rename to 
tests/test_routes/test_issue_routes.py --- a/test/api/test_issue_routes.py +++ b/tests/test_routes/test_issue_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_issues_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/issues-new') data = response.json() diff --git a/test/api/test_pull_request_routes.py b/tests/test_routes/test_pull_request_routes.py similarity index 94% rename from test/api/test_pull_request_routes.py rename to tests/test_routes/test_pull_request_routes.py --- a/test/api/test_pull_request_routes.py +++ b/tests/test_routes/test_pull_request_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_pull_requests_merge_contributor_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/pull-requests-merge-contributor-new') data = response.json() diff --git a/test/api/test_repo_meta_routes.py b/tests/test_routes/test_repo_meta_routes.py similarity index 98% rename from test/api/test_repo_meta_routes.py rename to tests/test_routes/test_repo_meta_routes.py --- a/test/api/test_repo_meta_routes.py +++ b/tests/test_routes/test_repo_meta_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_code_changes_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/code-changes') data = response.json() @@ -51,7 +47,6 @@ def test_sub_projects_by_repo(metrics): def test_cii_best_practices_badge_by_repo(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/repos/25430/cii-best-practices-badge') - print(response) data = response.json() assert response.status_code == 200 assert len(data) >= 1 diff --git a/tests/test_routes/test_util_routes.py b/tests/test_routes/test_util_routes.py new file mode 100644 --- /dev/null +++ b/tests/test_routes/test_util_routes.py @@ -0,0 +1,20 @@ +import requests +import pytest + +from conftest import create_full_routes + +util_routes = [\ +"repos",\ +"repo-groups",\ +"repo-groups",\ +"dosocs/repos",\ +"repo-groups/<default_repo_group_id>/aggregate-summary",\ +"repo-groups/<default_repo_group_id>/repos/<default_repo_id>/aggregate-summary",\ +] + [email protected]("endpoint", create_full_routes(util_routes)) +def test_base_test(client, endpoint): + response = client.get(endpoint) + data = response.get_json() + assert response.status_code == 200 + assert len(data) >= 1 diff --git a/tests/test_workers/test_repo_info_worker.py b/tests/test_workers/test_repo_info_worker.py new file mode 100644 --- /dev/null +++ b/tests/test_workers/test_repo_info_worker.py @@ -0,0 +1,29 @@ +import pytest +from time import sleep + +from workers.repo_info_worker.repo_info_worker import RepoInfoWorker + [email protected] +def test_task(): + return { + "given": { + "github_url": "https://github.com/chaoss/augur.git" + }, + "models": ["repo_info"], + "job_type": "MAINTAIN", + "display_name": "repo_info model for url: https://github.com/chaoss/augur.git", + "focused_task": 1 + } + [email protected] +def repo_info_worker(): + config = { + "offline_mode": True, + "quiet": True + } + + repo_info_worker = RepoInfoWorker(config=config) + return repo_info_worker + +def test_repo_info_worker(repo_info_worker, test_task): + assert repo_info_worker is not None diff --git a/test/__init__.py b/workers/contributor_worker/__init__.py 
similarity index 100% rename from test/__init__.py rename to workers/contributor_worker/__init__.py diff --git a/test/test_model.py b/workers/github_worker/__init__.py similarity index 100% rename from test/test_model.py rename to workers/github_worker/__init__.py diff --git a/workers/metric_status_worker/tests/tests_worker.py b/workers/metric_status_worker/tests/tests_worker.py deleted file mode 100644 --- a/workers/metric_status_worker/tests/tests_worker.py +++ /dev/null @@ -1,16 +0,0 @@ -import requests -import pytest - -from metric_status_worker.worker import MetricsStatus - -def test_get_metric_index_in_table_row(): - row = "metric |sTatuS|TestString" - metric_status = MetricsStatus("api.github.com") - result = metric_status.get_metric_index_in_table_row(row) - print(result) - assert result == (0, 3) - -def test_is_has_link(): - metric_status = MetricsStatus("api.github.com") - re_result = metric_status.is_has_link(" [oss](augur" , None) - assert re_result == ('oss', 'augur') diff --git a/workers/tests/test_standard_methods.py b/workers/tests/test_standard_methods.py deleted file mode 100644 --- a/workers/tests/test_standard_methods.py +++ /dev/null @@ -1,28 +0,0 @@ -# Sample Test passing with nose and pytest -import pandas as pd -import pytest -from workers.standard_methods import check_duplicates, dump_queue, read_config -from queue import Queue - - -def test_check_duplicates(): - obj = {"website":["walmart.com"]} - new_data = [obj] - table_values = pd.read_csv("augur/data/companies.csv") - assert check_duplicates(new_data, table_values, "website") == [obj] - -def test_dump_queues(): - sample_queue = Queue() - list_sample = ["[email protected]", "[email protected]", "[email protected]"] - for list_item in list_sample: - sample_queue.put(list_item) - queue_to_list = dump_queue(sample_queue) - assert queue_to_list == ["[email protected]", "[email protected]", "[email protected]"] - -def test_read_config_no_exception(): - db_name = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur',config_file_path="augur.config.json") - assert db_name == "augur" - -def test_read_config_exception(): - with pytest.raises(AttributeError): - db_name = read_config('Server', 'username')
repo_info worker: dev/test branch Please help us help you by filling out the following sections as thoroughly as you can. **Description:** Looks like the new Fork information collection has some kind of mismatch between the method and parameters passed: ``` INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}} INFO:root:Printing traceback... INFO:root:Traceback (most recent call last): File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect model_method(message, repo_id) File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model forked = self.is_forked(owner, repo) File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked data = self.get_repo_data(self, url, r) TypeError: get_repo_data() takes 3 positional arguments but 4 were given INFO:root:This task inserted 0 tuples before failure. INFO:root:Notifying broker and logging task failure in database... INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 - INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'} INFO:root:Updated job process for model: repo_info ``` If the log does not provide enough info, let me know
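The traceback above points at a plain positional-argument mismatch: `self.get_repo_data(self, url, r)` passes `self` explicitly even though the call is already made on a bound method. Below is a minimal, self-contained sketch of that failure mode and the likely shape of the fix. The class name, method names, and signatures are assumptions made purely for illustration; they are not copied from the actual repo_info worker source, and the real patch may differ.

```python
# Minimal sketch of the TypeError reported in the traceback above.
# All names and signatures here are hypothetical stand-ins.

class RepoInfoWorkerSketch:
    def get_repo_data(self, url, response):
        # Bound method: Python supplies `self` implicitly, so this accepts
        # exactly three positional arguments in total (self, url, response).
        return {"url": url, "response": response}

    def is_forked_buggy(self, url, response):
        # Passing `self` explicitly adds a fourth positional argument and
        # reproduces: "get_repo_data() takes 3 positional arguments but 4 were given".
        return self.get_repo_data(self, url, response)

    def is_forked_fixed(self, url, response):
        # Dropping the redundant `self` matches the method's signature.
        return self.get_repo_data(url, response)


if __name__ == "__main__":
    worker = RepoInfoWorkerSketch()
    print(worker.is_forked_fixed("https://api.github.com/repos/owner/repo", None))
    try:
        worker.is_forked_buggy("https://api.github.com/repos/owner/repo", None)
    except TypeError as error:
        print(error)  # takes 3 positional arguments but 4 were given
```

Under that assumption, the one-line change is simply removing the extra `self` from the call site; whether the merged pull request did exactly this is not shown here.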
2020-06-17T21:47:52Z
[]
[]
chaoss/augur
791
chaoss__augur-791
[ "737" ]
7bc330701304d22132f1d95ca326cb18b6988ebf
diff --git a/augur/__init__.py b/augur/__init__.py --- a/augur/__init__.py +++ b/augur/__init__.py @@ -1,10 +1,4 @@ #SPDX-License-Identifier: MIT -import logging -import coloredlogs - -coloredlogs.install() -logger = logging.getLogger('augur') - -# Classes -from .application import Application, logger +import os +ROOT_AUGUR_DIRECTORY = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) diff --git a/augur/application.py b/augur/application.py --- a/augur/application.py +++ b/augur/application.py @@ -4,72 +4,52 @@ """ import os -import time -import multiprocessing as mp +from pathlib import Path import logging +from logging import FileHandler, Formatter import coloredlogs import json -import pkgutil from beaker.cache import CacheManager from beaker.util import parse_cache_config_options import sqlalchemy as s import psycopg2 -from augur import logger +from augur import ROOT_AUGUR_DIRECTORY from augur.metrics import Metrics -from augur.cli.configure import default_config +from augur.config import AugurConfig +from augur.logging import AugurLogging -class Application(object): +logger = logging.getLogger(__name__) + +class Application(): """Initalizes all classes from Augur using a config file or environment variables""" - def __init__(self): + def __init__(self, given_config={}, disable_logs=False, offline_mode=False): """ Reads config, creates DB session, and initializes cache """ - self.config_file_name = 'augur.config.json' - self.__shell_config = None - self.__export_file = None - self.__env_file = None - self.config = default_config - self.env_config = {} - self.root_augur_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - default_config_path = self.root_augur_dir + '/' + self.config_file_name - using_config_file = False - - - config_locations = [self.config_file_name, default_config_path, f"/opt/augur/{self.config_file_name}"] - if os.getenv('AUGUR_CONFIG_FILE') is not None: - config_file_path = os.getenv('AUGUR_CONFIG_FILE') - using_config_file = True - else: - for index, location in enumerate(config_locations): - try: - f = open(location, "r+") - config_file_path = os.path.abspath(location) - using_config_file = True - f.close() - break - except FileNotFoundError: - pass - - if using_config_file: - try: - with open(config_file_path, 'r+') as config_file_handle: - self.config = json.loads(config_file_handle.read()) - except json.decoder.JSONDecodeError as e: - logger.warning('%s could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: %s', config_file_path, str(e)) - else: - logger.warning('%s could not be parsed, using defaults.') - - self.load_env_configuration() - - logger.setLevel(self.read_config("Development", "log_level")) + self.logging = AugurLogging(disable_logs=disable_logs) + self.root_augur_dir = ROOT_AUGUR_DIRECTORY + self.config = AugurConfig(self.root_augur_dir, given_config) + + # we need these for later + self.housekeeper = None + self.manager = None + + self.gunicorn_options = { + 'bind': '%s:%s' % (self.config.get_value("Server", "host"), self.config.get_value("Server", "port")), + 'workers': int(self.config.get_value('Server', 'workers')), + 'timeout': int(self.config.get_value('Server', 'timeout')) + } + self.logging.configure_logging(self.config) + self.gunicorn_options.update(self.logging.gunicorn_logging_options) self.cache_config = { 'cache.type': 'file', 'cache.data_dir': 'runtime/cache/', 'cache.lock_dir': 'runtime/cache/' } + if not os.path.exists(self.cache_config['cache.data_dir']): os.makedirs(self.cache_config['cache.data_dir']) if not os.path.exists(self.cache_config['cache.lock_dir']): @@ -77,75 +57,56 @@ def __init__(self): cache_parsed = parse_cache_config_options(self.cache_config) self.cache = CacheManager(**cache_parsed) - self.database = self.__connect_to_database() - self.spdx_db = self.__connect_to_database(include_spdx=True) + if offline_mode is False: + logger.debug("Running in online mode") + self.database, self.operations_database, self.spdx_database = self._connect_to_database() - self.metrics = Metrics(self) + self.metrics = Metrics(self) - def __connect_to_database(self, include_spdx=False): - user = self.read_config('Database', 'user') - host = self.read_config('Database', 'host') - port = self.read_config('Database', 'port') - dbname = self.read_config('Database', 'name') + def _connect_to_database(self): + logger.debug("Testing database connections") + user = self.config.get_value('Database', 'user') + host = self.config.get_value('Database', 'host') + port = self.config.get_value('Database', 'port') + dbname = self.config.get_value('Database', 'name') database_connection_string = 'postgresql://{}:{}@{}:{}/{}'.format( - user, self.read_config('Database', 'password'), host, port, dbname + user, self.config.get_value('Database', 'password'), host, port, dbname ) csearch_path_options = 'augur_data' - if include_spdx == True: - csearch_path_options += ',spdx' engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, connect_args={'options': f'-csearch_path={csearch_path_options}'}, pool_pre_ping=True) + csearch_path_options += ',spdx' + spdx_engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, + connect_args={'options': f'-csearch_path={csearch_path_options}'}, pool_pre_ping=True) + + helper_engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, + connect_args={'options': f'-csearch_path=augur_operations'}, pool_pre_ping=True) + try: - test_connection = engine.connect() - test_connection.close() - return engine + engine.connect().close() + helper_engine.connect().close() + spdx_engine.connect().close() + return engine, helper_engine, spdx_engine except s.exc.OperationalError as e: - logger.fatal(f"Unable to connect to the database. Terminating...") - exit() + logger.error("Unable to connect to the database. 
Terminating...") + raise(e) - def read_config(self, section, name=None): - """ - Read a variable in specified section of the config file, unless provided an environment variable + def shutdown(self): + if self.logging.stop_event is not None: + logger.debug("Stopping housekeeper logging listener...") + self.logging.stop_event.set() - :param section: location of given variable - :param name: name of variable - """ - if name is not None: - try: - value = self.config[section][name] - except KeyError as e: - value = default_config[section][name] - else: - try: - value = self.config[section] - except KeyError as e: - value = default_config[section] - - return value - - def load_env_configuration(self): - self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') - self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') - self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') - self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') - self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') - self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') - self.set_env_value(section='Development', name='log_level', environment_variable='AUGUR_LOG_LEVEL') - - def set_env_value(self, section, name, environment_variable, sub_config=None): - """ - Sets names and values of specified config section according to their environment variables. - """ - # using sub_config lets us grab values from nested config blocks - if sub_config is None: - sub_config = self.config + if self.housekeeper is not None: + logger.debug("Shutting down housekeeper updates...") + self.housekeeper.shutdown_updates() + self.housekeeper = None - env_value = os.getenv(environment_variable) + if self.manager is not None: + logger.debug("Shutting down manager...") + self.manager.shutdown() + self.manager = None - if env_value is not None: - self.env_config[environment_variable] = env_value - sub_config[section][name] = env_value diff --git a/augur/cli/__init__.py b/augur/cli/__init__.py --- a/augur/cli/__init__.py +++ b/augur/cli/__init__.py @@ -0,0 +1,34 @@ +import click +from functools import update_wrapper + +from augur.application import Application +from augur.config import AugurConfig +from augur.logging import AugurLogging, ROOT_AUGUR_DIRECTORY + +def pass_application(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application() + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def pass_config(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application(offline_mode=True).config + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def pass_logs_dir(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + config = AugurConfig(ROOT_AUGUR_DIRECTORY) + ctx.obj = AugurLogging.get_log_directories(config, reset_logfiles=False) + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def initialize_logging(f): + def new_func(*args, **kwargs): + AugurLogging(reset_logfiles=False) + return f(*args, **kwargs) + return update_wrapper(new_func, f) \ No newline at end of file diff --git a/augur/runtime.py b/augur/cli/_multicommand.py similarity index 63% rename from augur/runtime.py rename to augur/cli/_multicommand.py --- a/augur/runtime.py +++ 
b/augur/cli/_multicommand.py @@ -6,16 +6,14 @@ import os import sys import click +import importlib import augur.application CONTEXT_SETTINGS = dict(auto_envvar_prefix='AUGUR') class AugurMultiCommand(click.MultiCommand): - def __commands_folder(self): - return os.path.abspath( - os.path.join(os.path.dirname(__file__), 'cli') - ) + return os.path.abspath(os.path.dirname(__file__)) def list_commands(self, ctx): rv = [] @@ -26,13 +24,8 @@ def list_commands(self, ctx): return rv def get_command(self, ctx, name): - # try: - if sys.version_info[0] == 2: - name = name.encode('ascii', 'replace') - mod = __import__('augur.cli.' + name, - None, None, ['cli']) - - return mod.cli + module = importlib.import_module('.' + name, 'augur.cli') + return module.cli @click.command(cls=AugurMultiCommand, context_settings=CONTEXT_SETTINGS) @click.pass_context @@ -40,11 +33,4 @@ def run(ctx): """ Augur is an application for open source community health analytics """ - - app = augur.application.Application() - ctx.obj = app - return ctx.obj - - -if __name__ == '__main__': - run() + return ctx diff --git a/augur/cli/configure.py b/augur/cli/configure.py --- a/augur/cli/configure.py +++ b/augur/cli/configure.py @@ -6,190 +6,15 @@ import os import click import json +import logging -from augur import logger +from augur.config import default_config, ENVVAR_PREFIX +from augur.cli import initialize_logging +from augur.logging import ROOT_AUGUR_DIRECTORY +logger = logging.getLogger(__name__) ENVVAR_PREFIX = "AUGUR_" -default_config = { - "Database": { - "name": "augur", - "host": "localhost", - "key": "key", - "password": "augur", - "port": 5432, - "user": "augur" - }, - "Housekeeper": { - "jobs": [ - { - "all_focused": 1, - "delay": 150000, - "given": [ - "github_url" - ], - "model": "issues", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "pull_request_commits", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "repo_info", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "repo_group" - ], - "model": "commits", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "github_url" - ], - "model": "pull_requests", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "contributors", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "insights", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "badges", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "value", - "repo_group_id": 0 - }, - { - "delay": 100000, - "given": [ - "github_url" - ], - "model": "pull_request_files", - "repo_group_id": 0 - } - ] - }, - "Workers": { - "facade_worker": { - "port": 50100, - "repo_directory": "repos/", - "switch": 1, - "workers": 1 - }, - "github_worker": { - "port": 50200, - "switch": 1, - "workers": 1 - }, - "insight_worker": { - "port": 50300, - "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"}, - "contamination": 0.041, - "switch": 0, - "workers": 1, - "training_days": 365, - "anomaly_days": 2 - }, - "linux_badge_worker": { - "port": 50400, - "switch": 1, - "workers": 1 - }, - "metric_status_worker": { - "port": 50500, - "switch": 0, - "workers": 1 - }, - "pull_request_worker": { - "port": 50600, - "switch": 1, - "workers": 1 - }, - 
"repo_info_worker": { - "port": 50700, - "switch": 1, - "workers": 1 - }, - "value_worker": { - "port": 50800, - "scc_bin": "scc", - "switch": 0, - "workers": 1 - }, - "contributor_worker": { - "port": 50900, - "switch": 1, - "workers": 1 - } - }, - "Facade": { - "check_updates": 1, - "clone_repos": 1, - "create_xlsx_summary_files": 1, - "delete_marked_repos": 0, - "fix_affiliations": 1, - "force_analysis": 1, - "force_invalidate_caches": 1, - "force_updates": 1, - "limited_run": 0, - "multithreaded": 0, - "nuke_stored_affiliations": 0, - "pull_repos": 1, - "rebuild_caches": 1, - "run_analysis": 1 - }, - "Server": { - "cache_expire": "3600", - "host": "0.0.0.0", - "port": "5000", - "workers": 4, - "timeout": 60 - }, - "Frontend": { - "host": "0.0.0.0", - "port": "5000" - }, - "Development": { - "log_level": "INFO" - } - } @click.group('configure', short_help='Generate an augur.config.json') def cli(): @@ -204,7 +29,9 @@ def cli(): @click.option('--github_api_key', help="GitHub API key for data collection from the GitHub API", envvar=ENVVAR_PREFIX + 'GITHUB_API_KEY') @click.option('--facade_repo_directory', help="Directory on the database server where Facade should clone repos", envvar=ENVVAR_PREFIX + 'FACADE_REPO_DIRECTORY') @click.option('--rc-config-file', help="File containing existing config whose values will be used as the defaults", type=click.Path(exists=True)) -def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file): [email protected]('--gitlab_api_key', help="GitLab API key for data collection from the GitLab API", envvar=ENVVAR_PREFIX + 'GITLAB_API_KEY') +@initialize_logging +def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file, gitlab_api_key): """ Generate an augur.config.json """ @@ -250,11 +77,13 @@ def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, fa config['Database']['password'] = db_password if github_api_key is not None: config['Database']['key'] = github_api_key + if gitlab_api_key is not None: + config['Database']['gitlab_api_key'] = gitlab_api_key if facade_repo_directory is not None: config['Workers']['facade_worker']['repo_directory'] = facade_repo_directory try: - with open(os.path.abspath('augur.config.json'), 'w') as f: + with open(os.path.abspath(ROOT_AUGUR_DIRECTORY + '/augur.config.json'), 'w') as f: json.dump(config, f, indent=4) logger.info('augur.config.json successfully created') except Exception as e: diff --git a/augur/cli/db.py b/augur/cli/db.py --- a/augur/cli/db.py +++ b/augur/cli/db.py @@ -1,5 +1,6 @@ from os import walk, chdir, environ, chmod, path import os +import logging from sys import exit import stat from collections import OrderedDict @@ -12,7 +13,9 @@ import pandas as pd from sqlalchemy import exc -from augur import logger +from augur.cli import pass_config, pass_application + +logger = logging.getLogger(__name__) @click.group('db', short_help='Database utilities') def cli(): @@ -20,14 +23,12 @@ def cli(): @cli.command('add-repos') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repos(ctx, filename): +@pass_application +def add_repos(augur_app, filename): """ Add repositories to Augur's database """ - app = ctx.obj - - df = app.database.execute(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups")) + df = augur_app.database.execute(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups")) repo_group_IDs = [group[0] for group in 
df.fetchall()] insertSQL = s.sql.text(""" @@ -41,33 +42,29 @@ def add_repos(ctx, filename): for row in data: logger.info(f"Inserting repo with Git URL `{row[1]}` into repo group {row[0]}") if int(row[0]) in repo_group_IDs: - result = app.database.execute(insertSQL, repo_group_id=int(row[0]), repo_git=row[1]) + result = augur_app.database.execute(insertSQL, repo_group_id=int(row[0]), repo_git=row[1]) else: - logger.warn(f"Invalid repo group id specified for {row[1]}, skipping.") + logger.warning(f"Invalid repo group id specified for {row[1]}, skipping.") @cli.command('get-repo-groups') [email protected]_context -def get_repo_groups(ctx): +@pass_application +def get_repo_groups(augur_app): """ List all repo groups and their associated IDs """ - app = ctx.obj - - df = pd.read_sql(s.sql.text("SELECT repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), app.database) + df = pd.read_sql(s.sql.text("SELECT repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), augur_app.database) print(df) return df @cli.command('add-repo-groups') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repo_groups(ctx, filename): +@pass_application +def add_repo_groups(augur_app, filename): """ Create new repo groups in Augur's database """ - app = ctx.obj - - df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), app.database) + df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), augur_app.database) repo_group_IDs = df['repo_group_id'].values.tolist() insert_repo_group_sql = s.sql.text(""" @@ -80,51 +77,48 @@ def add_repo_groups(ctx, filename): logger.info(f"Inserting repo group with name {row[1]} and ID {row[0]}...") if int(row[0]) not in repo_group_IDs: repo_group_IDs.append(int(row[0])) - app.database.execute(insert_repo_group_sql, repo_group_id=int(row[0]), repo_group_name=row[1]) + augur_app.database.execute(insert_repo_group_sql, repo_group_id=int(row[0]), repo_group_name=row[1]) else: logger.info(f"Repo group with ID {row[1]} for repo group {row[1]} already exists, skipping...") @cli.command('update-repo-directory') @click.argument('repo_directory') [email protected]_context -def update_repo_directory(ctx, repo_directory): +@pass_application +def update_repo_directory(augur_app, repo_directory): """ Update Facade worker repo cloning directory """ - app = ctx.obj - updateRepoDirectorySQL = s.sql.text(""" UPDATE augur_data.settings SET VALUE = :repo_directory WHERE setting='repo_directory'; """) - app.database.execute(updateRepoDirectorySQL, repo_directory=repo_directory) + augur_app.database.execute(updateRepoDirectorySQL, repo_directory=repo_directory) logger.info(f"Updated Facade repo directory to: {repo_directory}") # get_db_version is a helper function to print_db_version and upgrade_db_version -def get_db_version(app): +def get_db_version(augur_app): db_version_sql = s.sql.text(""" SELECT * FROM augur_operations.augur_settings WHERE setting = 'augur_data_version' """) - return int(app.database.execute(db_version_sql).fetchone()[2]) + return int(augur_app.database.execute(db_version_sql).fetchone()[2]) @cli.command('print-db-version') [email protected]_context -def print_db_version(ctx): +@pass_application +def print_db_version(augur_app): """ Get the version of the configured database """ - print(get_db_version(ctx.obj)) + print(get_db_version(augur_app)) @cli.command('upgrade-db-version') [email protected]_context -def upgrade_db_version(ctx): +@pass_application +def 
upgrade_db_version(augur_app): """ Upgrade the configured database to the latest version """ - app = ctx.obj - check_pgpass_credentials(app.config) - current_db_version = get_db_version(app) + check_pgpass_credentials(augur_app.config.get_raw_config()) + current_db_version = get_db_version(augur_app) update_scripts_filenames = [] for (_, _, filenames) in walk('schema/generate'): @@ -143,23 +137,22 @@ def upgrade_db_version(ctx): if current_db_version == most_recent_version: logger.info("Your database is already up to date. ") elif current_db_version > most_recent_version: - logger.info(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") + logger.error(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") for target_version, script_location in target_version_script_map.items(): if target_version == current_db_version + 1: logger.info(f"Upgrading from {current_db_version} to {target_version}") - run_psql_command_in_database(app, '-f', f"schema/generate/{script_location}") + run_psql_command_in_database(augur_app, '-f', f"schema/generate/{script_location}") current_db_version += 1 @cli.command('check-for-upgrade') [email protected]_context -def check_for_upgrade(ctx): +@pass_application +def check_for_upgrade(augur_app): """ Upgrade the configured database to the latest version """ - app = ctx.obj - check_pgpass_credentials(app.config) - current_db_version = get_db_version(app) + check_pgpass_credentials(augur_app.config.get_raw_config()) + current_db_version = get_db_version(augur_app) update_scripts_filenames = [] for (_, _, filenames) in walk('schema/generate'): @@ -180,18 +173,17 @@ def check_for_upgrade(ctx): elif current_db_version < most_recent_version: logger.info(f"Current database version: v{current_db_version}\nPlease upgrade to the most recent version (v{most_recent_version}) with augur db upgrade-db-version.") elif current_db_version > most_recent_version: - logger.warn(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") + logger.error(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. 
Please contact your system administrator to resolve this error.") @cli.command('create-schema') [email protected]_context -def create_schema(ctx): +@pass_application +def create_schema(augur_app): """ Create schema in the configured database """ - app = ctx.obj - check_pgpass_credentials(app.config) - run_psql_command_in_database(app, '-f', 'schema/create_schema.sql') + check_pgpass_credentials(augur_app.config.get_raw_config()) + run_psql_command_in_database(augur_app, '-f', 'schema/create_schema.sql') def generate_key(length): return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length)) @@ -202,46 +194,40 @@ def generate_api_key(ctx): """ Generate and set a new Augur API key """ - app = ctx.obj key = generate_key(32) ctx.invoke(update_api_key, api_key=key) print(key) @cli.command('update-api-key') @click.argument("api_key") [email protected]_context -def update_api_key(ctx, api_key): +@pass_application +def update_api_key(augur_app, api_key): """ Update the API key in the database to the given key """ - app = ctx.obj - update_api_key_sql = s.sql.text(""" UPDATE augur_operations.augur_settings SET VALUE = :api_key WHERE setting='augur_api_key'; """) - app.database.execute(update_api_key_sql, api_key=api_key) - logger.info(f"Update Augur API key to: {api_key}") + augur_app.database.execute(update_api_key_sql, api_key=api_key) + logger.info(f"Updated Augur API key to: {api_key}") @cli.command('get-api-key') [email protected]_context -def get_api_key(ctx): - app = ctx.obj - +@pass_application +def get_api_key(augur_app): get_api_key_sql = s.sql.text(""" SELECT value FROM augur_operations.augur_settings WHERE setting='augur_api_key'; """) try: - print(app.database.execute(get_api_key_sql).fetchone()[0]) + print(augur_app.database.execute(get_api_key_sql).fetchone()[0]) except TypeError: - logger.warn("No Augur API key found.") + logger.error("No Augur API key found.") @cli.command('check-pgpass', short_help="Check the ~/.pgpass file for Augur's database credentials") [email protected]_context -def check_pgpass(ctx): - app = ctx.obj - check_pgpass_credentials(app.config) +@pass_config +def check_pgpass(config): + check_pgpass_credentials(config.get_raw_config()) @cli.command('init-database') @click.option('--default-db-name', default='postgres') @@ -252,12 +238,10 @@ def check_pgpass(ctx): @click.option('--target-password', default='augur') @click.option('--host', default='localhost') @click.option('--port', default='5432') [email protected]_context -def init_database(ctx, default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port): +def init_database(default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port): """ Create database with the given credentials using the given maintenance database """ - app = ctx.obj config = { 'Database': { 'name': default_db_name, @@ -276,15 +260,15 @@ def init_database(ctx, default_db_name, default_user, default_password, target_d def run_db_creation_psql_command(host, port, user, name, command): call(['psql', '-h', host, '-p', port, '-U', user, '-d', name, '-a', '-w', '-c', command]) -def run_psql_command_in_database(app, target_type, target): +def run_psql_command_in_database(augur_app, target_type, target): if target_type not in ['-f', '-c']: - logger.fatal("Invalid target type. Exiting...") + logger.error("Invalid target type. 
Exiting...") exit(1) - call(['psql', '-h', app.read_config('Database', 'host'),\ - '-d', app.read_config('Database', 'name'),\ - '-U', app.read_config('Database', 'user'),\ - '-p', str(app.read_config('Database', 'port')),\ + call(['psql', '-h', augur_app.config.get_value('Database', 'host'),\ + '-d', augur_app.config.get_value('Database', 'name'),\ + '-U', augur_app.config.get_value('Database', 'user'),\ + '-p', str(augur_app.config.get_value('Database', 'port')),\ '-a', '-w', target_type, target ]) @@ -292,14 +276,14 @@ def check_pgpass_credentials(config): pgpass_file_path = environ['HOME'] + '/.pgpass' if not path.isfile(pgpass_file_path): - logger.debug("~/.pgpass does not exist, creating.") + logger.info("~/.pgpass does not exist, creating.") open(pgpass_file_path, 'w+') chmod(pgpass_file_path, stat.S_IWRITE | stat.S_IREAD) pgpass_file_mask = oct(os.stat(pgpass_file_path).st_mode & 0o777) if pgpass_file_mask != '0o600': - logger.debug("Updating ~/.pgpass file permissions.") + logger.info("Updating ~/.pgpass file permissions.") chmod(pgpass_file_path, stat.S_IWRITE | stat.S_IREAD) with open(pgpass_file_path, 'a+') as pgpass_file: diff --git a/augur/cli/logging.py b/augur/cli/logging.py new file mode 100644 --- /dev/null +++ b/augur/cli/logging.py @@ -0,0 +1,89 @@ +import click +import os +from os import walk + +from augur.cli import pass_logs_dir + [email protected]("logging", short_help="View Augur's log files") +def cli(): + pass + [email protected]("directory") +@pass_logs_dir +def directory(logs_dir): + """ + Print the location of Augur's logs directory + """ + print(logs_dir) + [email protected]("tail") [email protected]("lines", default=20) +@pass_logs_dir +def tail(logs_dir, lines): + """ + Output the last n lines of the main Augur and worker logfiles + """ + root_log_dir = logs_dir + worker_log_dir = logs_dir + "/workers/" + if lines is None: + lines = 20 + + files = [] + directories = [] + for (_, _, filenames) in walk(root_log_dir): + for file in filenames: + result = _tail(open(root_log_dir + "/" + file), lines) + print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + break + + files = [] + directories = [] + for (dirpath, dirnames, filenames) in walk(worker_log_dir): + directories.extend(dirnames) + break + + for directory in directories: + specific_worker_log_dir = worker_log_dir + directory + for (_, _, filenames) in walk(specific_worker_log_dir): + files.extend(filenames) + + for file in [file for file in filenames if "collection" in file]: + result = _tail(open(specific_worker_log_dir + "/" + file), lines) + print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + break + +def _tail(f, lines=20, _buffer=4098): + lines_found = [] + + # block counter will be multiplied by buffer + # to get the block size from the end + block_counter = -1 + + # loop until we find X lines + while len(lines_found) < lines: + try: + f.seek(block_counter * _buffer, os.SEEK_END) + except IOError: # either file is too small, or too many lines requested + f.seek(0) + lines_found = f.readlines() + break + + lines_found = f.readlines() + + # we found enough lines, get out + # Removed this line because it was redundant the while will catch + # it, I left it for history + # if len(lines_found) > lines: + # break + + # decrement the block counter to get the + # next X bytes + block_counter -= 1 + + return lines_found[-lines:] \ No newline at end of file diff --git a/augur/cli/run.py b/augur/cli/run.py --- a/augur/cli/run.py +++ 
b/augur/cli/run.py @@ -4,187 +4,143 @@ """ from copy import deepcopy -import os, time, atexit, subprocess, click +import os, time, atexit, subprocess, click, atexit, logging, sys import multiprocessing as mp import gunicorn.app.base -from gunicorn.six import iteritems from gunicorn.arbiter import Arbiter -from augur.housekeeper.housekeeper import Housekeeper -from augur import logger +from augur.housekeeper import Housekeeper from augur.server import Server - from augur.cli.util import kill_processes -import time +from augur.application import Application + +logger = logging.getLogger("augur") @click.command("run") @click.option("--disable-housekeeper", is_flag=True, default=False, help="Turns off the housekeeper") @click.option("--skip-cleanup", is_flag=True, default=False, help="Disables the old process cleanup that runs before Augur starts") [email protected]_context -def cli(ctx, disable_housekeeper, skip_cleanup): +def cli(disable_housekeeper, skip_cleanup): """ Start Augur's backend server """ + augur_app = Application() + logger.info("Augur application initialized") if not skip_cleanup: - logger.info("Cleaning up old Augur processes. Just a moment please...") - ctx.invoke(kill_processes) + logger.debug("Cleaning up old Augur processes...") + kill_processes() time.sleep(2) else: - logger.info("Skipping cleanup processes.") - - def get_process_id(name): - """Return process ids found by name or command - """ - child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False) - response = child.communicate()[0] - return [int(pid) for pid in response.split()] + logger.debug("Skipping process cleanup") - app = ctx.obj + master = initialize_components(augur_app, disable_housekeeper) + logger.info('Starting Gunicorn server in the background...') + if not disable_housekeeper: + logger.info('Housekeeper update process logs will now take over.') + else: + logger.info("Gunicorn server logs will be written to gunicorn.log") + logger.info("Augur is still running...don't close this process!") + Arbiter(master).run() - mp.set_start_method('forkserver', force=True) +def initialize_components(augur_app, disable_housekeeper): master = None - manager = None broker = None housekeeper = None - - logger.info("Booting broker and its manager...") - manager = mp.Manager() - broker = manager.dict() - - controller = app.read_config('Workers') - worker_pids = [] worker_processes = [] + mp.set_start_method('forkserver', force=True) if not disable_housekeeper: - if not controller: - return + logger.info("Booting manager") + manager = mp.Manager() + + logger.info("Booting broker") + broker = manager.dict() + + housekeeper = Housekeeper(broker=broker, augur_app=augur_app) + + controller = augur_app.config.get_section('Workers') + for worker in controller.keys(): - if not controller[worker]['switch']: - continue - logger.info("Your config has the option set to automatically boot {} instances of the {}".format(controller[worker]['workers'], worker)) - pids = get_process_id("/bin/sh -c cd workers/{} && {}_start".format(worker, worker)) - worker_pids += pids - if len(pids) > 0: - worker_pids.append(pids[0] + 1) - pids.append(pids[0] + 1) - logger.info("Found and preparing to kill previous {} worker pids: {}".format(worker,pids)) - for pid in pids: - try: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - - @atexit.register - def exit(): - try: - for pid in worker_pids: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - 
for process in worker_processes: - logger.info("Shutting down worker process with pid: {} ...".format(process.pid)) - process.terminate() + if controller[worker]['switch']: + for i in range(controller[worker]['workers']): + logger.info("Booting {} #{}".format(worker, i + 1)) + worker_process = mp.Process(target=worker_start, name=f"{worker}_{i}", kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) + worker_processes.append(worker_process) + worker_process.start() - if master is not None: - master.halt() - logger.info("Shutting down housekeeper updates...") - if housekeeper is not None: - housekeeper.shutdown_updates() - - # if hasattr(manager, "shutdown"): - # wait for the spawner and the worker threads to go down - # - if manager is not None: - manager.shutdown() - # check if it is still alive and kill it if necessary - # if manager._process.is_alive(): - manager._process.terminate() - - # Prevent multiprocessing's atexit from conflicting with gunicorn - logger.info("Killing main augur process with PID: {}".format(os.getpid())) - os.kill(os.getpid(), 9) - os._exit(0) + augur_app.manager = manager + augur_app.broker = broker + augur_app.housekeeper = housekeeper - if not disable_housekeeper: - logger.info("Booting housekeeper...") - jobs = deepcopy(app.read_config('Housekeeper', 'jobs')) - try: - housekeeper = Housekeeper( - jobs, - broker, - broker_host=app.read_config('Server', 'host'), - broker_port=app.read_config('Server', 'port'), - user=app.read_config('Database', 'user'), - password=app.read_config('Database', 'password'), - host=app.read_config('Database', 'host'), - port=app.read_config('Database', 'port'), - dbname=app.read_config('Database', 'name') - ) - except KeyboardInterrupt as e: - exit() - - logger.info("Housekeeper has finished booting.") - - if controller: - for worker in controller.keys(): - if controller[worker]['switch']: - for i in range(controller[worker]['workers']): - logger.info("Booting {} #{}".format(worker, i + 1)) - worker_process = mp.Process(target=worker_start, kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) - worker_process.start() - worker_processes.append(worker_process) - - host = app.read_config('Server', 'host') - port = app.read_config('Server', 'port') - workers = int(app.read_config('Server', 'workers')) - timeout = int(app.read_config('Server', 'timeout')) - options = { - 'bind': '%s:%s' % (host, port), - 'workers': workers, - 'accesslog': '-', - 'access_log_format': '%(h)s - %(t)s - %(r)s', - 'timeout': timeout - } - logger.info('Starting server...') - master = Arbiter(AugurGunicornApp(options, manager=manager, broker=broker, housekeeper=housekeeper)).run() + atexit._clear() + atexit.register(exit, augur_app, worker_processes, master) + return AugurGunicornApp(augur_app.gunicorn_options, augur_app=augur_app) def worker_start(worker_name=None, instance_number=0, worker_port=None): - time.sleep(120 * instance_number) - destination = subprocess.DEVNULL try: - destination = open("workers/{}/worker_{}.log".format(worker_name, worker_port), "a+") - except IOError as e: - logger.error("Error opening log file for auto-started worker {}: {}".format(worker_name, e)) - process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) - logger.info("{} booted.".format(worker_name)) + time.sleep(30 * instance_number) + destination = subprocess.DEVNULL + 
process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) + logger.info("{} #{} booted.".format(worker_name,instance_number+1)) + except KeyboardInterrupt as e: + pass + +def exit(augur_app, worker_processes, master): + + logger.info("Shutdown started for this Gunicorn worker...") + augur_app.shutdown() + + if worker_processes: + for process in worker_processes: + logger.debug("Shutting down worker process with pid: {}...".format(process.pid)) + process.terminate() + + if master is not None: + logger.debug("Shutting down Gunicorn server") + master.halt() + master = None + + logger.info("Shutdown complete") + sys.exit(0) class AugurGunicornApp(gunicorn.app.base.BaseApplication): """ Loads configurations, initializes Gunicorn, loads server """ - def __init__(self, options=None, manager=None, broker=None, housekeeper=None): - self.options = options or {} - self.manager = manager - self.broker = broker - self.housekeeper = housekeeper + def __init__(self, options={}, augur_app=None): + self.options = options + self.augur_app = augur_app + self.manager = self.augur_app.manager + self.broker = self.augur_app.broker + self.housekeeper = self.augur_app.housekeeper + self.server = None + logger.debug(f"Gunicorn will start {self.options['workers']} worker processes") super(AugurGunicornApp, self).__init__() - # self.cfg.pre_request.set(pre_request) def load_config(self): """ Sets the values for configurations """ - config = dict([(key, value) for key, value in iteritems(self.options) - if key in self.cfg.settings and value is not None]) - for key, value in iteritems(config): + config = {key: value for key, value in self.options.items() + if key in self.cfg.settings and value is not None} + for key, value in config.items(): self.cfg.set(key.lower(), value) - def load(self): + def get_augur_app(self): """ Returns the loaded server """ - server = Server(manager=self.manager, broker=self.broker, housekeeper=self.housekeeper) - return server.app + self.load() + return self.server.augur_app + def load(self): + """ + Returns the loaded server + """ + if self.server is None: + try: + self.server = Server(augur_app=self.augur_app) + except Exception as e: + logger.error(f"An error occured when Gunicorn tried to load the server: {e}") + return self.server.app diff --git a/augur/cli/util.py b/augur/cli/util.py --- a/augur/cli/util.py +++ b/augur/cli/util.py @@ -5,6 +5,7 @@ import os import signal +import logging from subprocess import call, run import psutil @@ -12,36 +13,38 @@ import pandas as pd import sqlalchemy as s -from augur import logger -from augur.cli.configure import default_config +from augur.cli import initialize_logging, pass_config, pass_application + +logger = logging.getLogger(__name__) @click.group('util', short_help='Miscellaneous utilities') def cli(): pass @cli.command('export-env') [email protected]_context -def export_env(ctx): +@pass_config +def export_env(config): """ Exports your GitHub key and database credentials """ - app = ctx.obj export_file = open(os.getenv('AUGUR_EXPORT_FILE', 'augur_export_env.sh'), 'w+') export_file.write('#!/bin/bash') export_file.write('\n') env_file = open(os.getenv('AUGUR_ENV_FILE', 'docker_env.txt'), 'w+') - for env_var in app.env_config.items(): - export_file.write('export ' + env_var[0] + '="' + str(env_var[1]) + '"\n') - env_file.write(env_var[0] + '=' + str(env_var[1]) + '\n') + for env_var in config.get_env_config().items(): + if "LOG" not in env_var[0]: + 
logger.info(f"Exporting {env_var[0]}") + export_file.write('export ' + env_var[0] + '="' + str(env_var[1]) + '"\n') + env_file.write(env_var[0] + '=' + str(env_var[1]) + '\n') export_file.close() env_file.close() @cli.command('kill') [email protected]_context -def kill_processes(ctx): +@initialize_logging +def cli_kill_processes(): """ Terminates all currently running backend Augur processes, including any workers. Will only work in a virtual environment. """ @@ -49,14 +52,26 @@ def kill_processes(ctx): if processes != []: for process in processes: if process.pid != os.getpid(): - # logger.info(f"Killing {process.pid}: {' '.join(process.info['cmdline'][1:])}") logger.info(f"Killing process {process.pid}") try: process.send_signal(signal.SIGTERM) except psutil.NoSuchProcess as e: pass +def kill_processes(): + logger = logging.getLogger("augur") + processes = get_augur_processes() + if processes != []: + for process in processes: + if process.pid != os.getpid(): + logger.info(f"Killing process {process.pid}") + try: + process.send_signal(signal.SIGTERM) + except psutil.NoSuchProcess as e: + logger.warning(e) + @cli.command('list',) +@initialize_logging def list_processes(): """ Outputs the name and process ID (PID) of all currently running backend Augur processes, including any workers. Will only work in a virtual environment. @@ -78,13 +93,11 @@ def get_augur_processes(): return processes @cli.command('repo-reset') [email protected]_context -def repo_reset(ctx): +@pass_application +def repo_reset(augur_app): """ Refresh repo collection to force data collection """ - app = ctx.obj - - app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") + augur_app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") logger.info("Repos successfully reset") diff --git a/augur/config.py b/augur/config.py new file mode 100644 --- /dev/null +++ b/augur/config.py @@ -0,0 +1,349 @@ +import os +import json +import logging + +ENVVAR_PREFIX = "AUGUR_" + +default_config = { + "version": 1, + "Database": { + "name": "augur", + "host": "localhost", + "key": "key", + "password": "augur", + "port": 5432, + "user": "augur", + "gitlab_api_key":"gitlab_api_key" + }, + "Housekeeper": { + "jobs": [ + { + "all_focused": 1, + "delay": 150000, + "given": [ + "github_url" + ], + "model": "issues", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "pull_request_commits", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "repo_info", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "repo_group" + ], + "model": "commits", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "github_url" + ], + "model": "pull_requests", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "contributors", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "insights", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "badges", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "value", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "pull_request_files", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "releases", + 
"repo_group_id": 0 + } + ] + }, + "Workers": { + "facade_worker": { + "port": 50100, + "repo_directory": "repos/", + "switch": 1, + "workers": 1 + }, + "github_worker": { + "port": 50200, + "switch": 1, + "workers": 1 + }, + "insight_worker": { + "port": 50300, + "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", + "reviews": "pull_requests", "contributors-new": "new_contributors"}, + "confidence_interval": 95, + "contamination": 0.041, + "switch": 0, + "workers": 1, + "training_days": 365, + "anomaly_days": 2 + }, + "linux_badge_worker": { + "port": 50400, + "switch": 1, + "workers": 1 + }, + "metric_status_worker": { + "port": 50500, + "switch": 0, + "workers": 1 + }, + "pull_request_worker": { + "port": 50600, + "switch": 1, + "workers": 1 + }, + "repo_info_worker": { + "port": 50700, + "switch": 1, + "workers": 1 + }, + "value_worker": { + "port": 50800, + "scc_bin": "scc", + "switch": 0, + "workers": 1 + }, + "contributor_worker": { + "port": 50900, + "switch": 1, + "workers": 1 + }, + "gitlab_issues_worker": { + "port": 51000, + "switch": 1, + "workers": 1 + }, + "release_worker": { + "port": 51100, + "switch": 1, + "workers": 1 + }, + "gitlab_merge_request_worker": { + "port": 51200, + "switch": 1, + "workers": 1 + } + }, + "Facade": { + "check_updates": 1, + "clone_repos": 1, + "create_xlsx_summary_files": 1, + "delete_marked_repos": 0, + "fix_affiliations": 1, + "force_analysis": 1, + "force_invalidate_caches": 1, + "force_updates": 1, + "limited_run": 0, + "multithreaded": 0, + "nuke_stored_affiliations": 0, + "pull_repos": 1, + "rebuild_caches": 1, + "run_analysis": 1 + }, + "Server": { + "cache_expire": "3600", + "host": "0.0.0.0", + "port": "5000", + "workers": 4, + "timeout": 60 + }, + "Frontend": { + "host": "0.0.0.0", + "port": "5000" + }, + "Logging": { + "logs_directory": "logs/", + "log_level": "INFO", + "verbose": 0, + "quiet": 0, + "debug": 0 + } + } + +logger = logging.getLogger(__name__) + +class AugurConfig(): + """docstring for AugurConfig""" + def __init__(self, root_augur_dir, given_config={}): + self._default_config_file_name = 'augur.config.json' + self._root_augur_dir = root_augur_dir + self._default_config = default_config + self._env_config = {} + self.load_config() + self.version = self.get_version() + self._config.update(given_config) + + def get_section(self, section_name): + try: + return self._config[section_name] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name} not found in loaded config. Checking default config") + try: + return self._default_config[section_name] + except KeyError as e: + logger.error(f"No defaults found for {section_name}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}") + + def get_version(self): + try: + return self._config["version"] + except KeyError as e: + logger.warning("No config version found. Setting version to 0.") + return 0 + + def get_value(self, section_name, value): + try: + return self._config[section_name][value] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name}:{value} not found in loaded config. 
Checking default config") + try: + return self._default_config[section_name][value] + except KeyError as e: + logger.error(f"No defaults found for {section_name}:{value}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}:{value}") + + def load_config(self): + self._config = None + self.using_default_config = False + + logger.debug("Attempting to load config file") + try: + config_file_path = self.discover_config_file() + try: + with open(config_file_path, 'r+') as config_file_handle: + self._config = json.loads(config_file_handle.read()) + logger.debug("Config file loaded successfully") + except json.decoder.JSONDecodeError as e: + logger.warning("Unable to parse config. Using default configuration") + self.using_default_config = True + self._config = default_config + except AugurConfigFileNotFoundException as e: + logger.warning("Config file not found. Using default configuration") + self.using_default_config = True + self._config = default_config + + self.load_env_configuration() + + def discover_config_file(self): + default_config_path = self._root_augur_dir + '/' + self._default_config_file_name + config_file_path = None + + config_locations = [self._default_config_file_name, default_config_path + , f"/opt/augur/{self._default_config_file_name}"] + if os.getenv('AUGUR_CONFIG_FILE', None) is not None: + config_file_path = os.getenv('AUGUR_CONFIG_FILE') + else: + for location in config_locations: + try: + f = open(location, "r+") + config_file_path = os.path.abspath(location) + f.close() + break + except FileNotFoundError: + pass + if config_file_path: + return config_file_path + else: + raise(AugurConfigFileNotFoundException(message=f"{self._default_config_file_name} not found", errors=None)) + + def load_env_configuration(self): + self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') + self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') + self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') + self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') + self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') + self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') + self.set_env_value(section='Logging', name='log_level', environment_variable='AUGUR_LOG_LEVEL') + self.set_env_value(section='Logging', name='quiet', environment_variable='AUGUR_LOG_QUIET') + self.set_env_value(section='Logging', name='debug', environment_variable='AUGUR_LOG_DEBUG') + self.set_env_value(section='Logging', name='verbose', environment_variable='AUGUR_LOG_VERBOSE') + + def set_env_value(self, section, name, environment_variable, sub_config=None): + """ + Sets names and values of specified config section according to their environment variables. 
+ """ + # using sub_config lets us grab values from nested config blocks + if sub_config is None: + sub_config = self._config + + env_value = os.getenv(environment_variable) + + if env_value is not None: + self._env_config[environment_variable] = env_value + sub_config[section][name] = env_value + # logger.info(f"{section}:[\"{name}\"] set to {env_value} by: {environment_variable}") + else: + self._env_config[environment_variable] = self.get_value(section, name) + + def get_raw_config(self): + return self._config + + def get_default_config(self): + return self._default_config + + def get_env_config(self): + return self._env_config + +class AugurConfigFileNotFoundException(Exception): + def __init__(self, message, errors): + super().__init__(message) diff --git a/augur/housekeeper/housekeeper.py b/augur/housekeeper.py similarity index 81% rename from augur/housekeeper/housekeeper.py rename to augur/housekeeper.py --- a/augur/housekeeper/housekeeper.py +++ b/augur/housekeeper.py @@ -1,69 +1,85 @@ """ Keeps data up to date """ +import coloredlogs +from copy import deepcopy import logging, os, time, requests -from multiprocessing import Process +import logging.config +from multiprocessing import Process, get_start_method from sqlalchemy.ext.automap import automap_base import sqlalchemy as s import pandas as pd from sqlalchemy import MetaData -logging.basicConfig(filename='housekeeper.log') + +from augur.logging import AugurLogging + +import warnings +warnings.filterwarnings('ignore') + +logger = logging.getLogger(__name__) class Housekeeper: - def __init__(self, jobs, broker, broker_host, broker_port, user, password, host, port, dbname): + def __init__(self, broker, augur_app): + logger.info("Booting housekeeper") - self.broker_host = broker_host - self.broker_port = broker_port + self._processes = [] + self.augur_logging = augur_app.logging + self.jobs = deepcopy(augur_app.config.get_value("Housekeeper", "jobs")) + self.broker_host = augur_app.config.get_value("Server", "host") + self.broker_port = augur_app.config.get_value("Server", "port") self.broker = broker - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - user, password, host, port, dbname - ) - - dbschema='augur_data' - self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + self.db = augur_app.database + self.helper_db = augur_app.operations_database helper_metadata = MetaData() helper_metadata.reflect(self.helper_db, only=['worker_job']) HelperBase = automap_base(metadata=helper_metadata) HelperBase.prepare() - self.job_table = HelperBase.classes.worker_job.__table__ repoUrlSQL = s.sql.text(""" SELECT repo_git FROM repo """) - rs = pd.read_sql(repoUrlSQL, self.db, params={}) - all_repos = rs['repo_git'].values.tolist() # List of tasks that need periodic updates - self.__updatable = self.prep_jobs(jobs) + self.schedule_updates() + + def schedule_updates(self): + """ + Starts update processes + """ + self.prep_jobs() + self.augur_logging.initialize_housekeeper_logging_listener() + logger.info("Scheduling update processes") + for job in self.jobs: + process = Process(target=self.updater_process, name=job["model"], args=(self.broker_host, self.broker_port, self.broker, job, (self.augur_logging.housekeeper_job_config, self.augur_logging.get_config()))) + self._processes.append(process) + 
process.start() - self.__processes = [] - self.__updater() @staticmethod - def updater_process(broker_host, broker_port, broker, job): + def updater_process(broker_host, broker_port, broker, job, logging_config): """ Controls a given plugin's update process - :param name: name of object to be updated - :param delay: time needed to update - :param shared: shared object that is to also be updated + """ - + logging.config.dictConfig(logging_config[0]) + logger = logging.getLogger(f"augur.jobs.{job['model']}") + coloredlogs.install(level=logging_config[1]["log_level"], logger=logger, fmt=logging_config[1]["format_string"]) + + if logging_config[1]["quiet"]: + logger.disabled + if 'repo_group_id' in job: repo_group_id = job['repo_group_id'] - logging.info('Housekeeper spawned {} model updater process for repo group id {} with PID {}\n'.format(job['model'], repo_group_id, os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo group id {}'.format(job['model'], repo_group_id)) else: repo_group_id = None - logging.info('Housekeeper spawned {} model updater process for repo ids {} with PID {}\n'.format(job['model'], job['repo_ids'], os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo ids {}'.format(job['model'], job['repo_ids'])) try: compatible_worker_found = False @@ -76,10 +92,10 @@ def updater_process(broker_host, broker_port, broker, job): time.sleep(3) continue - logging.info("Housekeeper recognized that the broker has a worker that " + - "can handle the {} model... beginning to distribute maintained tasks\n".format(job['model'])) + logger.info("Housekeeper recognized that the broker has a worker that " + + "can handle the {} model... beginning to distribute maintained tasks".format(job['model'])) while True: - logging.info('Housekeeper updating {} model with given {}...\n'.format( + logger.info('Housekeeper updating {} model with given {}...'.format( job['model'], job['given'][0])) if job['given'][0] == 'git_url' or job['given'][0] == 'github_url': @@ -100,9 +116,9 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.error("Error encountered: {}".format(e)) - logging.info(task) + logger.debug(task) time.sleep(15) @@ -119,61 +135,33 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.error("Error encountered: {}".format(e)) - logging.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)\n".format(len(job['repos']))) + logger.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)".format(len(job['repos']))) time.sleep(job['delay']) - - except KeyboardInterrupt: - os.kill(os.getpid(), 9) - os._exit(0) - except: - raise - def __updater(self, jobs=None): - """ - Starts update processes - """ - logging.info("Starting update processes...") - if jobs is None: - jobs = self.__updatable - for job in jobs: - up = Process(target=self.updater_process, args=(self.broker_host, self.broker_port, self.broker, job), daemon=True) - up.start() - self.__processes.append(up) - - def update_all(self): - """ - Updates all plugins - """ - for updatable in self.__updatable: - 
updatable['update']() - - def schedule_updates(self): - """ - Schedules updates - """ - # don't use this, - logging.debug('Scheduling updates...') - self.__updater() + except KeyboardInterrupt as e: + pass def join_updates(self): """ Join to the update processes """ - for process in self.__processes: + for process in self._processes: + logger.debug(f"Joining {process.name} update process") process.join() def shutdown_updates(self): """ Ends all running update processes """ - for process in self.__processes: + for process in self._processes: + # logger.debug(f"Terminating {process.name} update process") process.terminate() - def prep_jobs(self, jobs): - - for job in jobs: + def prep_jobs(self): + logger.info("Preparing housekeeper jobs") + for job in self.jobs: if 'repo_group_id' in job or 'repo_ids' in job: # If RG id is 0 then it just means to query all repos where_and = 'AND' if job['model'] == 'issues' and 'repo_group_id' in job else 'WHERE' @@ -269,7 +257,7 @@ def prep_jobs(self, jobs): reorganized_repos = pd.read_sql(repo_url_sql, self.db, params={}) if len(reorganized_repos) == 0: - logging.info("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) + logger.warning("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) job['repos'] = [] continue @@ -290,7 +278,7 @@ def prep_jobs(self, jobs): 'oauth_id': 0 } result = self.helper_db.execute(self.job_table.insert().values(job_tuple)) - logging.info("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) + logger.debug("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) # If a last id is not recorded, start from beginning of repos # (first id is not necessarily 0) @@ -347,5 +335,3 @@ def prep_jobs(self, jobs): job['repos'] = rs # time.sleep(120) - return jobs - diff --git a/augur/logging.py b/augur/logging.py new file mode 100644 --- /dev/null +++ b/augur/logging.py @@ -0,0 +1,305 @@ +import logging +import logging.config +import logging.handlers +from logging import FileHandler, StreamHandler, Formatter +from multiprocessing import Process, Queue, Event, current_process +from time import sleep +import os +from pathlib import Path +import atexit +import shutil +import coloredlogs +from copy import deepcopy + +from augur import ROOT_AUGUR_DIRECTORY + +logger = logging.getLogger(__name__) + +class AugurLogging(): + + simple_format_string = "[%(process)d] %(name)s [%(levelname)s] %(message)s" + verbose_format_string = "%(asctime)s,%(msecs)dms [PID: %(process)d] %(name)s [%(levelname)s] %(message)s" + cli_format_string = "CLI: [%(module)s.%(funcName)s] [%(levelname)s] %(message)s" + config_format_string = "[%(levelname)s] %(message)s" + error_format_string = "%(asctime)s [PID: %(process)d] %(name)s [%(funcName)s() in %(filename)s:L%(lineno)d] [%(levelname)s]: %(message)s" + + @staticmethod + def get_log_directories(augur_config, reset_logfiles=True): + LOGS_DIRECTORY = augur_config.get_value("Logging", "logs_directory") + + if LOGS_DIRECTORY[0] != "/": + LOGS_DIRECTORY = ROOT_AUGUR_DIRECTORY + "/" + LOGS_DIRECTORY + + if LOGS_DIRECTORY[-1] != "/": + LOGS_DIRECTORY += "/" + + if reset_logfiles is True: + try: + shutil.rmtree(LOGS_DIRECTORY) + except FileNotFoundError as e: + pass + + Path(LOGS_DIRECTORY).mkdir(exist_ok=True) + + return LOGS_DIRECTORY + + def __init__(self, disable_logs=False, 
reset_logfiles=True): + self.stop_event = None + self.LOGS_DIRECTORY = None + self.WORKER_LOGS_DIRECTORY = None + self.LOG_LEVEL = None + self.VERBOSE = None + self.QUIET = None + self.DEGBUG = None + + self.logfile_config = None + self.housekeeper_job_config = None + + self._reset_logfiles = reset_logfiles + + self.formatters = { + "simple": { + "class": "logging.Formatter", + "format": AugurLogging.simple_format_string + }, + "verbose": { + "class": "logging.Formatter", + "format": AugurLogging.verbose_format_string + }, + "cli": { + "class": "logging.Formatter", + "format": AugurLogging.cli_format_string + }, + "config": { + "class": "logging.Formatter", + "format": AugurLogging.config_format_string + }, + "error": { + "class": "logging.Formatter", + "format": AugurLogging.error_format_string + } + } + + self._configure_cli_logger() + + level = logging.INFO + config_handler = StreamHandler() + config_handler.setFormatter(Formatter(fmt=AugurLogging.config_format_string)) + config_handler.setLevel(level) + + config_initialization_logger = logging.getLogger("augur.config") + config_initialization_logger.setLevel(level) + config_initialization_logger.handlers = [] + config_initialization_logger.addHandler(config_handler) + config_initialization_logger.propagate = False + + coloredlogs.install(level=level, logger=config_initialization_logger, fmt=AugurLogging.config_format_string) + + if disable_logs: + self._disable_all_logging() + + + def _disable_all_logging(self): + for logger in ["augur", "augur.application", "augur.housekeeper", "augur.config", "augur.cli", "root"]: + lg = logging.getLogger(logger) + lg.disabled = True + + def _configure_cli_logger(self): + cli_handler = StreamHandler() + cli_handler.setLevel(logging.INFO) + + cli_logger = logging.getLogger("augur.cli") + cli_logger.setLevel(logging.INFO) + cli_logger.handlers = [] + cli_logger.addHandler(cli_handler) + cli_logger.propagate = False + + coloredlogs.install(level=logging.INFO, logger=cli_logger, fmt=AugurLogging.cli_format_string) + + def _set_config(self, augur_config): + self.LOGS_DIRECTORY = AugurLogging.get_log_directories(augur_config, self._reset_logfiles) + self.LOG_LEVEL = augur_config.get_value("Logging", "log_level") + self.QUIET = int(augur_config.get_value("Logging", "quiet")) + self.DEBUG = int(augur_config.get_value("Logging", "debug")) + self.VERBOSE = int(augur_config.get_value("Logging", "verbose")) + # self.JOB_NAMES = [job["model"] for job in deepcopy(augur_config.get_value("Housekeeper", "jobs"))] + + if self.QUIET: + self._disable_all_logging() + + if self.DEBUG: + self.LOG_LEVEL = "DEBUG" + self.VERBOSE = True + + if self.VERBOSE: + self.FORMATTER = "verbose" + else: + self.FORMATTER = "simple" + self.format_string = self.formatters[self.FORMATTER]["format"] + + def configure_logging(self, augur_config): + self._set_config(augur_config) + self._configure_logfiles() + self._configure_cli_logger() + self._configure_gunicorn_logging() + logger.debug("Loggers are fully configured") + + def _configure_logfiles(self): + self.logfile_config = { + "version": 1, + "disable_existing_loggers": True, + "formatters": self.formatters, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "formatter": self.FORMATTER, + "level": self.LOG_LEVEL + }, + "logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "augur.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "errorfile": { + "class": "logging.FileHandler", + "filename": 
self.LOGS_DIRECTORY + "augur.err", + "mode": "a", + "level": logging.WARNING, + "formatter": "error" + }, + "server_logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "gunicorn.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "housekeeper_logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "housekeeper.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "housekeeper_errorfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "housekeeper.err", + "mode": "a", + "level": logging.WARNING, + "formatter": "error", + }, + }, + "loggers": { + "augur": { + "handlers": ["console", "logfile", "errorfile"], + "level": self.LOG_LEVEL + }, + "augur.server": { + "handlers": ["server_logfile"], + "level": self.LOG_LEVEL, + "propagate": False + }, + "augur.housekeeper": { + "handlers": ["housekeeper_logfile", "housekeeper_errorfile"], + "level": self.LOG_LEVEL, + }, + "augur.jobs": { + "handlers": ["housekeeper_logfile", "housekeeper_errorfile", "logfile", "errorfile"], + "level": self.LOG_LEVEL, + "propagate": False + } + }, + "root": { + "handlers": [], + "level": self.LOG_LEVEL + } + } + + logging.config.dictConfig(self.logfile_config) + for logger_name in ["augur", "augur.housekeeper", "augur.jobs"]: + coloredlogs.install(logger=logging.getLogger(logger_name), level=self.LOG_LEVEL, fmt=self.format_string) + + logger.debug("Logfiles initialized") + logger.debug("Logs will be written to: " + self.LOGS_DIRECTORY) + + def initialize_housekeeper_logging_listener(self): + queue = Queue() + self.housekeeper_job_config = { + "version": 1, + "disable_existing_loggers": True, + "formatters": self.formatters, + "handlers": { + "queue": { + "class": "logging.handlers.QueueHandler", + "queue": queue + } + }, + "root": { + "handlers": ["queue"], + "level": self.LOG_LEVEL + } + } + + stop_event = Event() + self.lp = Process(target=logging_listener_process, name='housekeeper_logging_listener', + args=(queue, stop_event, self.logfile_config)) + self.lp.start() + sleep(2) # just to let it fully start up + self.stop_event = stop_event + logger.debug("Houseekeeper logging listener initialized") + + def get_config(self): + return { + "log_level": self.LOG_LEVEL, + "quiet": self.QUIET, + "verbose": self.VERBOSE, + "debug": self.DEBUG, + "format_string": self.format_string + } + + def _configure_gunicorn_logging(self): + gunicorn_log_file = self.LOGS_DIRECTORY + "gunicorn.log" + self.gunicorn_logging_options = { + "errorlog": gunicorn_log_file, + "accesslog": gunicorn_log_file, + "loglevel": self.LOG_LEVEL, + "capture_output": False + } + +def logging_listener_process(queue, stop_event, config): + """ + This could be done in the main process, but is just done in a separate + process for illustrative purposes. + + This initialises logging according to the specified configuration, + starts the listener and waits for the main process to signal completion + via the event. The listener is then stopped, and the process exits. + """ + logging.config.dictConfig(config) + listener = logging.handlers.QueueListener(queue, AugurLoggingHandler()) + listener.start() + try: + stop_event.wait() + except KeyboardInterrupt: + pass + finally: + listener.stop() + +class AugurLoggingHandler: + """ + A simple handler for logging events. 
It runs in the listener process and + dispatches events to loggers based on the name in the received record, + which then get dispatched, by the logging system, to the handlers + configured for those loggers. + """ + + def handle(self, record): + if record.name == "root": + logger = logging.getLogger() + else: + logger = logging.getLogger(record.name) + + record.processName = '%s (for %s)' % (current_process().name, record.processName) + logger.handle(record) diff --git a/augur/metrics/__init__.py b/augur/metrics/__init__.py --- a/augur/metrics/__init__.py +++ b/augur/metrics/__init__.py @@ -1 +1,38 @@ -from .metrics import Metrics \ No newline at end of file +import os +import glob +import sys +import inspect +import types +import importlib +import logging + +logger = logging.getLogger(__name__) + +class Metrics(): + def __init__(self, app): + logger.debug("Loading metrics") + self.database = app.database + self.spdx_db = app.spdx_database + + self.models = [] #TODO: standardize this + for filename in glob.iglob("augur/metrics/**"): + file_id = get_file_id(filename) + if not file_id.startswith('__') and filename.endswith('.py') and file_id != "metrics": + self.models.append(file_id) + + for model in self.models: + importlib.import_module(f"augur.metrics.{model}") + add_metrics(self, f"augur.metrics.{model}") + +def get_file_id(path): + return os.path.splitext(os.path.basename(path))[0] + +def add_metrics(metrics, module_name): + # find all unbound endpoint functions objects + # (ones that have metadata) defined the given module_name + # and bind them to the metrics class + for name, obj in inspect.getmembers(sys.modules[module_name]): + if inspect.isfunction(obj) == True: + if hasattr(obj, 'is_metric') == True: + setattr(metrics, name, types.MethodType(obj, metrics)) + diff --git a/augur/metrics/insight.py b/augur/metrics/insight.py --- a/augur/metrics/insight.py +++ b/augur/metrics/insight.py @@ -6,8 +6,7 @@ import pandas as pd from augur.util import register_metric - -@register_metric() +@register_metric(type="repo_group_only") def top_insights(self, repo_group_id, num_repos=6): """ Timeseries of pull request acceptance rate (expressed as the ratio of pull requests merged on a date to the count of pull requests opened on a date) diff --git a/augur/metrics/metrics.py b/augur/metrics/metrics.py deleted file mode 100644 --- a/augur/metrics/metrics.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import glob -import sys -import inspect -import types -import importlib -from augur import logger - -class Metrics(): - def __init__(self, app): - self.database = app.database - self.spdx_db = app.spdx_db - - models = [] #TODO: standardize this - for filename in glob.iglob("augur/metrics/**"): - file_id = get_file_id(filename) - if not file_id.startswith('__') and filename.endswith('.py') and file_id != "metrics": - models.append(file_id) - - for model in models: - importlib.import_module(f"augur.metrics.{model}") - - for model in models: - add_metrics(self, f"augur.metrics.{model}") - -def get_file_id(path): - return os.path.splitext(os.path.basename(path))[0] - -def add_metrics(metrics, module_name): - # find all unbound endpoint functions objects - # (ones that have metadata) defined the given module_name - # and bind them to the metrics class - # Derek are you proud of me - for name, obj in inspect.getmembers(sys.modules[module_name]): - if inspect.isfunction(obj) == True: - if hasattr(obj, 'metadata') == True: - setattr(metrics, name, types.MethodType(obj, metrics)) - diff --git 
a/augur/metrics/release.py b/augur/metrics/release.py new file mode 100644 --- /dev/null +++ b/augur/metrics/release.py @@ -0,0 +1,88 @@ +""" +Metrics that provide data about releases +""" + +import datetime +import sqlalchemy as s +import pandas as pd +from augur.util import register_metric + +@register_metric() +def releases(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): + """ Returns a list of releases for a repo or repo group, most recent first + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of releases + """ + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + if not repo_id: + reviews_SQL = s.sql.text(""" + SELECT + res.repo_name, + res.release_id, + res.release_name, + res.release_description, + res.release_author, + res.release_created_at, + res.release_published_at, + res.release_updated_at, + res.release_is_draft, + res.release_is_prerelease, + res.release_tag_name, + res.release_url, + COUNT(res) + FROM ( + SELECT + releases.*, + repo.repo_name + FROM + releases LEFT JOIN repo ON releases.repo_id = repo.repo_id + WHERE + repo.repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id ) + ) as res + GROUP BY res.repo_id, res.release_id + ORDER BY res.release_published_at DESC + """) + + results = pd.read_sql(reviews_SQL, self.database, + params={'period': period, 'repo_group_id': repo_group_id, + 'begin_date': begin_date, 'end_date': end_date }) + return results + + else: + reviews_SQL = s.sql.text(""" + SELECT + repo.repo_name, + releases.release_id, + releases.release_name, + releases.release_description, + releases.release_author, + releases.release_created_at, + releases.release_published_at, + releases.release_updated_at, + releases.release_is_draft, + releases.release_is_prerelease, + releases.release_tag_name, + releases.release_url, + COUNT(releases) + FROM + releases LEFT JOIN repo ON releases.repo_id = repo.repo_id + GROUP BY repo.repo_id, releases.release_id + ORDER BY releases.release_published_at DESC + """) + + results = pd.read_sql(reviews_SQL, self.database, + params={'period': period, 'repo_id': repo_id, + 'begin_date': begin_date, 'end_date': end_date}) + return results + +def create_release_metrics(metrics): + add_metrics(metrics, __name__) \ No newline at end of file diff --git a/augur/metrics/repo_meta.py b/augur/metrics/repo_meta.py --- a/augur/metrics/repo_meta.py +++ b/augur/metrics/repo_meta.py @@ -5,9 +5,12 @@ import datetime import sqlalchemy as s import pandas as pd -from augur import logger -from augur.util import register_metric import math +import logging + +from augur.util import register_metric + +logger = logging.getLogger("augur") @register_metric() def code_changes(self, repo_group_id, repo_id=None, period='week', begin_date=None, end_date=None): @@ -321,7 +324,7 @@ def languages(self, repo_group_id, repo_id=None): results = pd.read_sql(languages_SQL, self.database, params={'repo_id': repo_id}) return results -@register_metric() +@register_metric(type="license") def license_files(self, license_id, spdx_binary, repo_group_id, repo_id=None,): """Returns the files related to a license 
diff --git a/augur/models/__init__.py b/augur/models/__init__.py deleted file mode 100644 --- a/augur/models/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from sqlalchemy.orm import sessionmaker -from .user import User -from .repo import Repo, RepoGroup - - -__all__ = ['User', 'RepoGroup', 'Repo'] \ No newline at end of file diff --git a/augur/models/common.py b/augur/models/common.py deleted file mode 100644 --- a/augur/models/common.py +++ /dev/null @@ -1,2 +0,0 @@ -from sqlalchemy.ext.declarative import declarative_base -Base = declarative_base() \ No newline at end of file diff --git a/augur/models/repo.py b/augur/models/repo.py deleted file mode 100644 --- a/augur/models/repo.py +++ /dev/null @@ -1,48 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime -from sqlalchemy.orm import relationship -from .common import Base -from .user import user_has_repo_group - -repo_group_has_project = Table('repo_group_has_project', - Base.metadata, - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), - Column('repo_id', ForeignKey('repo.url'), primary_key=True), -) - -class Repo(Base): - """ - The Repo object models a VCS repository - """ - __tablename__ = 'repo' - - # Keys - url = Column(String(1024), primary_key=True) - vcs = Column(String(64), default='git') - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - repo_groups_member_of = relationship('RepoGroup', secondary=repo_group_has_project, back_populates='projects') - - def __repr__(self): - return f"<Repo(giturl='{self.password}')>" - - -class RepoGroup(Base): - """ - The RepoGroup class models lists of projects that a user wants to keep track of - """ - __tablename__ = 'repo_group' - - # Keys - id = Column(Integer, primary_key=True) - name = Column(String(128)) - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - projects = relationship('Repo', secondary=repo_group_has_project, back_populates='repo_groups_member_of') - users_of = relationship('User', secondary=user_has_repo_group, back_populates='repo_groups') \ No newline at end of file diff --git a/augur/models/user.py b/augur/models/user.py deleted file mode 100644 --- a/augur/models/user.py +++ /dev/null @@ -1,61 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime, Boolean -from sqlalchemy.orm import relationship -from sqlalchemy.ext.hybrid import hybrid_property -from .common import Base -from werkzeug.security import generate_password_hash, check_password_hash -from flask_login import UserMixin - -user_has_repo_group = Table('user_has_repo_group', - Base.metadata, - Column('user_id', ForeignKey('user.id'), primary_key=True), - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), -) - -class User(Base): - """ - The User object models users in the database. 
- """ - __tablename__ = 'user' - - # Keys - id = Column(Integer, primary_key=True) - username = Column(String(64), unique=True, nullable=False) - email = Column(String(64), unique=True, nullable=False) - - # Fields - password_hash = Column(String(128)) - email_confirmation_token = Column(String(128), nullable=True) - created_at = Column(DateTime, default=datetime.datetime.utcnow) - password_updated_at = Column(DateTime, default=datetime.datetime.utcnow) - last_login_at = Column(DateTime, nullable=True) - authenticated = Column(Boolean, default=False) - active = Column(Boolean, default=True) - administrator = Column(Boolean, default=False) - - # Foreign Keys - repo_groups = relationship('RepoGroup', secondary=user_has_repo_group, back_populates='users_of') - - def get_id(self): - return self.id - - def __repr__(self): - return f"<User(username='{self.username}', email='{self.email}')>" - - @hybrid_property - def password(self): - return self.password_hash - - @password.setter - def password(self, password): - self.password_hash = generate_password_hash(password) - - def check_password(self, password): - return check_password_hash(self.password_hash, password) - - def is_authenticated(self): - return self.authenticated - - def is_active(self): - # False as we do not support annonymity - return self.active diff --git a/augur/routes/__init__.py b/augur/routes/__init__.py --- a/augur/routes/__init__.py +++ b/augur/routes/__init__.py @@ -1,35 +1,34 @@ + +import logging import importlib import os import glob +import sys +import inspect -from augur import logger +logger = logging.getLogger(__name__) def get_route_files(): route_files = [] - metric_route_files = [] def get_file_id(path): return os.path.splitext(os.path.basename(path))[0] - for filename in glob.iglob("**/routes/*"): + for filename in glob.iglob("augur/routes/*"): file_id = get_file_id(filename) if not file_id.startswith('__') and filename.endswith('.py'): route_files.append(file_id) - for filename in glob.iglob("**/routes/metrics/*"): - file_id = get_file_id(filename) - if not file_id.startswith('__') and filename.endswith('.py'): - metric_route_files.append(file_id) - - return route_files, metric_route_files + return route_files -route_files, metric_route_files = get_route_files() +route_files = get_route_files() def create_routes(server): for route_file in route_files: module = importlib.import_module('.' + route_file, 'augur.routes') module.create_routes(server) - for route_file in metric_route_files: - module = importlib.import_module('.' + route_file, 'augur.routes.metrics') - module.create_routes(server) + for name, obj in inspect.getmembers(server.augur_app.metrics): + if hasattr(obj, 'is_metric') == True: + if obj.metadata['type'] == "standard": + server.add_standard_metric(obj, obj.metadata['endpoint']) diff --git a/augur/routes/batch.py b/augur/routes/batch.py --- a/augur/routes/batch.py +++ b/augur/routes/batch.py @@ -10,9 +10,10 @@ from sqlalchemy import exc from flask import request, Response from augur.util import metric_metadata -from augur import logger import json +logger = logging.getLogger(__name__) + def create_routes(server): @server.app.route('/{}/batch'.format(server.api_version), methods=['GET', 'POST']) diff --git a/augur/routes/broker.py b/augur/routes/broker.py --- a/augur/routes/broker.py +++ b/augur/routes/broker.py @@ -9,6 +9,9 @@ import requests from flask import request, Response +logger = logging.getLogger(__name__) + +# TODO: not this... 
def worker_start(worker_name=None):
    process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True)
@@ -26,12 +29,12 @@ def send_task(worker_proxy):
 
         j = r.json()
 
         if 'status' not in j:
-            logging.info("Worker: {}'s heartbeat did not return a response, setting worker status as 'Disconnected'\n".format(worker_id))
+            logger.error("Worker: {}'s heartbeat did not return a response, setting worker status as 'Disconnected'\n".format(worker_id))
             worker_proxy['status'] = 'Disconnected'
             return
 
         if j['status'] != 'alive':
-            logging.info("Worker: {} is busy, setting its status as so.\n".format(worker_id))
+            logger.info("Worker: {} is busy, setting its status as so.\n".format(worker_id))
             return
 
     # Want to check user-created job requests first
@@ -43,16 +46,16 @@ def send_task(worker_proxy):
             new_task = maintain_queue.pop(0)
 
         else:
-            logging.info("Both queues are empty for worker {}\n".format(worker_id))
+            logger.debug("Both queues are empty for worker {}\n".format(worker_id))
             worker_proxy['status'] = 'Idle'
             return
 
-    logging.info("Worker {} is idle, preparing to send the {} task to {}\n".format(worker_id, new_task['display_name'], task_endpoint))
+    logger.info("Worker {} is idle, preparing to send the {} task to {}\n".format(worker_id, new_task['display_name'], task_endpoint))
     try:
         requests.post(task_endpoint, json=new_task)
         worker_proxy['status'] = 'Working'
     except:
-        logging.info("Sending Worker: {} a task did not return a response, setting worker status as 'Disconnected'\n".format(worker_id))
+        logger.error("Sending Worker: {} a task did not return a response, setting worker status as 'Disconnected'\n".format(worker_id))
         worker_proxy['status'] = 'Disconnected'
 
         # If the worker died, then restart it
         worker_start(worker_id.split('.')[len(worker_id.split('.')) - 2])
@@ -71,9 +74,9 @@ def task():
             for given_component in list(task['given'].keys()):
                 given.append(given_component)
         model = task['models'][0]
-        logging.info("Broker recieved a new user task ... checking for compatible workers for given: " + str(given) + " and model(s): " + str(model) + "\n")
+        logger.info("Broker received a new user task ... 
checking for compatible workers for given: " + str(given) + " and model(s): " + str(model) + "\n") - logging.info("Broker's list of all workers: {}\n".format(server.broker._getvalue().keys())) + logger.debug("Broker's list of all workers: {}\n".format(server.broker._getvalue().keys())) worker_found = False compatible_workers = {} @@ -83,7 +86,7 @@ def task(): if type(server.broker[worker_id]._getvalue()) != dict: continue - logging.info("Considering compatible worker: {}\n".format(worker_id)) + logger.info("Considering compatible worker: {}\n".format(worker_id)) # Group workers by type (all gh workers grouped together etc) worker_type = worker_id.split('.')[len(worker_id.split('.'))-2] @@ -91,28 +94,28 @@ def task(): # Make worker that is prioritized the one with the smallest sum of task queues if (len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue'])) < min([compatible_workers[w]['task_load'] for w in compatible_workers.keys() if worker_type == w]): - logging.info("Worker id: {} has the smallest task load encountered so far: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']))) + logger.info("Worker id: {} has the smallest task load encountered so far: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']))) compatible_workers[worker_type]['task_load'] = len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']) compatible_workers[worker_type]['worker_id'] = worker_id for worker_type in compatible_workers.keys(): worker_id = compatible_workers[worker_type]['worker_id'] worker = server.broker[worker_id] - logging.info("Final compatible worker chosen: {} with smallest task load: {} found to work on task: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']), task)) + logger.info("Final compatible worker chosen: {} with smallest task load: {} found to work on task: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']), task)) if task['job_type'] == "UPDATE": worker['user_queue'].append(task) - logging.info("Added task for model: {}. New length of worker {}'s user queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['user_queue'])))) + logger.info("Added task for model: {}. New length of worker {}'s user queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['user_queue'])))) elif task['job_type'] == "MAINTAIN": worker['maintain_queue'].append(task) - logging.info("Added task for model: {}. New length of worker {}'s maintain queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['maintain_queue'])))) + logger.info("Added task for model: {}. 
New length of worker {}'s maintain queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['maintain_queue']))))
 
             if worker['status'] == 'Idle':
                 send_task(worker)
             worker_found = True
 
         # Otherwise, let the frontend know that the request can't be served
         if not worker_found:
-            logging.info("Augur does not have knowledge of any workers that are capable of handing the request: {}\n".format(task))
+            logger.warning("Augur does not have knowledge of any workers that are capable of handling the request: {}\n".format(task))
 
         return Response(response=task,
                         status=200,
@@ -124,7 +127,7 @@ def worker():
         and telling the broker to add this worker to the set it maintains
         """
         worker = request.json
-        logging.info("Recieved HELLO message from worker: {}\n".format(worker['id']))
+        logger.info("Received HELLO message from worker: {}\n".format(worker['id']))
         if worker['id'] not in server.broker:
             server.broker[worker['id']] = server.manager.dict()
             server.broker[worker['id']]['id'] = worker['id']
@@ -139,7 +142,7 @@ def worker():
             server.broker[worker['id']]['status'] = 'Idle'
             server.broker[worker['id']]['location'] = worker['location']
         else:
-            logging.info("Worker: {} has been reconnected.\n".format(worker['id']))
+            logger.info("Worker: {} has been reconnected.\n".format(worker['id']))
             models = server.broker[worker['id']]['models']
             givens = server.broker[worker['id']]['given']
             user_queue = server.broker[worker['id']]['user_queue']
@@ -157,7 +160,7 @@ def sync_queue():
         task = request.json
         worker = task['worker_id']
-        logging.info("Message recieved that worker {} completed task: {}\n".format(worker,task))
+        logger.info("Message received that worker {} completed task: {}\n".format(worker,task))
         try:
             models = server.broker[worker]['models']
             givens = server.broker[worker]['given']
@@ -167,8 +170,8 @@ def sync_queue():
             if server.broker[worker]['status'] != 'Disconnected':
                 send_task(server.broker[worker])
         except Exception as e:
-            logging.info("Ran into error: {}\n".format(repr(e)))
-            logging.info("A past instance of the {} worker finished a previous leftover task.\n".format(worker))
+            logger.error("Ran into error: {}\n".format(repr(e)))
+            logger.error("A past instance of the {} worker finished a previous leftover task.\n".format(worker))
 
         return Response(response=task,
                         status=200,
@@ -190,7 +193,7 @@ def get_status():
     @server.app.route('/{}/workers/remove'.format(server.api_version), methods=['POST'])
     def remove_worker():
         worker = request.json
-        logging.info("Recieved a message to disconnect worker: {}\n".format(worker))
+        logger.info("Received a message to disconnect worker: {}\n".format(worker))
         server.broker[worker['id']]['status'] = 'Disconnected'
         return Response(response=worker,
                         status=200,
@@ -200,13 +203,13 @@ def remove_worker():
     def task_error():
         task = request.json
         worker_id = task['worker_id']
-        logging.info("Recieved a message that {} ran into an error on task: {}\n".format(worker_id, task))
+        logger.error("Received a message that {} ran into an error on task: {}\n".format(worker_id, task))
         if worker_id in server.broker:
             if server.broker[worker_id]['status'] != 'Disconnected':
-                logging.info("{} ran into error while completing task: {}\n".format(worker_id, task))
+                logger.error("{} ran into error while completing task: {}\n".format(worker_id, task))
                 send_task(server.broker[worker_id])
             else:
-                logging.info("A previous instance of {} ran into error while completing task: 
{}\n".format(worker_id, task)) return Response(response=request.json, status=200, mimetype="application/json") \ No newline at end of file diff --git a/augur/routes/manager.py b/augur/routes/manager.py --- a/augur/routes/manager.py +++ b/augur/routes/manager.py @@ -285,15 +285,15 @@ def get_repo_name(self): repo = self.name return repo[repo.find('/')+1:] -def authenticate_request(app, request): +def authenticate_request(augur_app, request): # do I like doing it like this? not at all # do I have the time to implement a better solution right now? not at all - user = app.read_config('Database', 'user') - password = app.read_config('Database', 'password') - host = app.read_config('Database', 'host') - port = app.read_config('Database', 'port') - dbname = app.read_config('Database', 'name') + user = augur_app.config.get_value('Database', 'user') + password = augur_app.config.get_value('Database', 'password') + host = augur_app.config.get_value('Database', 'host') + port = augur_app.config.get_value('Database', 'port') + dbname = augur_app.config.get_value('Database', 'name') DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( user, password, host, port, dbname diff --git a/augur/routes/metrics/commit.py b/augur/routes/metrics/commit.py deleted file mode 100644 --- a/augur/routes/metrics/commit.py +++ /dev/null @@ -1,8 +0,0 @@ -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.annual_commit_count_ranked_by_new_repo_in_repo_group,'annual-commit-count-ranked-by-new-repo-in-repo-group') - - server.add_standard_metric(metrics.annual_commit_count_ranked_by_repo_in_repo_group,'annual-commit-count-ranked-by-repo-in-repo-group') - diff --git a/augur/routes/metrics/contributor.py b/augur/routes/metrics/contributor.py deleted file mode 100644 --- a/augur/routes/metrics/contributor.py +++ /dev/null @@ -1,17 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.contributors, 'contributors') - - server.add_standard_metric(metrics.contributors_new, 'contributors-new') - - server.add_standard_metric(metrics.committers, 'committers') - - server.add_standard_metric(metrics.lines_changed_by_author,'lines-changed-by-author') - - server.add_standard_metric(metrics.top_committers, 'top-committers') - - server.add_standard_metric(metrics.contributors_code_development, 'contributors-code-development') \ No newline at end of file diff --git a/augur/routes/metrics/experimental.py b/augur/routes/metrics/experimental.py deleted file mode 100644 --- a/augur/routes/metrics/experimental.py +++ /dev/null @@ -1,6 +0,0 @@ - -def create_routes(server): - - metrics = server.augur_app.metrics - - diff --git a/augur/routes/metrics/insight.py b/augur/routes/metrics/insight.py deleted file mode 100644 --- a/augur/routes/metrics/insight.py +++ /dev/null @@ -1,13 +0,0 @@ -#SPDX-License-Identifier: MIT -from flask import Response - -def create_routes(server): - - metrics = server.augur_app.metrics - - @server.app.route(f"/{server.api_version}/repo-groups/<repo_group_id>/top-insights") - def top_insights(repo_group_id): - data = server.transform(metrics.top_insights, args=[repo_group_id]) - return Response(response=data, - status=200, - mimetype="application/json") diff --git a/augur/routes/metrics/issue.py b/augur/routes/metrics/issue.py deleted file mode 100644 --- a/augur/routes/metrics/issue.py +++ /dev/null @@ -1,39 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_routes(server): - - 
metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.issues_new, 'issues-new') - - server.add_standard_metric(metrics.issues_active, 'issues-active') - - server.add_standard_metric(metrics.issues_closed, 'issues-closed') - - server.add_standard_metric(metrics.issue_duration, 'issue-duration') - - server.add_standard_metric(metrics.issue_participants, 'issue-participants') - - server.add_standard_metric(metrics.issue_backlog, 'issue-backlog') - - server.add_standard_metric(metrics.issue_throughput, 'issue-throughput') - - server.add_standard_metric(metrics.issues_first_time_opened, 'issues-first-time-opened') - - server.add_standard_metric(metrics.issues_first_time_closed, 'issues-first-time-closed') - - server.add_standard_metric(metrics.open_issues_count, 'open-issues-count') - - server.add_standard_metric(metrics.closed_issues_count, 'closed-issues-count') - - server.add_standard_metric(metrics.issues_open_age, 'issues-open-age') - - server.add_standard_metric(metrics.issues_closed_resolution_duration, 'issues-closed-resolution-duration') - - server.add_standard_metric(metrics.issues_maintainer_response_duration, 'issues-maintainer-response-duration') - - server.add_standard_metric(metrics.average_issue_resolution_time, 'average-issue-resolution-time') - - server.add_standard_metric(metrics.issue_comments_mean, 'issue-comments-mean') - - server.add_standard_metric(metrics.issue_comments_mean_std, 'issue-comments-mean-std') diff --git a/augur/routes/metrics/message.py b/augur/routes/metrics/message.py deleted file mode 100644 --- a/augur/routes/metrics/message.py +++ /dev/null @@ -1,6 +0,0 @@ - -def create_routes(server): - - metrics = server.augur_app.metrics - - diff --git a/augur/routes/metrics/platform.py b/augur/routes/metrics/platform.py deleted file mode 100644 --- a/augur/routes/metrics/platform.py +++ /dev/null @@ -1,4 +0,0 @@ - -def create_routes(server): - metrics = server.augur_app.metrics - diff --git a/augur/routes/metrics/pull_request.py b/augur/routes/metrics/pull_request.py deleted file mode 100644 --- a/augur/routes/metrics/pull_request.py +++ /dev/null @@ -1,31 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.reviews, 'reviews') - - server.add_standard_metric(metrics.reviews_accepted, 'reviews-accepted') - - server.add_standard_metric(metrics.reviews_declined, 'reviews-declined') - - server.add_standard_metric(metrics.review_duration, 'review-duration') - - server.add_standard_metric(metrics.pull_requests_merge_contributor_new, 'pull-requests-merge-contributor-new') - - server.add_standard_metric(metrics.pull_request_acceptance_rate, 'pull-request-acceptance-rate') - - server.add_standard_metric(metrics.pull_requests_closed_no_merge, 'pull-requests-closed-no-merge') - - server.add_standard_metric(metrics.pull_request_merged_status_counts, 'pull-request-merged-status-counts') - - server.add_standard_metric(metrics.pull_request_average_time_to_close, 'pull-request-average-time-to-close') - - server.add_standard_metric(metrics.pull_request_average_time_between_responses, 'pull-request-average-time-between-responses') - - server.add_standard_metric(metrics.pull_request_average_commit_counts, 'pull-request-average-commit-counts') - - server.add_standard_metric(metrics.pull_request_average_event_counts, 'pull-request-average-event-counts') - - server.add_standard_metric(metrics.pull_request_average_time_to_responses_and_close, 
'pull-request-average-time-to-responses-and-close') diff --git a/augur/routes/metrics/repo_meta.py b/augur/routes/metrics/repo_meta.py deleted file mode 100644 --- a/augur/routes/metrics/repo_meta.py +++ /dev/null @@ -1,54 +0,0 @@ -#SPDX-License-Identifier: MIT -from flask import Response - -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.code_changes, 'code-changes') - - server.add_standard_metric(metrics.code_changes_lines, 'code-changes-lines') - - @server.app.route(f"/{server.api_version}/<license_id>/<spdx_binary>/<repo_group_id>/<repo_id>/license-files") - def get_license_files(license_id, spdx_binary, repo_group_id, repo_id): - arguments = [license_id, spdx_binary, repo_group_id, repo_id] - license_files = server.transform(metrics.license_files, args=arguments) - return Response(response=license_files, - status=200, - mimetype="application/json") - - server.add_standard_metric(metrics.sbom_download, 'sbom-download') - - server.add_standard_metric(metrics.sub_projects, 'sub-projects') - - server.add_standard_metric(metrics.cii_best_practices_badge, 'cii-best-practices-badge') - - server.add_standard_metric(metrics.forks, 'forks') - - server.add_standard_metric(metrics.fork_count, 'fork-count') - - server.add_standard_metric(metrics.languages, 'languages') - - server.add_standard_metric(metrics.license_count, 'license-count') - - server.add_standard_metric(metrics.license_coverage, 'license-coverage') - - server.add_standard_metric(metrics.license_declared, 'license-declared') - - server.add_standard_metric(metrics.stars, 'stars') - - server.add_standard_metric(metrics.stars_count, 'stars-count') - - server.add_standard_metric(metrics.watchers, 'watchers') - - server.add_standard_metric(metrics.watchers_count, 'watchers-count') - - server.add_standard_metric(metrics.annual_lines_of_code_count_ranked_by_new_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-new-repo-in-repo-group') - - server.add_standard_metric(metrics.annual_lines_of_code_count_ranked_by_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-repo-in-repo-group') - - server.add_standard_metric(metrics.lines_of_code_commit_counts_by_calendar_year_grouped,'lines-of-code-commit-counts-by-calendar-year-grouped') - - server.add_standard_metric(metrics.average_weekly_commits, 'average-weekly-commits') - - server.add_standard_metric(metrics.aggregate_summary, 'aggregate-summary') diff --git a/augur/routes/nonstandard_metrics.py b/augur/routes/nonstandard_metrics.py new file mode 100644 --- /dev/null +++ b/augur/routes/nonstandard_metrics.py @@ -0,0 +1,24 @@ +import base64 +import sqlalchemy as s +import pandas as pd +import json +from flask import Response + +def create_routes(server): + + metrics = server.augur_app.metrics + + @server.app.route(f"/{server.api_version}/<license_id>/<spdx_binary>/<repo_group_id>/<repo_id>/license-files") + def get_license_files(license_id, spdx_binary, repo_group_id, repo_id): + arguments = [license_id, spdx_binary, repo_group_id, repo_id] + license_files = server.transform(metrics.license_files, args=arguments) + return Response(response=license_files, + status=200, + mimetype="application/json") + + @server.app.route(f"/{server.api_version}/repo-groups/<repo_group_id>/top-insights") + def top_insights(repo_group_id): + data = server.transform(metrics.top_insights, args=[repo_group_id]) + return Response(response=data, + status=200, + mimetype="application/json") \ No newline at end of file diff --git a/augur/routes/util.py 
b/augur/routes/util.py --- a/augur/routes/util.py +++ b/augur/routes/util.py @@ -6,8 +6,6 @@ def create_routes(server): - metrics = server.augur_app.metrics - @server.app.route('/{}/repo-groups'.format(server.api_version)) def get_all_repo_groups(): #TODO: make this name automatic - wrapper? repoGroupsSQL = s.sql.text(""" @@ -202,7 +200,7 @@ def get_issues(repo_group_id, repo_id=None): @server.app.route('/{}/api-port'.format(server.api_version)) def api_port(): - response = {'port': server.augur_app.read_config('Server', 'port')} + response = {'port': server.augur_app.config.get_value('Server', 'port')} return Response(response=json.dumps(response), status=200, mimetype="application/json") diff --git a/augur/server.py b/augur/server.py --- a/augur/server.py +++ b/augur/server.py @@ -3,51 +3,50 @@ Creates a WSGI server that serves the Augur REST API """ +import glob +import sys +import inspect +import types import json import os import base64 +import logging + from flask import Flask, request, Response, redirect from flask_cors import CORS import pandas as pd + import augur -from augur.util import logger from augur.routes import create_routes AUGUR_API_VERSION = 'api/unstable' -class VueCompatibleFlask(Flask): - jinja_options = Flask.jinja_options.copy() - jinja_options.update(dict( - block_start_string='(%', - block_end_string='%)', - variable_start_string='%%', - variable_end_string='%%', - comment_start_string='(#', - comment_end_string='#)', - )) - +logger = logging.getLogger(__name__) class Server(object): """ Defines Augur's server's behavior """ - def __init__(self, frontend_folder='../frontend/public', manager=None, broker=None, housekeeper=None): + def __init__(self, augur_app=None): """ Initializes the server, creating both the Flask application and Augur application """ # Create Flask application - self.app = VueCompatibleFlask(__name__, static_folder=frontend_folder, template_folder=frontend_folder) + self.app = Flask(__name__) + logger.debug("Created Flask app") self.api_version = AUGUR_API_VERSION app = self.app CORS(app) app.url_map.strict_slashes = False - # Create Augur application - self.augur_app = augur.Application() + self.augur_app = augur_app + self.manager = augur_app.manager + self.broker = augur_app.broker + self.housekeeper = augur_app.housekeeper # Initialize cache - expire = int(self.augur_app.read_config('Server', 'cache_expire')) + expire = int(self.augur_app.config.get_value('Server', 'cache_expire')) self.cache = self.augur_app.cache.get_cache('server', expire=expire) self.cache.clear() @@ -55,10 +54,7 @@ def __init__(self, frontend_folder='../frontend/public', manager=None, broker=No self.show_metadata = False - self.manager = manager - self.broker = broker - self.housekeeper = housekeeper - + logger.debug("Creating API routes...") create_routes(self) ##################################### @@ -184,40 +180,3 @@ def add_standard_metric(self, function, endpoint, **kwargs): self.app.route(repo_endpoint)(self.routify(function, 'repo')) self.app.route(repo_group_endpoint)(self.routify(function, 'repo_group')) self.app.route(deprecated_repo_endpoint )(self.routify(function, 'deprecated_repo')) - -def run(): - """ - Runs server with configured hosts/ports - """ - server = Server() - host = server.augur_app.read_config('Server', 'host') - port = server.augur_app.read_config('Server', 'port') - Server().app.run(host=host, port=int(port), debug=True) - -wsgi_app = None -def wsgi(environ, start_response): - """ - Creates WSGI app - """ - global wsgi_app - if (wsgi_app 
is None): - app_instance = Server() - wsgi_app = app_instance.app - # Stuff to make proxypass work - script_name = environ.get('HTTP_X_SCRIPT_NAME', '') - if script_name: - environ['SCRIPT_NAME'] = script_name - path_info = environ['PATH_INFO'] - if path_info.startswith(script_name): - environ['PATH_INFO'] = path_info[len(script_name):] - - scheme = environ.get('HTTP_X_SCHEME', '') - if scheme: - environ['wsgi.url_scheme'] = scheme - server = environ.get('HTTP_X_FORWARDED_SERVER', '') - if server: - environ['HTTP_HOST'] = server - return wsgi_app(environ, start_response) - -if __name__ == "__main__": - run() diff --git a/augur/util.py b/augur/util.py --- a/augur/util.py +++ b/augur/util.py @@ -8,8 +8,9 @@ import types import sys import beaker +import logging -from augur import logger +logger = logging.getLogger(__name__) __ROOT = os.path.abspath(os.path.dirname(__file__)) def get_data_path(path): @@ -42,7 +43,7 @@ def get_cache(namespace, cache_manager=None): metric_metadata = [] def register_metric(metadata=None, **kwargs): """ - Decorates a function as being a metric + Register a function as being a metric """ if metadata is None: metadata = {} @@ -54,20 +55,19 @@ def decorate(function): if not hasattr(function, 'is_metric'): function.is_metric = True - function.metadata.update(metadata) - if kwargs.get('endpoint_type', None): - endpoint_type = kwargs.pop('endpoint_type') - if endpoint_type == 'repo': - function.metadata['repo_endpoint'] = kwargs.get('endpoint') - else: - function.metadata['group_endpoint'] = kwargs.get('endpoint') - function.metadata.update(dict(kwargs)) function.metadata['tag'] = re.sub('_', '-', function.__name__).lower() - function.metadata['metric_name'] = re.sub('_', ' ', function.__name__).title() + function.metadata['endpoint'] = function.metadata['tag'] + function.metadata['name'] = re.sub('_', ' ', function.__name__).title() function.metadata['model'] = re.sub(r'(.*\.)', '', function.__module__) - function.metadata['ID'] = "{}-{}".format(function.metadata['model'].lower(), function.metadata['tag']) + + if kwargs.get('type', None): + function.metadata['type'] = kwargs.get('type') + else: + function.metadata['type'] = "standard" + + function.metadata.update(metadata) return function return decorate \ No newline at end of file diff --git a/conftest.py b/conftest.py new file mode 100644 --- /dev/null +++ b/conftest.py @@ -0,0 +1,31 @@ +import pytest +import re + +from augur.application import Application +from augur.cli.run import initialize_components + +default_repo_id = "25430" +default_repo_group_id = "10" + +def create_full_routes(routes): + full_routes = [] + for route in routes: + route = re.sub("<default_repo_id>", default_repo_id, route) + route = re.sub("<default_repo_group_id>", default_repo_group_id, route) + route = "http://localhost:5000/api/unstable/" + route + full_routes.append(route) + return full_routes + [email protected](scope="session") +def augur_app(): + augur_app = Application(disable_logs=True) + return augur_app + [email protected](scope="session") +def metrics(augur_app): + return augur_app.metrics + [email protected](scope="session") +def client(augur_app): + flask_client = initialize_components(augur_app, disable_housekeeper=True).load() + return flask_client.test_client() diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -25,7 +25,6 @@ exec(open(os.path.join(here, "../../metadata.py")).read()) - sys.path.insert(0, os.path.abspath('../../../augur')) # -- General configuration 
------------------------------------------------ @@ -82,8 +81,6 @@ copyright = __copyright__ author = 'Carter Landis' - - # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. diff --git a/metadata.py b/metadata.py --- a/metadata.py +++ b/metadata.py @@ -1,13 +1,11 @@ -from os import path - __name__ = "Augur" __slug__ = "augur" __url__ = "https://github.com/chaoss/augur" __short_description__ = "Python 3 package for free/libre and open-source software community metrics & data collection" -__version__ = "0.12.0" -__release__ = "0.12.0" +__version__ = "0.13.0" +__release__ = "v0.13.0" __license__ = "MIT" __copyright__ = "CHAOSS & Augurlabs 2020" diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ "psycopg2-binary", "click", "psutil", - "gunicorn==19.9.0", + "gunicorn", "six>=1.14.0" ], extras_require={ @@ -61,7 +61,7 @@ }, entry_points={ "console_scripts": [ - "augur=augur.runtime:run" + "augur=augur.cli._multicommand:run" ], } ) diff --git a/util/alembic/env.py b/util/alembic/env.py deleted file mode 100644 --- a/util/alembic/env.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import with_statement -from alembic import context -from sqlalchemy import engine_from_config, pool -from logging.config import fileConfig - - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -from augur.models.common import Base -target_metadata = Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, target_metadata=target_metadata, literal_binds=True) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - import augur.application - app = augur.application.Application() - - context.configure( - connection=app.db.connect(), - target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py b/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py deleted file mode 100644 --- a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Create basic tables - -Revision ID: 2eaa930b1f5a -Revises: -Create Date: 2019-02-09 16:10:24.251828 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. 
-revision = '2eaa930b1f5a' -down_revision = None -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.create_table('repo', - sa.Column('url', sa.String(length=1024), nullable=False), - sa.Column('vcs', sa.String(length=64), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('url') - ) - op.create_table('repo_group', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('id') - ) - op.create_table('user', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('username', sa.String(length=64), nullable=False), - sa.Column('email', sa.String(length=64), nullable=False), - sa.Column('password_hash', sa.String(length=128), nullable=True), - sa.Column('email_confirmation_token', sa.String(length=128), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('password_updated_at', sa.DateTime(), nullable=True), - sa.Column('last_login_at', sa.DateTime(), nullable=True), - sa.Column('authenticated', sa.Boolean(), nullable=True), - sa.Column('active', sa.Boolean(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('email'), - sa.UniqueConstraint('username') - ) - op.create_table('repo_group_has_project', - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.Column('repo_id', sa.String(length=1024), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['repo_id'], ['repo.url'], ), - sa.PrimaryKeyConstraint('repo_group_id', 'repo_id') - ) - op.create_table('user_has_repo_group', - sa.Column('user_id', sa.Integer(), nullable=False), - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['user_id'], ['user.id'], ), - sa.PrimaryKeyConstraint('user_id', 'repo_group_id') - ) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.drop_table('user_has_repo_group') - op.drop_table('repo_group_has_project') - op.drop_table('user') - op.drop_table('repo_group') - op.drop_table('repo') - # ### end Alembic commands ### diff --git a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py b/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py deleted file mode 100644 --- a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Add admin to User, name to RepoGroup - -Revision ID: a051167419fa -Revises: 2eaa930b1f5a -Create Date: 2019-02-17 13:09:42.138936 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = 'a051167419fa' -down_revision = '2eaa930b1f5a' -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.add_column('repo_group', sa.Column('name', sa.String(length=128), nullable=True)) - op.add_column('user', sa.Column('administrator', sa.Boolean(), nullable=True)) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - op.drop_column('user', 'administrator') - op.drop_column('repo_group', 'name') - # ### end Alembic commands ### diff --git a/workers/contributor_worker/contributor_worker/worker.py b/workers/contributor_worker/contributor_worker.py similarity index 68% rename from workers/contributor_worker/contributor_worker/worker.py rename to workers/contributor_worker/contributor_worker.py --- a/workers/contributor_worker/contributor_worker/worker.py +++ b/workers/contributor_worker/contributor_worker.py @@ -8,189 +8,50 @@ import statistics, logging, os, json, time import numpy as np import datetime -from workers.standard_methods import * #init_oauths, get_table_values, get_max_id, register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process, paginate -import warnings -warnings.filterwarnings('ignore') -class ContributorWorker: +from workers.worker_base import Worker + +class ContributorWorker(Worker): """ Worker that detects anomalies on a select few of our metrics task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self._task = task - self._child = None - self._queue = Queue() - self.db = None - self.tool_source = 'Contributor Worker' - self.tool_version = '0.0.1' # See __init__.py - self.data_source = 'Augur Commit Data' - self.finishing_task = False - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["contributors"] - } - ], - "config": [self.config] - } + def __init__(self, config={}): - self.results_counter = 0 + worker_type = "contributor_worker" - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) + given = [['git_url']] + models = ['contributors'] - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) + data_tables = ['contributors', 'contributors_aliases', 'contributor_affiliations', + 'issue_events', 'pull_request_events', 'issues', 'message', 'issue_assignees', + 'pull_request_assignees', 'pull_request_reviewers', 'pull_request_meta', 'pull_request_repo'] + operations_tables = ['worker_history', 'worker_job'] - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - # produce our own MetaData object - metadata = MetaData() - helper_metadata = MetaData() + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) - # we can reflect it ourselves from a database, using options - # such as 'only' to limit what tables we look at... 
- metadata.reflect(self.db, only=['contributors', 'contributors_aliases', 'contributor_affiliations', - 'issue_events', 'pull_request_events', 'issues', 'message', 'issue_assignees', - 'pull_request_assignees', 'pull_request_reviewers', 'pull_request_meta', 'pull_request_repo']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job']) - - # we can then produce a set of mappings from this MetaData. - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - # calling prepare() just sets up mapped classes and relationships. - Base.prepare() - HelperBase.prepare() - - # mapped classes are ready - self.contributors_table = Base.classes.contributors.__table__ - self.contributors_aliases_table = Base.classes.contributors_aliases.__table__ - self.contributor_affiliations_table = Base.classes.contributor_affiliations.__table__ - self.issue_events_table = Base.classes.issue_events.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.issues_table = Base.classes.issues.__table__ - self.message_table = Base.classes.message.__table__ - self.issue_assignees_table = Base.classes.issue_assignees.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5432/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if 
message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'contributors': - self.contributors_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass + # These 3 are included in every tuple the worker inserts (data collection info) + self.tool_source = 'Contributor Worker' + self.tool_version = '1.0.0' + self.data_source = 'Augur Commit Data' def contributors_model(self, entry_info, repo_id): + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's + self.logger.info("Querying starting ids info...\n") + + self.cntrb_id_inc = self.get_max_id('contributors', 'cntrb_id') + # Get and insert all users (emails) found by the facade worker self.insert_facade_contributors(entry_info, repo_id) # Get and insert all users github considers to be contributors for this repo - query_github_contributors(self, entry_info, repo_id) + self.query_github_contributors(entry_info, repo_id) - logging.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) + self.logger.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -242,7 +103,7 @@ def contributors_model(self, entry_info, repo_id): commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, \ params={'repo_id': repo_id}).to_json(orient="records")) - logging.info("We found {} distinct emails to search for in this repo (repo_id = {})".format( + self.logger.info("We found {} distinct emails to search for in this repo (repo_id = {})\n".format( len(commit_cntrbs), repo_id)) # For every unique commit contributor info combination... 
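As an aside, the constructor rewritten above moves ContributorWorker onto the shared Worker base class; a minimal, purely illustrative sketch of another worker built the same way follows. Only the constructor shape (worker_type, config, given, models, data_tables, operations_tables passed to super().__init__) mirrors this diff; the worker name, model name, and table list are made-up placeholders.

# Illustrative sketch only -- not part of this patch.
from workers.worker_base import Worker

class ExampleWorker(Worker):
    def __init__(self, config={}):
        worker_type = "example_worker"                   # hypothetical worker name
        given = [['git_url']]                            # same 'given' shape as ContributorWorker
        models = ['example_model']                       # hypothetical model name
        data_tables = ['contributors']                   # trimmed, illustrative table list
        operations_tables = ['worker_history', 'worker_job']

        # Per the refactor above, the shared base class now handles the setup the
        # old ContributorWorker.__init__ did inline (engines, table reflection, etc.)
        super().__init__(worker_type, config, given, models, data_tables, operations_tables)

        # Collection metadata recorded with every inserted tuple
        self.tool_source = 'Example Worker'
        self.tool_version = '0.1.0'
        self.data_source = 'Example Data'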
@@ -283,7 +144,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(times_used_tuple)) self.results_counter += 1 - logging.info("Updated cntrb_created_at and cntrb_last_used columns for existing " + self.logger.info("Updated cntrb_created_at and cntrb_last_used columns for existing " "tuple in the contributors table with email: {}\n".format(contributor['commit_email'])) # If cntrb_full_name column is not filled, go ahead and fill it bc we have that info @@ -297,7 +158,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(name_col)) - logging.info("Inserted cntrb_full_name column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_full_name column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) # If cntrb_canonical column is not filled, go ahead and fill it w main email bc @@ -312,7 +173,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(canonical_col)) - logging.info("Inserted cntrb_canonical column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_canonical column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) @@ -347,20 +208,20 @@ def contributors_model(self, entry_info, repo_id): url = 'https://api.github.com/search/users?q={}+in:email'.format( cmt_cntrb['email']) - logging.info("Hitting endpoint: " + url + " ...\n") + self.logger.info("Hitting endpoint: " + url + " ...\n") r = requests.get(url=url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) results = r.json() # If no matches or bad response, continue with other contributors if 'total_count' not in results: - logging.info("Search query returned an empty response, moving on...\n") + self.logger.info("Search query returned an empty response, moving on...\n") continue if results['total_count'] == 0: - logging.info("Search query did not return any results, moving on...\n") + self.logger.info("Search query did not return any results, moving on...\n") continue - logging.info("When searching for a contributor with info {}, we found the following users: {}\n".format( + self.logger.info("When searching for a contributor with info {}, we found the following users: {}\n".format( cmt_cntrb, results)) # Grab first result and make sure it has the highest match score @@ -370,9 +231,9 @@ def contributors_model(self, entry_info, repo_id): match = item cntrb_url = ("https://api.github.com/users/" + match['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) contributor = r.json() # Fill in all github information @@ -407,11 +268,12 @@ def contributors_model(self, entry_info, repo_id): } result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==self.cntrb_id_inc).values(cntrb_gh_info)) - logging.info("Updated existing tuple in the contributors table with github info after " + self.logger.info("Updated existing tuple in the 
contributors table with github info after " "a successful search query on a facade commit's author : {} {}\n".format(contributor, cntrb_gh_info)) # Dupe check + self.logger.info('Checking dupes.\n') dupe_cntrb_sql = s.sql.text(""" SELECT contributors.* FROM contributors inner join ( @@ -424,10 +286,23 @@ def contributors_model(self, entry_info, repo_id): dupe_cntrbs = pd.read_sql(dupe_cntrb_sql, self.db, params={}) - # Turn this column from nan to None - dupe_cntrbs['gh_user_id'] = dupe_cntrbs['gh_user_id'].where(pd.notnull(dupe_cntrbs['gh_user_id']), None) + self.logger.info(f'There are {len(dupe_cntrbs)} duplicates.\n') + + # Turn these columns from nan/nat to None + dupe_cntrbs['gh_user_id'] = dupe_cntrbs['gh_user_id'].where( + pd.notnull(dupe_cntrbs['gh_user_id']), None) + dupe_cntrbs['cntrb_last_used'] = dupe_cntrbs['cntrb_last_used'].astype( + object).where(dupe_cntrbs['cntrb_last_used'].notnull(), None) + dupe_cntrbs['cntrb_last_used'] = dupe_cntrbs['cntrb_last_used'].astype( + object).where(dupe_cntrbs['cntrb_last_used'].notnull(), None) for i, cntrb_existing in dupe_cntrbs.iterrows(): + + self.logger.info(f'Processing dupe: {cntrb_existing}.\n') + if i == 0: + self.logger.info('skipping first\n') + continue + cntrb_new = cntrb_existing.copy() del cntrb_new['cntrb_id'] del cntrb_new['data_collection_date'] @@ -447,22 +322,29 @@ def contributors_model(self, entry_info, repo_id): dupe_ids = pd.read_sql(dupe_ids_sql, self.db, params={'pk': pk, \ 'email': cntrb_new['cntrb_email']})['cntrb_id'].values.tolist() - self.map_new_id(self, dupe_ids, pk) + self.map_new_id(dupe_ids, pk) delete_dupe_ids_sql = s.sql.text(""" DELETE FROM contributors WHERE cntrb_id <> {} - AND cntrb_email = '{}' + AND cntrb_email = '{}'; """.format(pk, cntrb_new['cntrb_email'])) - self.db.execute(delete_dupe_ids_sql) + self.logger.info(f'Trying to delete dupes with sql: {delete_dupe_ids_sql}') + + try: + result = self.db.execute(delete_dupe_ids_sql) + except Exception as e: + self.logger.info(f'Deleting dupes failed with error: {e}') + + self.logger.info('Deleted duplicates.\n') # Register this task as completed - register_task_completion(self, entry_info, repo_id, "contributors") + self.register_task_completion(entry_info, repo_id, "contributors") def insert_facade_contributors(self, entry_info, repo_id): - logging.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) + self.logger.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -498,7 +380,7 @@ def insert_facade_contributors(self, entry_info, repo_id): """) commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, params={'repo_id': repo_id}).to_json(orient="records")) - logging.info("We found {} distinct contributors needing insertion (repo_id = {})".format( + self.logger.info("We found {} distinct contributors needing insertion (repo_id = {})".format( len(commit_cntrbs), repo_id)) for cntrb in commit_cntrbs: @@ -511,10 +393,10 @@ def insert_facade_contributors(self, entry_info, repo_id): 'cntrb_full_name': cntrb['name'] } result = self.db.execute(self.contributors_table.insert().values(cntrb_tuple)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) 
self.results_counter += 1 - logging.info("Inserted contributor: {}\n".format(cntrb['email'])) + self.logger.info("Inserted contributor: {}\n".format(cntrb['email'])) def handle_alias(self, tuple): cntrb_email = tuple['cntrb_email'] # canonical @@ -522,7 +404,7 @@ def handle_alias(self, tuple): cntrb_id = tuple['cntrb_id'] # Check existing contributors table tuple - existing_tuples = retrieve_tuple(self, {'cntrb_email': tuple['commit_email']}, ['contributors']) + existing_tuples = self.retrieve_tuple({'cntrb_email': tuple['commit_email']}, ['contributors']) if len(existing_tuples) == 0: """ Insert alias tuple into the contributor table """ @@ -543,15 +425,15 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc elif len(existing_tuples) > 1: # fix all dupe references to dupe cntrb ids before we delete them - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") - logging.info("For cntrb_email: {}".format(tuple['commit_email'])) + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") + self.logger.info("For cntrb_email: {}".format(tuple['commit_email'])) """ Insert alias tuple into the contributor table """ @@ -576,7 +458,7 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc @@ -610,14 +492,14 @@ def handle_alias(self, tuple): try: # Delete all dupes result = self.db.execute(deleteSQL) - logging.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) + self.logger.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) except Exception as e: - logging.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) + self.logger.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) else: #then there would be exactly 1 existing tuple, so that id is the one we want alias_id = existing_tuples[0]['cntrb_id'] - logging.info('Checking canonicals match.\n') + self.logger.info('Checking canonicals match.\n') alias_sql = s.sql.text(""" SELECT * FROM contributors @@ -636,14 +518,14 @@ def handle_alias(self, tuple): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_canonical==canonical_id_result.iloc[0]['cntrb_canonical'] ).values(canonical_col)) - logging.info("Updated cntrb_canonical column for existing tuple in the contributors " + self.logger.info("Updated cntrb_canonical column for existing tuple in the contributors " "table with email: {}\n".format(tuple['cntrb_email'])) # Now check existing alias table tuple - 
existing_tuples = retrieve_tuple(self, {'alias_email': commit_email}, ['contributors_aliases']) + existing_tuples = self.retrieve_tuple({'alias_email': commit_email}, ['contributors_aliases']) if len(existing_tuples) == 0: - logging.info("Finding cntrb_id for canonical email: {}".format(cntrb_email)) + self.logger.info("Finding cntrb_id for canonical email: {}".format(cntrb_email)) canonical_id_sql = s.sql.text(""" SELECT cntrb_id as canonical_id from contributors @@ -652,7 +534,7 @@ def handle_alias(self, tuple): canonical_id_result = json.loads(pd.read_sql(canonical_id_sql, self.db, params={'email': cntrb_email}).to_json( orient="records")) if len(canonical_id_result) > 1: - logging.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) + self.logger.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) alias_tuple = { 'cntrb_id': canonical_id_result[0]['canonical_id'], 'cntrb_a_id': alias_id, @@ -665,9 +547,9 @@ def handle_alias(self, tuple): } result = self.db.execute(self.contributors_aliases_table.insert().values(alias_tuple)) self.results_counter += 1 - logging.info("Inserted alias with email: {}\n".format(commit_email)) + self.logger.info("Inserted alias with email: {}\n".format(commit_email)) if len(existing_tuples) > 1: - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " "table AND NEED TO ADD DELETION LOGIC: {}\n".format(existing_tuples)) def map_new_id(self, dupe_ids, new_id): @@ -693,48 +575,49 @@ def map_new_id(self, dupe_ids, new_id): alias_result = self.db.execute(self.contributors_aliases_table.update().where( self.contributors_aliases_table.c.cntrb_a_id.in_(dupe_ids)).values(alias_update_col)) - logging.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) except Exception as e: - logging.info(f'Alias re-map already done... error: {e}') + self.logger.info(f'Alias re-map already done... 
error: {e}') issue_events_result = self.db.execute(self.issue_events_table.update().where( self.issue_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_events_result = self.db.execute(self.pull_request_events_table.update().where( self.pull_request_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issues_cntrb_result = self.db.execute(self.issues_table.update().where( self.issues_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issues_reporter_result = self.db.execute(self.issues_table.update().where( self.issues_table.c.reporter_id.in_(dupe_ids)).values(reporter_col)) - logging.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issue_assignee_result = self.db.execute(self.issue_assignees_table.update().where( self.issue_assignees_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_assignee_result = self.db.execute(self.pull_request_assignees_table.update().where( self.pull_request_assignees_table.c.contrib_id.in_(dupe_ids)).values(pr_assignee_col)) - logging.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) message_result = self.db.execute(self.message_table.update().where( self.message_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_reviewers_result = self.db.execute(self.pull_request_reviewers_table.update().where( self.pull_request_reviewers_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated 
cntrb_id column for tuple in the pull_request_reviewers table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_reviewers table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_meta_result = self.db.execute(self.pull_request_meta_table.update().where( self.pull_request_meta_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_repo_result = self.db.execute(self.pull_request_repo_table.update().where( self.pull_request_repo_table.c.pr_cntrb_id.in_(dupe_ids)).values(pr_repo_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_repo table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_repo table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info('Done mapping new id.\n') diff --git a/workers/contributor_worker/contributor_worker/__init__.py b/workers/contributor_worker/contributor_worker/__init__.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.0.1' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/contributor_worker/contributor_worker/runtime.py b/workers/contributor_worker/contributor_worker/runtime.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/runtime.py +++ /dev/null @@ -1,110 +0,0 @@ -from flask import Flask, jsonify, request -from contributor_worker.worker import ContributorWorker -import click, os, json, logging, requests -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}\n".format(str(request.json))) - app.contributor_worker.task = request.json - - #set task - return jsonify({"success": "sucess"}) - - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": app.contributor_worker._queue, - "tasks": [{ - "given": list(app.contributor_worker._queue) - }] - }) - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.contributor_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', 
default='localhost', help='Host') [email protected]('--port', default=51252, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'contributor_worker', None, {}) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.contributor_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "anomaly_days": worker_info['anomaly_days'] if 'anomaly_days' in worker_info else 2, - "training_days": worker_info['training_days'] if 'training_days' in worker_info else 365, - "confidence_interval": worker_info['confidence_interval'] if 'confidence_interval' in worker_info else .95, - "contamination": worker_info['contamination'] if 'contamination' in worker_info else 0.041, - 'metrics': worker_info['metrics'] if 'metrics' in worker_info else {"issues-new": "issues", - "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"} - } - - #create instance of the worker - app.contributor_worker = ContributorWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - print("Starting Flask App on host {} with port {} with pid: ".format(broker_host, worker_port) + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - print("Killing Flask App: {} and telling broker that this worker is disconnected.".format(str(os.getpid()))) - try: - logging.info("Sending disconnected message to broker... 
@ -> {} with info: {}\n".format('http://{}:{}/api/unstable/workers'.format( - config['broker_host'], config['broker_port']), config)) - requests.post('http://{}:{}/api/unstable/workers/remove'.format( - config['broker_host'], config['broker_port']), json=config) #hello message - except Exception as e: - logging.info("Ran into error: {}".format(e)) - logging.info("Broker's port is busy, worker will not be able to accept tasks, " - "please restart Augur if you want this worker to attempt connection again.") - diff --git a/workers/contributor_worker/runtime.py b/workers/contributor_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/contributor_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.contributor_worker.contributor_worker import ContributorWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ContributorWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/contributor_worker/setup.py b/workers/contributor_worker/setup.py --- a/workers/contributor_worker/setup.py +++ b/workers/contributor_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="contributor_worker", - version="0.0.2", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'contributor_worker_start=contributor_worker.runtime:main', + 'contributor_worker_start=workers.contributor_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/facade_worker/facade_worker/facade00mainprogram.py b/workers/facade_worker/facade_worker/facade00mainprogram.py --- a/workers/facade_worker/facade_worker/facade00mainprogram.py +++ b/workers/facade_worker/facade_worker/facade00mainprogram.py @@ -26,20 +26,8 @@ # repos. It also rebuilds analysis data, checks any changed affiliations and # aliases, and caches data for display. 
-import pymysql -import sys -import platform -import imp -import time -import datetime -import html.parser -import subprocess -import os -import getopt -import xlsxwriter -import configparser +import pymysql, sys, platform, imp, time, datetime, html.parser, subprocess, os, getopt, xlsxwriter, configparser, logging from multiprocessing import Process, Queue - from facade_worker.facade01config import Config#increment_db, update_db, migrate_database_config, database_connection, get_setting, update_status, log_activity from facade_worker.facade02utilitymethods import update_repo_log, trim_commit, store_working_author, trim_author from facade_worker.facade03analyzecommit import analyze_commit @@ -48,55 +36,45 @@ from facade_worker.facade06analyze import analysis from facade_worker.facade07rebuildcache import nuke_affiliations, fill_empty_affiliations, invalidate_caches, rebuild_unknown_affiliation_and_web_caches -from workers.standard_methods import read_config +from workers.util import read_config +from workers.worker_base import Worker + +html = html.parser.HTMLParser() -import logging +class FacadeWorker(Worker): + def __init__(self, config={}, task=None): + worker_type = "facade_worker" -# if platform.python_implementation() == 'PyPy': -# import pymysql -# else: -# import MySQLdb -# ## End Imports + # Define what this worker can be given and know how to interpret + given = [['repo_group']] + models = ['commits'] -html = html.parser.HTMLParser() + # Define the tables needed to insert, update, or delete on + data_tables = [] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Facade-specific config + self.cfg = Config(self.logger) + + # Define data collection info + self.tool_source = 'Facade Worker' + self.tool_version = '1.0.0' + self.data_source = 'Git Log' -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class FacadeWorker: - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(os.getpid())) - - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.cfg = Config() - - ### The real program starts here ### + def initialize_database_connections(self): # Set up the database - db_user = self.config['user'] - db_pass = self.config['password'] - db_name = self.config['database'] - db_host = self.config['host'] - db_port = self.config['port'] - db_user_people = self.config['user'] - db_pass_people = self.config['password'] - db_name_people = self.config['database'] - db_host_people = self.config['host'] - db_port_people = self.config['port'] + db_user = self.config['user_database'] + db_pass = self.config['password_database'] + db_name = self.config['name_database'] + db_host = self.config['host_database'] + db_port = self.config['port_database'] # Open a general-purpose connection - db,cursor = self.cfg.database_connection( + self.db, self.cursor = self.cfg.database_connection( db_host, 
db_user, db_pass, @@ -104,157 +82,68 @@ def __init__(self, config, task=None): db_port, False, False) # Open a connection for the people database - db_people,cursor_people = self.cfg.database_connection( - db_host_people, - db_user_people, - db_pass_people, - db_name_people, - db_port_people, True, False) + self.db_people,self.cursor_people = self.cfg.database_connection( + db_host, + db_user, + db_pass, + db_name, + db_port, True, False) # Check if the database is current and update it if necessary try: - current_db = int(self.cfg.get_setting('database_version')) + self.current_db = int(self.cfg.get_setting('database_version')) except: # Catch databases which existed before database versioning - current_db = -1 - - #WHAT IS THE UPSTREAM_DB??? - # if current_db < upstream_db: - - # print(("Current database version: %s\nUpstream database version %s\n" % - # (current_db, upstream_db))) - - # self.cfg.update_db(current_db); + self.current_db = -1 - self.commit_model() - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - rg_id = value['given']['repo_group_id'] - - """ Query all repos """ - # repoUrlSQL = s.sql.text(""" - # SELECT repo_id,repo_group_id,repo_git FROM repo WHERE repo_group_id = '{}' - # """.format(rg_id)) - # rs = pd.read_sql(repoUrlSQL, self.db, params={}) - try: - if value['job_type'] == "UPDATE": - self._queue.put(CollectorTask(message_type='TASK', entry_info=value)) - elif value['job_type'] == "MAINTAIN": - self._maintain_queue.put(CollectorTask(message_type='TASK', entry_info=value)) - - except Exception as e: - logging.info("error: {}".format(e)) - - self._task = CollectorTask(message_type='TASK', entry_info={"task": value, "repo_id": repo_id}) - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...") - if self._child is None: - self._child = Process(target=self.collect, args=()) - self._child.start() - def collect(self): """ Function to process each entry in the worker's task queue Determines what action to take based off the message type """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() while True: - time.sleep(0.5) if not self._queue.empty(): - message = self._queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "UPDATE" + message = self._queue.get() # Get the task off our MP queue else: - if not self._maintain_queue.empty(): - message = self._maintain_queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "MAINTAIN" - else: - break - - if message.type == 'EXIT': + break + self.logger.info("Popped off message: {}\n".format(str(message))) + + if message['job_type'] == 'STOP': break - if message.type != 'TASK': - raise ValueError(f'{message.type} is not a recognized task type') - - if message.type == 'TASK': - try: - git_url = message.entry_info['task']['given']['git_url'] - self.query_issues({'git_url': git_url, 'repo_id': message.entry_info['repo_id']}) - except 
Exception as e: - logging.info("Worker ran into an error for task: {}\n".format(message.entry_info['task'])) - logging.info("Error encountered: " + repr(e) + "\n") - logging.info("Notifying broker and logging task failure in database...\n") - message.entry_info['task']['worker_id'] = self.config['id'] - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=message.entry_info['task']) - # Add to history table - task_history = { - "repo_id": message.entry_info['repo_id'], - "worker": self.config['id'], - "job_model": message.entry_info['task']['models'][0], - "oauth_id": self.config['zombie_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error for: " + str(message.entry_info['task']) + "\n") - - # Update job process table - updated_job = { - "since_id_str": message.entry_info['repo_id'], - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==message.entry_info['task']['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + message.entry_info['task']['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - pass - - def commit_model(self): + # If task is not a valid job type + if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': + raise ValueError('{} is not a recognized task type'.format(message['job_type'])) + pass + try: + self.commits_model(message) + except Exception as e: + self.logger.error(e) + raise(e) + break + + def commits_model(self, message): # Figure out what we need to do - limited_run = read_config("Facade", name="limited_run", default=0) - delete_marked_repos = read_config("Facade", name="delete_marked_repos", default=0) - pull_repos = read_config("Facade", name="pull_repos", default=0) - clone_repos = read_config("Facade", name="clone_repos", default=1) - check_updates = read_config("Facade", name="check_updates", default=0) - force_updates = read_config("Facade", name="force_updates", default=0) - run_analysis = read_config("Facade", name="run_analysis", default=0) - force_analysis = read_config("Facade", name="force_analysis", default=0) - nuke_stored_affiliations = read_config("Facade", name="nuke_stored_affiliations", default=0) - fix_affiliations = read_config("Facade", name="fix_affiliations", default=1) - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - rebuild_caches = read_config("Facade", name="rebuild_caches", default=1) #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], + limited_run = self.augur_config.get_value("Facade", "limited_run") + delete_marked_repos = self.augur_config.get_value("Facade", "delete_marked_repos") + pull_repos = self.augur_config.get_value("Facade", "pull_repos") + clone_repos = self.augur_config.get_value("Facade", "clone_repos") + check_updates = self.augur_config.get_value("Facade", "check_updates") + force_updates = self.augur_config.get_value("Facade", "force_updates") + run_analysis = self.augur_config.get_value("Facade", "run_analysis") + force_analysis = self.augur_config.get_value("Facade", "force_analysis") + nuke_stored_affiliations = 
self.augur_config.get_value("Facade", "nuke_stored_affiliations") + fix_affiliations = self.augur_config.get_value("Facade", "fix_affiliations") + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + rebuild_caches = self.augur_config.get_value("Facade", "rebuild_caches") #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], # '%Y-%m-%d %I:%M:%S.%f') - datetime.datetime.now()).total_seconds()) // 3600 > int(self.cfg.get_setting( # 'update_frequency')) else 0 - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - create_xlsx_summary_files = read_config("Facade", name="create_xlsx_summary_files", default=0) - multithreaded = read_config("Facade", name="multithreaded", default=1) + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + create_xlsx_summary_files = self.augur_config.get_value("Facade", "create_xlsx_summary_files") + multithreaded = self.augur_config.get_value("Facade", "multithreaded") opts,args = getopt.getopt(sys.argv[1:],'hdpcuUaAmnfIrx') for opt in opts: @@ -355,9 +244,9 @@ def commit_model(self): if len(repo_base_directory) == 0: self.cfg.log_activity('Error','No base directory. It is unsafe to continue.') - update_status('Failed: No base directory') + self.cfg.update_status('Failed: No base directory') sys.exit(1) - + # Begin working start_time = time.time() diff --git a/workers/facade_worker/facade_worker/facade01config.py b/workers/facade_worker/facade_worker/facade01config.py --- a/workers/facade_worker/facade_worker/facade01config.py +++ b/workers/facade_worker/facade_worker/facade01config.py @@ -39,15 +39,15 @@ import json import logging -from workers.standard_methods import read_config - +from workers.util import read_config class Config: - def __init__(self): + def __init__(self, logger): self.upstream_db = 7 self.cursor = None self.cursor_people = None + self.logger = logger self.db = None self.db_people = None @@ -60,9 +60,10 @@ def __init__(self): " in your \'Workers\' -> \'facade_worker\' object in your config " "to the directory in which you want to clone repos. Exiting...") sys.exit(1) - self.tool_source = '\'FacadeAugur\'' - self.tool_version = '\'0.0.1\'' - self.data_source = '\'git_repository\'' + + self.tool_source = 'Facade Worker' + self.tool_version = '1.0.0' + self.data_source = 'Git Log' # Figure out how much we're going to log logging.basicConfig(filename='worker_{}.log'.format(worker_options['port']), filemode='w', level=logging.INFO) @@ -199,7 +200,7 @@ def log_activity(self, level, status): # "Debug", then just print it and don't save it in the database. 
log_options = ('Error','Quiet','Info','Verbose','Debug') - logging.info("* %s\n" % status) + self.logger.info("* %s\n" % status) if self.log_level == 'Debug' and level == 'Debug': return @@ -209,7 +210,7 @@ def log_activity(self, level, status): self.cursor.execute(query, (level, status)) self.db.commit() except Exception as e: - logging.info('Error encountered: {}\n'.format(e)) + self.logger.info('Error encountered: {}\n'.format(e)) # Set up the database db_user = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur') diff --git a/workers/facade_worker/facade_worker/facade03analyzecommit.py b/workers/facade_worker/facade_worker/facade03analyzecommit.py --- a/workers/facade_worker/facade_worker/facade03analyzecommit.py +++ b/workers/facade_worker/facade_worker/facade03analyzecommit.py @@ -38,7 +38,7 @@ import configparser import traceback -from workers.standard_methods import read_config +from workers.util import read_config def analyze_commit(cfg, repo_id, repo_loc, commit, multithreaded): diff --git a/workers/facade_worker/facade_worker/facade07rebuildcache.py b/workers/facade_worker/facade_worker/facade07rebuildcache.py --- a/workers/facade_worker/facade_worker/facade07rebuildcache.py +++ b/workers/facade_worker/facade_worker/facade07rebuildcache.py @@ -156,7 +156,6 @@ def discover_null_affiliations(attribution,email): cfg.log_activity('Debug','Found domain match for %s' % email) - # try: for match in matches: update = ("UPDATE commits " "SET cmt_%s_affiliation = %%s " @@ -164,7 +163,6 @@ def discover_null_affiliations(attribution,email): "AND cmt_%s_affiliation IS NULL " "AND cmt_%s_date::date >= %%s::date" % (attribution, attribution, attribution, attribution)) - #"AND cmt_%s_date >= TO_TIMESTAMP(%%s, 'YYYY-MM-DD')" % cfg.log_activity('Info', 'attr: {} \nmatch:{}\nsql: {}'.format(attribution, match, update)) @@ -175,15 +173,6 @@ def discover_null_affiliations(attribution,email): cfg.log_activity('Info', 'Error encountered: {}'.format(e)) cfg.log_activity('Info', 'Affiliation insertion failed for %s ' % email) - # except Exception as e: - # cfg.log_activity('Info', '1st Error encountered: {}'.format(e)) - # cfg.log_activity('Info', 'Attribution matching failed for %s ' % email) - # except Exception as e: - # logging.info('2nd Error encountered: {}'.format(e)) - # cfg.log_activity('Info', 'Attribution matching failed and exception logging failed') - # else: - # cfg.log_activity('Info', 'Attribution matching failed and exception logging failed and the exception to the exception failed.') - def discover_alias(email): # Match aliases with their canonical email diff --git a/workers/facade_worker/facade_worker/runtime.py b/workers/facade_worker/facade_worker/runtime.py --- a/workers/facade_worker/facade_worker/runtime.py +++ b/workers/facade_worker/facade_worker/runtime.py @@ -1,102 +1,23 @@ from flask import Flask, jsonify, request, Response import click, os, json, requests, logging -from facade_worker.facade00mainprogram import FacadeWorker -from workers.standard_methods import read_config +from workers.facade_worker.facade_worker.facade00mainprogram import FacadeWorker +from workers.util import create_server, WorkerGunicornApplication -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint 
that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(request.json)) - app.facade_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.facade_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51258, help='Port') -def main(augur_url, host, port): +def main(): """ Declares singular worker and creates the server and flask app that it will be running on """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'facade_worker', None, None) - worker_port = worker_info['port'] if 'port' in worker_info else port - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - config = { - "id": "com.augurlabs.core.facade_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } + app = Flask(__name__) + app.worker = FacadeWorker() - #create instance of the worker - app.facade_worker = FacadeWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") + create_server(app) + WorkerGunicornApplication(app).run() - app.run(debug=app.debug, host=host, port=worker_port) + if app.worker._child is not None: + app.worker._child.terminate() try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) except: pass - logging.info("Killing Flask App: " + str(os.getpid())) + 
os.kill(os.getpid(), 9) - diff --git a/workers/facade_worker/setup.py b/workers/facade_worker/setup.py --- a/workers/facade_worker/setup.py +++ b/workers/facade_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="facade_worker", - version="0.1", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -30,7 +30,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'facade_worker_start=facade_worker.runtime:main', + 'facade_worker_start=workers.facade_worker.facade_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/github_worker/github_worker/worker.py b/workers/github_worker/github_worker.py similarity index 56% rename from workers/github_worker/github_worker/worker.py rename to workers/github_worker/github_worker.py --- a/workers/github_worker/github_worker/worker.py +++ b/workers/github_worker/github_worker.py @@ -2,217 +2,61 @@ from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData import requests, time, logging, json, os from datetime import datetime -from sqlalchemy.ext.declarative import declarative_base -from workers.standard_methods import * +from workers.worker_base import Worker -class GitHubWorker: +class GitHubWorker(Worker): """ Worker that collects data from the Github API and stores it in our database task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - - self._task = task # task currently being worked on (dict) - self._child = None # process of currently running task (multiprocessing process) - self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes) - self.db = None # sql alchemy db session + def __init__(self, config={}): - # These 3 are included in every tuple the worker inserts (data collection info) - self.tool_source = 'GitHub API Worker' - self.tool_version = '0.0.3' # See __init__.py - self.data_source = 'GitHub API' - - self.results_counter = 0 # count of tuples inserted in the database (to store stats for each task in op tables) - self.finishing_task = True # if we are finishing a previous task, pagination works differenty - - self.specs = { - "id": self.config['id'], # what the broker knows this worker as - "location": self.config['location'], # host + port worker is running on (so broker can send tasks here) - "qualifications": [ - { - "given": [["github_url"]], # type of repo this worker can be given as a task - "models":["issues"] # models this worker can fill for a repo as a task - } - ], - "config": [self.config] - } - - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - # Create an sqlalchemy engine for both database schemas - logging.info("Making database connections... 
{}".format(DB_STR)) - db_schema = 'augur_data' - self.db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(db_schema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + worker_type = 'github_worker' - metadata = MetaData() - helper_metadata = MetaData() + given = [['github_url']] + models = ['issues'] - # Reflect only the tables we will use for each schema's metadata object - metadata.reflect(self.db, only=['contributors', 'issues', 'issue_labels', 'message', + data_tables = ['contributors', 'issues', 'issue_labels', 'message', 'issue_message_ref', 'issue_events','issue_assignees','contributors_aliases', 'pull_request_assignees', 'pull_request_events', 'pull_request_reviewers', 'pull_request_meta', - 'pull_request_repo']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - # So we can access all our tables when inserting, updating, etc - self.contributors_table = Base.classes.contributors.__table__ - self.issues_table = Base.classes.issues.__table__ - self.issue_labels_table = Base.classes.issue_labels.__table__ - self.issue_events_table = Base.classes.issue_events.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.message_table = Base.classes.message.__table__ - self.issues_message_ref_table = Base.classes.issue_message_ref.__table__ - self.issue_assignees_table = Base.classes.issue_assignees.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.contributors_aliases_table = Base.classes.contributors_aliases.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ + 'pull_request_repo'] + operations_tables = ['worker_history', 'worker_job'] - # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's - logging.info("Querying starting ids info...\n") - - self.issue_id_inc = get_max_id(self, 'issues', 'issue_id') - - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - - self.msg_id_inc = get_max_id(self, 'message', 'msg_id') - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) + # These 3 are included in every tuple the worker inserts (data collection info) + self.tool_source = 'GitHub API Worker' + self.tool_version = '1.0.0' + self.data_source = 'GitHub API' - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5433/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ 
Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - # If the task has one of our "valid" job types - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - # Setting that causes paginating through ALL pages, not just unknown ones - # This setting is set by the housekeeper and is attached to the task before it gets sent here - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - - self._task = value - self.run() + self.finishing_task = True # if we are finishing a previous task, pagination works differenty + self.platform_id = 25150 # GitHub - def cancel(self): - """ Delete/cancel current task - """ - self._task = None + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - # Spawn a subprocess to handle message reading and performing the tasks - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['github_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'issues': - self.issues_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass def issues_model(self, entry_info, repo_id): """ Data collection function Query the GitHub API for issues """ + + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's + self.logger.info("Querying starting ids info...\n") + + self.issue_id_inc = self.get_max_id('issues', 'issue_id') + + self.msg_id_inc = self.get_max_id('message', 'msg_id') github_url = entry_info['given']['github_url'] - logging.info("Beginning filling the issues model for repo: " + github_url + "\n") - record_model_process(self, repo_id, 'issues') + self.logger.info("Beginning filling the issues model for repo: " + github_url + "\n") # Contributors are part of this model, and finding all for the repo saves us # from having to add them as we discover committers in the issue process - query_github_contributors(self, entry_info, repo_id) + 
self.query_github_contributors(entry_info, repo_id) # Extract the owner/repo for the endpoint path = urlparse(github_url) @@ -238,14 +82,14 @@ def issues_model(self, entry_info, repo_id): duplicate_col_map = {'gh_issue_id': 'id'} #list to hold issues needing insertion - issues = paginate(self, issues_url, duplicate_col_map, update_col_map, table, table_pkey, + issues = self.paginate(issues_url, duplicate_col_map, update_col_map, table, table_pkey, 'WHERE repo_id = {}'.format(repo_id)) - + self.logger.info(issues) # Discover and remove duplicates before we start inserting - logging.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") + self.logger.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") for issue_dict in issues: - logging.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") + self.logger.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") # Add the FK repo_id to the dict being inserted issue_dict['repo_id'] = repo_id @@ -253,17 +97,17 @@ def issues_model(self, entry_info, repo_id): # Figure out if this issue is a PR # still unsure about this key value pair/what it means pr_id = None - if "pull_request" in issue_dict: - logging.info("Issue is a PR\n") + if 'pull_request' in issue_dict: + self.logger.info("Issue is a PR\n") # Right now we are just storing our issue id as the PR id if it is one pr_id = self.issue_id_inc else: - logging.info("Issue is not a PR\n") + self.logger.info("Issue is not a PR\n") # Begin on the actual issue... issue = { "repo_id": issue_dict['repo_id'], - "reporter_id": find_id_from_login(self, issue_dict['user']['login']), + "reporter_id": self.find_id_from_login(issue_dict['user']['login']), "pull_request": pr_id, "pull_request_id": pr_id, "created_at": issue_dict['created_at'], @@ -292,20 +136,20 @@ def issues_model(self, entry_info, repo_id): if issue_dict['flag'] == 'need_update': result = self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( + self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( issue_dict['id'])) self.issue_id_inc = issue_dict['pkey'] elif issue_dict['flag'] == 'need_insertion': try: result = self.db.execute(self.issues_table.insert().values(issue)) - logging.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) self.results_counter += 1 self.issue_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + + self.logger.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + " and title of: {} and gh_issue_num of: {}\n".format(issue_dict['title'],issue_dict['number'])) except Exception as e: - logging.info("When inserting an issue, ran into the following error: {}\n".format(e)) - logging.info(issue) + self.logger.info("When inserting an issue, ran into the following error: {}\n".format(e)) + self.logger.info(issue) continue # Check if the assignee key's value is already recorded in the assignees key's value @@ -316,13 +160,13 @@ def issues_model(self, entry_info, repo_id): # Handles case if there are no assignees if collected_assignees[0] is not None: - logging.info("Count of assignees to insert for this issue: " + 
str(len(collected_assignees)) + "\n") + self.logger.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") for assignee_dict in collected_assignees: if type(assignee_dict) != dict: continue assignee = { "issue_id": self.issue_id_inc, - "cntrb_id": find_id_from_login(self, assignee_dict['login']), + "cntrb_id": self.find_id_from_login(assignee_dict['login']), "tool_source": self.tool_source, "tool_version": self.tool_version, "data_source": self.data_source, @@ -331,13 +175,13 @@ def issues_model(self, entry_info, repo_id): } # Commit insertion to the assignee table result = self.db.execute(self.issue_assignees_table.insert().values(assignee)) - logging.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + + self.logger.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + " with login/cntrb_id: " + assignee_dict['login'] + " " + str(assignee['cntrb_id']) + "\n") else: - logging.info("Issue does not have any assignees\n") + self.logger.info("Issue does not have any assignees\n") # Insert the issue labels to the issue_labels table for label_dict in issue_dict['labels']: @@ -357,10 +201,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_labels_table.insert().values(label)) - logging.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue label with text: " + label_dict['name'] + "\n") + self.logger.info("Inserted issue label with text: " + label_dict['name'] + "\n") #### Messages/comments and events insertion @@ -375,19 +219,19 @@ def issues_model(self, entry_info, repo_id): duplicate_col_map = {'msg_timestamp': 'created_at'} #list to hold contributors needing insertion or update - issue_comments = paginate(self, comments_url, duplicate_col_map, update_col_map, table, table_pkey, + issue_comments = self.paginate(comments_url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="WHERE msg_id IN (SELECT msg_id FROM issue_message_ref WHERE issue_id = {})".format( self.issue_id_inc)) - logging.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) + self.logger.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) for comment in issue_comments: try: - commenter_cntrb_id = find_id_from_login(self, comment['user']['login']) + commenter_cntrb_id = self.find_id_from_login(comment['user']['login']) except: commenter_cntrb_id = None issue_comment = { - "pltfrm_id": 25150, + "pltfrm_id": self.platform_id, "msg_text": comment['body'], "msg_timestamp": comment['created_at'], "cntrb_id": commenter_cntrb_id, @@ -397,13 +241,13 @@ def issues_model(self, entry_info, repo_id): } try: result = self.db.execute(self.message_table.insert().values(issue_comment)) - logging.info("Primary key inserted into the message table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the message table: {}".format(result.inserted_primary_key)) self.results_counter += 1 self.msg_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue comment with id: 
{}\n".format(self.msg_id_inc)) + self.logger.info("Inserted issue comment with id: {}\n".format(self.msg_id_inc)) except Exception as e: - logging.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) + self.logger.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) ### ISSUE MESSAGE REF TABLE ### @@ -417,8 +261,8 @@ def issues_model(self, entry_info, repo_id): "issue_msg_ref_src_node_id": comment['node_id'] } - result = self.db.execute(self.issues_message_ref_table.insert().values(issue_message_ref)) - logging.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) + result = self.db.execute(self.issue_message_ref_table.insert().values(issue_message_ref)) + self.logger.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) self.results_counter += 1 # Base of the url for event endpoints @@ -434,7 +278,7 @@ def issues_model(self, entry_info, repo_id): pseudo_key_gh = 'url' pseudo_key_augur = 'node_url' table = 'issue_events' - event_table_values = get_table_values(self, [pseudo_key_augur], [table], "WHERE issue_id = {}".format(self.issue_id_inc)) + event_table_values = self.get_table_values([pseudo_key_augur], [table], "WHERE issue_id = {}".format(self.issue_id_inc)) # Paginate backwards through all the events but get first page in order # to determine if there are multiple pages and if the 1st page covers all @@ -442,29 +286,29 @@ def issues_model(self, entry_info, repo_id): multiple_pages = False while True: - logging.info("Hitting endpoint: " + events_url.format(i) + " ...\n") + self.logger.info("Hitting endpoint: " + events_url.format(i) + " ...\n") r = requests.get(url=events_url.format(i), headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) # Find last page so we can decrement from there if 'last' in r.links and not multiple_pages and not self.finishing_task: param = r.links['last']['url'][-6:] i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") multiple_pages = True elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") + self.logger.info("Only 1 page of request\n") elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ... " + self.logger.info("Finishing a previous task, paginating forwards ... " "excess rate limit requests will be made\n") j = r.json() # Checking contents of requests with what we already have in the db - new_events = check_duplicates(j, event_table_values, pseudo_key_gh) + new_events = self.check_duplicates(j, event_table_values, pseudo_key_gh) if len(new_events) == 0 and multiple_pages and 'last' in r.links: if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown events, breaking from pagination.\n") + self.logger.info("No more pages with unknown events, breaking from pagination.\n") break elif len(new_events) != 0: to_add = [obj for obj in new_events if obj not in issue_events] @@ -474,29 +318,29 @@ def issues_model(self, entry_info, repo_id): # Since we already wouldve checked the first page... 
break if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") + self.logger.info("No more pages to check, breaking from pagination.\n") break - logging.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") + self.logger.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") # If the issue is closed, then we search for the closing event and store the user's id cntrb_id = None if 'closed_at' in issue_dict: for event in issue_events: if str(event['event']) != "closed": - logging.info("not closed, continuing") + self.logger.info("not closed, continuing") continue if not event['actor']: continue - cntrb_id = find_id_from_login(self, event['actor']['login']) + cntrb_id = self.find_id_from_login(event['actor']['login']) if cntrb_id is not None: break # Need to hit this single contributor endpoint to get extra created at data... cntrb_url = ("https://api.github.com/users/" + event['actor']['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) contributor = r.json() company = None @@ -543,20 +387,17 @@ def issues_model(self, entry_info, repo_id): # Commit insertion to table result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format( + self.logger.info("Primary key inserted into the contributors table: {}".format( result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - # Increment our global track of the cntrb id for the possibility of it being used as a FK - self.cntrb_id_inc = int(result.inserted_primary_key[0]) + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") for event in issue_events: if event['actor'] is not None: - event['cntrb_id'] = find_id_from_login(self, event['actor']['login']) + event['cntrb_id'] = self.find_id_from_login(event['actor']['login']) if event['cntrb_id'] is None: - logging.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") + self.logger.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") continue # event['cntrb_id'] = None else: @@ -578,10 +419,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_events_table.insert().values(issue_event)) - logging.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) + self.logger.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) if cntrb_id is not None: update_closing_cntrb = { @@ -589,11 +430,11 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( + self.logger.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( issue_dict['id'])) self.issue_id_inc += 1 #Register this task as completed - 
register_task_completion(self, entry_info, repo_id, "issues") + self.register_task_completion(entry_info, repo_id, "issues") diff --git a/workers/github_worker/github_worker/__init__.py b/workers/github_worker/github_worker/__init__.py deleted file mode 100644 --- a/workers/github_worker/github_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/github_worker/github_worker/runtime.py b/workers/github_worker/github_worker/runtime.py deleted file mode 100644 --- a/workers/github_worker/github_worker/runtime.py +++ /dev/null @@ -1,107 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from github_worker.worker import GitHubWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.github_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.github_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51236, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'github_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: # for multiple instances of workers - try: # trying each port for an already-alive worker until a free port is found - print("New github worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": 
"com.augurlabs.core.github_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - app.github_worker = GitHubWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.github_worker._child is not None: - app.github_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/github_worker/runtime.py b/workers/github_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/github_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.github_worker.github_worker import GitHubWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/github_worker/setup.py b/workers/github_worker/setup.py --- a/workers/github_worker/setup.py +++ b/workers/github_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="github_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'github_worker_start=github_worker.runtime:main', + 'github_worker_start=workers.github_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/template_worker/template_worker/__init__.py b/workers/gitlab_issues_worker/__init__.py similarity index 50% rename from workers/template_worker/template_worker/__init__.py rename to workers/gitlab_issues_worker/__init__.py --- a/workers/template_worker/template_worker/__init__.py +++ b/workers/gitlab_issues_worker/__init__.py @@ -1,4 +1,4 @@ -"""gh_repo_info_worker - Augur Worker that collects GitHub Repo Info data""" +"""gitlab_issues_worker - Augur Worker that collects Gitlab Issue Info""" __version__ = '0.0.0' __author__ = 'Augur Team <[email protected]>' diff --git a/workers/gitlab_issues_worker/gitlab_issues_worker.py b/workers/gitlab_issues_worker/gitlab_issues_worker.py new file mode 100644 --- 
/dev/null +++ b/workers/gitlab_issues_worker/gitlab_issues_worker.py @@ -0,0 +1,193 @@ +import logging, os, sys, time, requests, json +from datetime import datetime +from multiprocessing import Process, Queue +import pandas as pd +import sqlalchemy as s +from workers.worker_base import Worker + + +class GitLabIssuesWorker(Worker): + def __init__(self, config={}): + + # Define what this worker can be given and know how to interpret + + # given is usually either [['github_url']] or [['git_url']] (depending if your + # worker is exclusive to repos that are on the GitHub platform) + worker_type = "gitlab_issues_worker" + given = [['git_url']] + + # The name the housekeeper/broker use to distinguish the data model this worker can fill + # You will also need to name the method that does the collection for this model + # in the format *model name*_model() such as fake_data_model() for example + models = ['gitlab_issues'] + + # Define the tables needed to insert, update, or delete on + # The Worker class will set each table you define here as an attribute + # so you can reference all of them like self.message_table or self.repo_table + data_tables = ['contributors', 'issues', 'issue_labels', 'message', 'repo', + 'issue_message_ref', 'issue_events','issue_assignees','contributors_aliases', + 'pull_request_assignees', 'pull_request_events', 'pull_request_reviewers', 'pull_request_meta', + 'pull_request_repo'] + # For most workers you will only need the worker_history and worker_job tables + # from the operations schema, these tables are to log worker task histories + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Request headers updation + + gitlab_api_key = self.augur_config.get_value("Database", "gitlab_api_key") + self.config.update({ + "gitlab_api_key": gitlab_api_key + }) + self.headers = {"PRIVATE-TOKEN" : self.config['gitlab_api_key']} + + + # Define data collection info + self.tool_source = 'Gitlab API Worker' + self.tool_version = '0.0.0' + self.data_source = 'GitLab API' + + + def gitlab_issues_model(self, task, repo_id): + """ This is just an example of a data collection method. All data collection + methods for all workers currently accept this format of parameters. If you + want to change these parameters, you can re-define the collect() method to + overwrite the Worker class' version of it (which is the method that calls + this method). + + :param task: the task generated by the housekeeper and sent to the broker which + was then sent to this worker. Takes the example dict format of: + { + 'job_type': 'MAINTAIN', + 'models': ['fake_data'], + 'display_name': 'fake_data model for url: https://github.com/vmware/vivace', + 'given': { + 'git_url': 'https://github.com/vmware/vivace' + } + } + :param repo_id: the collect() method queries the repo_id given the git/github url + and passes it along to make things easier. 
An int such as: 27869 + """ + + # Collection and insertion of data happens here + + # Collecting issue info from Gitlab API + self.issue_id_inc = self.get_max_id('issues', 'issue_id') + self.msg_id_inc = self.get_max_id('message', 'msg_id') + self.logger.info('Beginning the process of GitLab Issue Collection...'.format(str(os.getpid()))) + gitlab_base = 'https://gitlab.com/api/v4' + intermediate_url = '{}/projects/{}/issues?per_page=100&state=opened&'.format(gitlab_base, 18754962) + gitlab_issues_url = intermediate_url + "page={}" + + + # Get issues that we already have stored + # Set pseudo key (something other than PK) to + # check dupicates with + table = 'issues' + table_pkey = 'issue_id' + update_col_map = {'issue_state': 'state'} + duplicate_col_map = {'gh_issue_id': 'id'} + + #list to hold issues needing insertion + issues = self.paginate(gitlab_issues_url, duplicate_col_map, update_col_map, table, table_pkey, + 'WHERE repo_id = {}'.format(repo_id), platform="gitlab") + + self.logger.info(issues) + self.logger.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") + for issue_dict in issues: + self.logger.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") + pr_id = None + if "pull_request" in issue_dict: + self.logger.info("This is an MR\n") + # Right now we are just storing our issue id as the MR id if it is one + pr_id = self.issue_id_inc + else: + self.logger.info("Issue is not an MR\n") + + # Insert data into models + issue = { + "repo_id": issue_dict['project_id'], + "reporter_id": self.find_id_from_login(issue_dict['author']['username'], platform='gitlab'), + "pull_request": pr_id, + "pull_request_id": pr_id, + "created_at": issue_dict['created_at'], + "issue_title": issue_dict['title'], + "issue_body": issue_dict['description'] if 'description' in issue_dict else None, + "comment_count": issue_dict['user_notes_count'], + "updated_at": issue_dict['updated_at'], + "closed_at": issue_dict['closed_at'], + "repository_url": issue_dict['_links']['project'], + "issue_url": issue_dict['_links']['self'], + "labels_url": issue_dict['labels'], + "comments_url": issue_dict['_links']['notes'], + "events_url": None, + "html_url": issue_dict['_links']['self'], + "issue_state": issue_dict['state'], + "issue_node_id": None, + "gh_issue_id": issue_dict['id'], + "gh_issue_number": issue_dict['iid'], + "gh_user_id": issue_dict['author']['id'], + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + # Commit insertion to the issues table + if issue_dict['flag'] == 'need_update': + self.logger.info("UPDATE FLAG") + result = self.db.execute(self.issues_table.update().where( + self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) + self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( + issue_dict['id'])) + self.issue_id_inc = issue_dict['pkey'] + elif issue_dict['flag'] == 'need_insertion': + self.logger.info("INSERT FLAG") + try: + result = self.db.execute(self.issues_table.insert().values(issue)) + self.logger.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) + self.results_counter += 1 + self.issue_id_inc = int(result.inserted_primary_key[0]) + self.logger.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + + " and title of: {} and gh_issue_num of: {}\n".format(issue_dict['title'], issue_dict['iid'])) + except Exception as e: + self.logger.info("When inserting an issue, ran into 
the following error: {}\n".format(e)) + self.logger.info(issue) + # continue + + # issue_assigness + self.logger.info("assignees", issue_dict['assignees']) + collected_assignees = issue_dict['assignees'] + if issue_dict['assignee'] not in collected_assignees: + collected_assignees.append(issue_dict['assignee']) + if collected_assignees[0] is not None: + self.logger.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") + for assignee_dict in collected_assignees: + if type(assignee_dict) != dict: + continue + assignee = { + "issue_id": self.issue_id_inc, + "cntrb_id": self.find_id_from_login(assignee_dict['username'], platform='gitlab'), + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source, + "issue_assignee_src_id": assignee_dict['id'], + "issue_assignee_src_node": None + } + self.logger.info("assignee info", assignee) + # Commit insertion to the assignee table + result = self.db.execute(self.issue_assignees_table.insert().values(assignee)) + self.logger.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) + self.results_counter += 1 + + self.logger.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + + " with login/cntrb_id: " + assignee_dict['username'] + " " + str(assignee['cntrb_id']) + "\n") + else: + self.logger.info("Issue does not have any assignees\n") + + # Register this task as completed. + # This is a method of the worker class that is required to be called upon completion + # of any data collection model, this lets the broker know that this worker is ready + # for another task + self.register_task_completion(task, repo_id, 'gitlab_issues') + diff --git a/workers/gitlab_issues_worker/runtime.py b/workers/gitlab_issues_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/gitlab_issues_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.gitlab_issues_worker.gitlab_issues_worker import GitLabIssuesWorker +from workers.util import WorkerGunicornApplication, create_server + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitLabIssuesWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) \ No newline at end of file diff --git a/workers/gitlab_issues_worker/setup.py b/workers/gitlab_issues_worker/setup.py new file mode 100644 --- /dev/null +++ b/workers/gitlab_issues_worker/setup.py @@ -0,0 +1,41 @@ +import io +import os +import re + +from setuptools import find_packages +from setuptools import setup + +def read(filename): + filename = os.path.join(os.path.dirname(__file__), filename) + text_type = type(u"") + with io.open(filename, mode="r", encoding='utf-8') as fd: + return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) + +setup( + name="gitlab_issues_worker", + version="0.0.0", + url="https://github.com/chaoss/augur", + license='MIT', + author="Augur Team", + author_email="", + description="Gitlab Worker", + packages=find_packages(exclude=('tests',)), + install_requires=[ + 'flask', + 'requests', + 'psycopg2-binary', + 'click' + ], + entry_points={ + 
'console_scripts': [ + 'gitlab_issues_worker_start=workers.gitlab_issues_worker.runtime:main', + ], + }, + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + ] +) diff --git a/workers/insight_worker/insight_worker/__init__.py b/workers/insight_worker/__init__.py similarity index 100% rename from workers/insight_worker/insight_worker/__init__.py rename to workers/insight_worker/__init__.py diff --git a/workers/insight_worker/insight_worker/worker.py b/workers/insight_worker/insight_worker.py similarity index 79% rename from workers/insight_worker/insight_worker/worker.py rename to workers/insight_worker/insight_worker.py --- a/workers/insight_worker/insight_worker/worker.py +++ b/workers/insight_worker/insight_worker.py @@ -10,179 +10,55 @@ import scipy.stats import datetime from sklearn.ensemble import IsolationForest -from workers.standard_methods import * #init_oauths, get_table_values, get_max_id, register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process, paginate +from workers.worker_base import Worker import warnings warnings.filterwarnings('ignore') -class InsightWorker: +class InsightWorker(Worker): """ Worker that detects anomalies on a select few of our metrics task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self._task = task - self._child = None - self._queue = Queue() - self.db = None + def __init__(self, config={}): + + worker_type = "insight_worker" + + given = [['git_url']] + models = ['insights'] + + data_tables = ['chaoss_metric_status', 'repo_insights', 'repo_insights_records'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({ + 'api_host': self.augur_config.get_value('Server', 'host'), + 'api_port': self.augur_config.get_value('Server', 'port') + }) + + # These 3 are included in every tuple the worker inserts (data collection info) self.tool_source = 'Insight Worker' - self.tool_version = '0.0.3' # See __init__.py + self.tool_version = '1.0.0' self.data_source = 'Augur API' + self.refresh = True self.send_insights = True - self.finishing_task = False self.anomaly_days = self.config['anomaly_days'] self.training_days = self.config['training_days'] self.contamination = self.config['contamination'] self.confidence = self.config['confidence_interval'] / 100 self.metrics = self.config['metrics'] - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["insights"] - } - ], - "config": [self.config] - } - - self.results_counter = 0 - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - dbschema = 'augur_data' - self.db 
= s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - # produce our own MetaData object - metadata = MetaData() - helper_metadata = MetaData() - - # we can reflect it ourselves from a database, using options - # such as 'only' to limit what tables we look at... - metadata.reflect(self.db, only=['chaoss_metric_status', 'repo_insights', 'repo_insights_records']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job']) - - # we can then produce a set of mappings from this MetaData. - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - # calling prepare() just sets up mapped classes and relationships. - Base.prepare() - HelperBase.prepare() - - # mapped classes are ready - self.chaoss_metric_status_table = Base.classes['chaoss_metric_status'].__table__ - self.repo_insights_table = Base.classes['repo_insights'].__table__ - self.repo_insights_records_table = Base.classes['repo_insights_records'].__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5432/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - 
# and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'insights': - self.insights_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - def insights_model(self, entry_info, repo_id): logging.info("Discovering insights for task with entry info: {}\n".format(entry_info)) - record_model_process(self, repo_id, 'insights') """ Collect data """ base_url = 'http://{}:{}/api/unstable/repo-groups/9999/repos/{}/'.format( - self.config['broker_host'], self.config['broker_port'], repo_id) + self.config['api_host'], self.config['api_port'], repo_id) # Dataframe to hold all endpoint results # Subtract configurable amount of time @@ -218,7 +94,7 @@ def insights_model(self, entry_info, repo_id): # If none of the endpoints returned data if df.size == 0: logging.info("None of the provided endpoints provided data for this repository. Anomaly detection is 'done'.\n") - register_task_completion(self, entry_info, repo_id, "insights") + self.register_task_completion(entry_info, repo_id, "insights") return """ Deletion of old insights """ @@ -258,7 +134,7 @@ def insights_model(self, entry_info, repo_id): result = self.db.execute(delete_points_SQL, repo_id=repo_id, min_date=min_date) # get table values to check for dupes later on - insight_table_values = get_table_values(self, ['*'], ['repo_insights_records'], where_clause="WHERE repo_id = {}".format(repo_id)) + insight_table_values = self.get_table_values(['*'], ['repo_insights_records'], where_clause="WHERE repo_id = {}".format(repo_id)) to_model_columns = df.columns[0:len(self.metrics)+1] @@ -415,7 +291,7 @@ def classify_anomalies(df,metric): logging.info("error occurred while storing datapoint: {}\n".format(repr(e))) break - register_task_completion(self, entry_info, repo_id, "insights") + self.register_task_completion(entry_info, repo_id, "insights") def confidence_interval_insights(self, entry_info): """ Anomaly detection method based on confidence intervals @@ -423,7 +299,6 @@ def confidence_interval_insights(self, entry_info): # Update table of endpoints before we query them all logging.info("Discovering insights for task with entry info: {}".format(entry_info)) - record_model_process(self, repo_id, 'insights') # Set the endpoints we want to discover insights for endpoints = [{'cm_info': "issues-new"}, {'cm_info': "code-changes"}, {'cm_info': "code-changes-lines"}, @@ -445,10 +320,10 @@ def confidence_interval_insights(self, entry_info): # If we are discovering insights for a group vs repo, the base url will change if 'repo_group_id' in entry_info and 'repo_id' not in entry_info: base_url = 'http://{}:{}/api/unstable/repo-groups/{}/'.format( - self.config['broker_host'],self.config['broker_port'], entry_info['repo_group_id']) + self.config['api_host'],self.config['api_port'], entry_info['repo_group_id']) else: base_url = 'http://{}:{}/api/unstable/repo-groups/9999/repos/{}/'.format( - self.config['broker_host'],self.config['broker_port'], repo_id) + self.config['api_host'],self.config['api_port'], repo_id) # Hit and discover insights for every endpoint we care about for endpoint in endpoints: @@ -610,50 +485,6 @@ def is_unique_key(key): self.register_task_completion(entry_info, "insights") - def register_task_completion(self, entry_info, model): - # Task to send back to broker - task_completed = { - 'worker_id': self.config['id'], - 'job_type': entry_info['job_type'], - 'repo_id': repo_id, - 'git_url': 
entry_info['git_url'] - } - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.config['zombie_id'], - "timestamp": datetime.datetime.now(), - "status": "Success", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job completion for: " + str(task_completed) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where( - self.job_table.c.job_model==model).values(updated_job)) - logging.info("Update job process for model: " + model + "\n") - - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - - # Reset results counter for next task - self.results_counter = 0 - def send_insight(self, insight, units_from_mean): try: repoSQL = s.sql.text(""" @@ -821,9 +652,9 @@ def confidence_interval(self, data, timeperiod='week', confidence=.95): def update_metrics(self): logging.info("Preparing to update metrics ...\n\n" + "Hitting endpoint: http://{}:{}/api/unstable/metrics/status ...\n".format( - self.config['broker_host'],self.config['broker_port'])) + self.config['api_host'],self.config['api_port'])) r = requests.get(url='http://{}:{}/api/unstable/metrics/status'.format( - self.config['broker_host'],self.config['broker_port'])) + self.config['api_host'],self.config['api_port'])) data = r.json() active_metrics = [metric for metric in data if metric['backend_status'] == 'implemented'] diff --git a/workers/insight_worker/insight_worker/runtime.py b/workers/insight_worker/insight_worker/runtime.py deleted file mode 100644 --- a/workers/insight_worker/insight_worker/runtime.py +++ /dev/null @@ -1,110 +0,0 @@ -from flask import Flask, jsonify, request -from insight_worker.worker import InsightWorker -import click, os, json, logging, requests -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}\n".format(str(request.json))) - app.insight_worker.task = request.json - - #set task - return jsonify({"success": "sucess"}) - - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": app.insight_worker._queue, - "tasks": [{ - "given": list(app.insight_worker._queue) - }] - }) - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - 
""" - return app.insight_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51252, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'insight_worker', None, {}) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.insight_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "anomaly_days": worker_info['anomaly_days'] if 'anomaly_days' in worker_info else 2, - "training_days": worker_info['training_days'] if 'training_days' in worker_info else 365, - "confidence_interval": worker_info['confidence_interval'] if 'confidence_interval' in worker_info else .95, - "contamination": worker_info['contamination'] if 'contamination' in worker_info else 0.041, - 'metrics': worker_info['metrics'] if 'metrics' in worker_info else {"issues-new": "issues", - "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"} - } - - #create instance of the worker - app.insight_worker = InsightWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - print("Starting Flask App on host {} with port {} with pid: ".format(broker_host, worker_port) + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - print("Killing Flask App: {} and telling broker that this worker is disconnected.".format(str(os.getpid()))) - try: - logging.info("Sending disconnected message to broker... 
@ -> {} with info: {}\n".format('http://{}:{}/api/unstable/workers'.format( - config['broker_host'], config['broker_port']), config)) - requests.post('http://{}:{}/api/unstable/workers/remove'.format( - config['broker_host'], config['broker_port']), json=config) #hello message - except Exception as e: - logging.info("Ran into error: {}".format(e)) - logging.info("Broker's port is busy, worker will not be able to accept tasks, " - "please restart Augur if you want this worker to attempt connection again.") - diff --git a/workers/insight_worker/runtime.py b/workers/insight_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/insight_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.insight_worker.insight_worker import InsightWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = InsightWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/insight_worker/setup.py b/workers/insight_worker/setup.py --- a/workers/insight_worker/setup.py +++ b/workers/insight_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="insight_worker", - version="0.0.2", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'insight_worker_start=insight_worker.runtime:main', + 'insight_worker_start=workers.insight_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/linux_badge_worker/__init__.py b/workers/linux_badge_worker/__init__.py new file mode 100644 diff --git a/workers/linux_badge_worker/linux_badge_worker.py b/workers/linux_badge_worker/linux_badge_worker.py new file mode 100644 --- /dev/null +++ b/workers/linux_badge_worker/linux_badge_worker.py @@ -0,0 +1,63 @@ +import os +from datetime import datetime +import logging +import requests +import json +from urllib.parse import quote +from multiprocessing import Process, Queue + +import pandas as pd +import sqlalchemy as s +from sqlalchemy.ext.automap import automap_base +from sqlalchemy import MetaData +from workers.worker_base import Worker + +class LinuxBadgeWorker(Worker): + """ Worker that collects repo badging data from CII + config: database credentials, broker information, and ID + """ + def __init__(self, config={}): + + worker_type = "linux_badge_worker" + + given = [['git_url']] + models = ['badges'] + + data_tables = ['repo_badging'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({"endpoint": "https://bestpractices.coreinfrastructure.org/projects.json?pq="}) + self.tool_source = 'Linux Badge Worker' + self.tool_version = '1.0.0' + self.data_source = 'CII Badging API' + + + def badges_model(self, entry_info, repo_id): + """ Data collection and storage method + Query the CII API and store the result in the DB for the badges model + """ + git_url = entry_info['given']['git_url'] + 
self.logger.info("Collecting data for {}".format(git_url)) + extension = quote(git_url[0:-4]) + + url = self.config['endpoint'] + extension + self.logger.info("Hitting CII endpoint: " + url + " ...") + data = requests.get(url=url).json() + + if data != []: + self.logger.info("Inserting badging data for " + git_url) + self.db.execute(self.repo_badging_table.insert()\ + .values(repo_id=repo_id, + data=data, + tool_source=self.tool_source, + tool_version=self.tool_version, + data_source=self.data_source)) + + self.results_counter += 1 + else: + self.logger.info("No CII data found for {}\n".format(git_url)) + + self.register_task_completion(entry_info, repo_id, "badges") diff --git a/workers/linux_badge_worker/linux_badge_worker/__init__.py b/workers/linux_badge_worker/linux_badge_worker/__init__.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""linux_badge_worker - Augur worker that collects CII badging data""" - -__tool_source__ = 'Linux Badge Worker' -__tool_version__ = '0.1.0' -__data_source__ = 'CII Badging API' - -__author__ = 'Augur Team <[email protected]>' -__all__ = [] - diff --git a/workers/linux_badge_worker/linux_badge_worker/runtime.py b/workers/linux_badge_worker/linux_badge_worker/runtime.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/runtime.py +++ /dev/null @@ -1,107 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from linux_badge_worker.worker import BadgeWorker -from workers.standard_methods import read_config - -def create_server(app): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.linux_badge_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.linux_badge_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51235, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'linux_badge_worker', None, None) - - 
worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.linux_badge_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json?pq=", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - app.linux_badge_worker = BadgeWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - - if app.linux_badge_worker._child is not None: - app.linux_badge_worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/linux_badge_worker/worker.py b/workers/linux_badge_worker/linux_badge_worker/worker.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/worker.py +++ /dev/null @@ -1,240 +0,0 @@ -import os -from datetime import datetime -import logging -import requests -import json -from urllib.parse import quote -from multiprocessing import Process, Queue - -from linux_badge_worker import __data_source__, __tool_source__, __tool_version__ -import pandas as pd -import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData -from workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class BadgeWorker: - """ Worker that collects repo badging data from CII - config: database credentials, broker information, and ID - """ - def __init__(self, config, task=None): - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.config = config - - self.db = None - self.repo_badging_table = None - - self._task = task - self._queue = Queue() - self._child = None - - self.history_id = None - self.finishing_task = False - self.working_on = None - self.results_counter = 0 - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": 
[["git_url"]], - "models":["badges"] - } - ], - "config": [self.config] - } - - self._db_str = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], - self.config['password'], - self.config['host'], - self.config['port'], - self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self._db_str, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self._db_str, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - logging.info("Database connection established...") - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['repo_badging']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - self.repo_badging_table = Base.classes.repo_badging.__table__ - logging.info("ORM setup complete...") - - # Organize different api keys/oauths available - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauth_sql = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(0)) - - for oauth in [{'oauth_id': 0, 'access_token': 0}] + json.loads(pd.read_sql(oauth_sql, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) \ - - datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - - # Send broker hello message - connect_to_broker(self) - logging.info("Connected to the broker...\n") - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "key": "", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['github_api_key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - 
self._queue.put(value) - - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - - self._task = value - self.run() - - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def badges_model(self, entry_info, repo_id): - """ Data collection and storage method - Query the CII API and store the result in the DB for the badges model - """ - git_url = entry_info['given']['git_url'] - logging.info("Collecting data for {}".format(git_url)) - extension = quote(git_url[0:-4]) - - url = self.config['endpoint'] + extension - logging.info("Hitting CII endpoint: " + url + " ...") - data = requests.get(url=url).json() - - if data != []: - logging.info("Inserting badging data for " + git_url) - self.db.execute(self.repo_badging_table.insert()\ - .values(repo_id=repo_id, - data=data, - tool_source=__tool_source__, - tool_version=__tool_version__, - data_source=__data_source__)) - - self.results_counter += 1 - else: - logging.info("No CII data found for {}\n".format(git_url)) - - register_task_completion(self, entry_info, repo_id, "badges") - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - """ Query all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'badges': - self.badges_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() diff --git a/workers/linux_badge_worker/runtime.py b/workers/linux_badge_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/linux_badge_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.linux_badge_worker.linux_badge_worker import LinuxBadgeWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = LinuxBadgeWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/setup.py b/workers/linux_badge_worker/setup.py --- 
a/workers/linux_badge_worker/setup.py +++ b/workers/linux_badge_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="linux_badge_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'linux_badge_worker_start=linux_badge_worker.runtime:main', + 'linux_badge_worker_start=workers.linux_badge_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/metric_status_worker/metric_status_worker/__init__.py b/workers/metric_status_worker/metric_status_worker/__init__.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/metric_status_worker/metric_status_worker/runtime.py b/workers/metric_status_worker/metric_status_worker/runtime.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/runtime.py +++ /dev/null @@ -1,108 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, logging, requests, json -from metric_status_worker.worker import MetricStatusWorker -import os -import json -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.metric_status_worker.task = request.json - - #set task - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "success" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.metric_status_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51263, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'metric_status_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if 
r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.metric_status_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - app.metric_status_worker = MetricStatusWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=port) - if app.metric_status_worker._child is not None: - app.metric_status_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/metric_status_worker/metric_status_worker/worker.py b/workers/metric_status_worker/metric_status_worker/worker.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/worker.py +++ /dev/null @@ -1,719 +0,0 @@ -import base64 -import logging -import os -import re -import sys -import json -import time -from abc import ABC -from datetime import datetime -from multiprocessing import Process, Queue -from urllib.parse import urlparse - -import pandas as pd -import requests -import sqlalchemy as s -from github import Github -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - - -class MetricStatusWorker: - def __init__(self, config, task=None): - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.db = None - self.table = None - self.API_KEY = self.config['key'] - self.tool_source = 'Metric Status Worker' - self.tool_version = '0.0.1' - self.data_source = 'GitHub API' - self.results_counter = 0 - self.working_on = None - - - # url = 'https://api.github.com' - # response = requests.get(url, headers=self.headers) - # self.rate_limit = int(response.headers['X-RateLimit-Remaining']) - - specs = { - "id": self.config['id'], - "location": self.config['location'], - 
"qualifications": [ - { - "given": [["git_url"]], - "models":["chaoss_metric_status"] - } - ], - "config": [self.config] - } - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - logging.info("Making database connections...") - - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['chaoss_metric_status']) - # helper_metadata.reflect(self.helper_db) - - Base = automap_base(metadata=metadata) - - Base.prepare() - - self.chaoss_metric_status_table = Base.classes['chaoss_metric_status'].__table__ - - try: - requests.post('http://{}:{}/api/unstable/workers'.format( - self.config['broker_host'],self.config['broker_port']), json=specs) - except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker') - sys.exit('Cannot connect to the broker! Quitting...') - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced """ - return self._task - - @task.setter - def task(self, value): - try: - if value['job_type'] == 'UPDATE': - self._queue.put(CollectorTask('TASK', {})) - elif value['job_type'] == 'MAINTAIN': - self._maintain_queue.put(CollectorTask('TASK', {})) - - if 'focused_task' in value: - if value['focused_task'] == 1: - self.finishing_task = True - except Exception as e: - logging.error("Error: {},".format(str(e))) - - self._task = CollectorTask(message_type='TASK', entry_info={}) - self.run() - - def cancel(self): - """ Delete/cancel current task """ - self._task = None - - def run(self): - logging.info("Running...") - if self._child is None: - self._child = Process(target=self.collect, args=()) - self._child.start() - requests.post("http://{}:{}/api/unstable/add_pids".format( - self.config['broker_host'],self.config['broker_port']), json={'pids': [self._child.pid, os.getpid()]}) - - def collect(self): - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = 'UPDATE' - elif not self._maintain_queue.empty(): - message = self._maintain_queue.get() - logging.info("Popped off message: {}".format(str(message.entry_info))) - self.working_on = "MAINTAIN" - else: - break - - - if message.type == 'EXIT': - break - if message.type != 'TASK': - raise ValueError( - f'{message.type} is not a recognized task type') - - if message.type == 'TASK': - self.update_metrics(message.entry_info) - - def update_metrics(self, entry_info): - """ Data colletction function - Query the github api for metric status - """ - status = MetricsStatus(self.API_KEY) - status.create_metrics_status() - metrics = status.metrics_status - - # convert to dict - dict_metrics = [] - for metric in metrics: - metric_info = { - 'cm_group': metric['group'], - 'cm_source': metric['data_source'], - 'cm_type': metric['metric_type'], - 'cm_backend_status': metric['backend_status'], - 'cm_frontend_status': metric['frontend_status'], - 'cm_api_endpoint_repo': metric['endpoint_repo'], - 'cm_api_endpoint_rg': metric['endpoint_group'], - 'cm_defined': metric['is_defined'], - 'cm_name': metric['display_name'], - 'cm_working_group': metric['group'], - 'cm_info': metric['tag'], - 'cm_working_group_focus_area': metric['focus_area'], - 'tool_source': self.tool_source, - 'tool_version': self.tool_version, - 'data_source': 
self.data_source, - } - dict_metrics.append(metric_info) - - need_insertion = self.filter_duplicates({'cm_api_endpoint_repo': "cm_api_endpoint_repo", 'cm_backend_status':'cm_api_endpoint_rg'}, ['chaoss_metric_status'], - dict_metrics) - logging.info("Count of contributors needing insertion: " + str(len(need_insertion)) + "\n") - for metric in need_insertion: - result = self.db.execute(self.chaoss_metric_status_table.insert().values(metric)) - logging.info("Primary key inserted into the metrics table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - - self.register_task_completion() - - - # def filter_duplicates(self, og_data): - # need_insertion = [] - # colSQL = s.sql.text(""" - # SELECT * FROM chaoss_metric_status - # """) - # values = pd.read_sql(colSQL, self.db) - # for obj in og_data: - # location = values.loc[ (values['cm_name']==obj['cm_name'] ) & ( values['cm_working_group']==obj[ - # 'cm_working_group']) & ()] - # if not location.empty: - # logging.info("value of tuple exists: " + str(obj['cm_name'])) - # else: - # need_insertion.append(obj) - # - # logging.info("While filtering duplicates, we reduced the data size from " + str(len(og_data)) + - # " to " + str(len(need_insertion)) + "\n") - # - # return need_insertion - - def filter_duplicates(self, cols, tables, og_data): - need_insertion = [] - - table_str = tables[0] - del tables[0] - for table in tables: - table_str += ", " + table - for col in cols.keys(): - colSQL = s.sql.text(""" - SELECT {} FROM {} - """.format(col, table_str)) - values = pd.read_sql(colSQL, self.db, params={}) - - for obj in og_data: - if values.isin([obj[cols[col]]]).any().any(): - logging.info("value of tuple exists: " + str(obj[cols[col]]) + "\n") - elif obj not in need_insertion: - need_insertion.append(obj) - logging.info("While filtering duplicates, we reduced the data size from " + str(len(og_data)) + - " to " + str(len(need_insertion)) + "\n") - return need_insertion - - def update_exist_metrics(self, metrics): - need_update = [] - need_insert = [] - - for metric in metrics: - result = self.db.execute(self.chaoss_metric_status_table.update().where((self.chaoss_metric_status_table.c.cm_name == metric['cm_name'])&(self.chaoss_metric_status_table.c.cm_group == metric['cm_group']) & ((self.chaoss_metric_status_table.c.cm_api_endpoint_repo != metric['cm_api_endpoint_repo']) | (self.chaoss_metric_status_table.c.cm_api_endpoint_rg != metric['cm_api_endpoint_rg'])|(self.chaoss_metric_status_table.c.cm_source != metric['cm_source'])) - ).values(metric)) - - if result.rowcount: - logging.info("Update Metric {}-{}".format(metric['cm_group'], metric['cm_name'])) - - def register_task_completion(self): - task_completed = { - 'worker_id': self.config['id'], - 'job_type': self.working_on, - } - - logging.info("Telling broker we completed task: " + str(task_completed) + "\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - self.results_counter = 0 - - - - -class FrontendStatusExtractor(object): - - def __init__(self): - pass - self.api_text = open(os.path.abspath(os.path.dirname(os.path.dirname(os.getcwd()))) + - "/frontend/src/AugurAPI.ts", 'r').read() - self.attributes = re.findall( - r'(?:(GitEndpoint|Endpoint|Timeseries|addRepoMetric|addRepoGroupMetric)\()\'(.*)\', \'(.*)\'', - self.api_text) - self.timeseries = [ - attribute for attribute in self.attributes if 
attribute[0] == "Timeseries"] - self.endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "Endpoint"] - self.git_endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "GitEndpoint"] - self.repo_metrics = [ - attribute for attribute in self.attributes if attribute[0] == 'addRepoMetric'] - self.group_metric = [ - attribute for attribute in self.attributes if attribute[0] == 'addRepoMetric'] - - def determine_frontend_status(self, metric): - metric.frontend_status = 'unimplemented' - attribute = None - - if metric.metric_type == "timeseries": - attribute = next((attribute for attribute in self.timeseries if - "/api/unstable/<owner>/<repo>/timeseries/{}".format(attribute[2]) == metric.endpoint_repo), - None) - - elif metric.metric_type == "metric": - attribute = next((attribute for attribute in self.endpoints if - "/api/unstable/<owner>/<repo>/{}".format(attribute[2]) == metric.endpoint_repo), None) - if not attribute: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/repos/<repo_id>/{}".format( - attribute[2]) == metric.endpoint_repo), None) - if not attribute and metric.endpoint_group: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/{}".format(attribute[2]) == metric.endpoint_group), None) - - elif metric.metric_type == "git": - attribute = next((attribute for attribute in self.git_endpoints if - "/api/unstable/git/{}".format(attribute[2]) == metric.endpoint_repo), None) - - if attribute is not None: - metric.frontend_status = 'implemented' - metric.chart_mapping = attribute[1] - else: - metric.frontend_status = 'unimplemented' - - -class Metric(ABC): - - def __init__(self): - self.ID = None - self.tag = None - self.display_name = None - self.group = None - self.backend_status = 'unimplemented' - self.frontend_status = 'unimplemented' - self.chart_mapping = None - self.data_source = None - self.metric_type = None - self.documentation_url = None - self.is_defined = False - self.focus_area = None - self.endpoint_group = None - self.endpoint_repo = None - - -class GroupedMetric(Metric): - - def __init__(self, display_name, group, tag, focus_area): - Metric.__init__(self) - self.display_name = display_name - self.tag = tag - self.ID = re.sub(r'-$|\*', '', 'none' + '-' + self.tag) - self.group = group - self.focus_area = focus_area - - -class ImplementedMetric(Metric): - - def __init__(self, metadata, frontend_status_extractor): - Metric.__init__(self) - - self.ID = metadata['ID'] - self.tag = metadata['tag'] - self.display_name = metadata['metric_name'] - self.backend_status = 'implemented' - self.data_source = metadata['source'] - self.group = "experimental" - self.endpoint_group = None - self.endpoint_repo = None - - - if 'metric_type' in metadata: - self.metric_type = metadata['metric_type'] - else: - self.metric_type = 'metric' - - if 'endpoint' in metadata: - if 'group_endpoint' in metadata: - self.endpoint_group = metadata['group_endpoint'] - if 'repo_endpoint' in metadata: - self.endpoint_repo = metadata['repo_endpoint'] - else: - self.endpoint_repo = metadata['endpoint'] - frontend_status_extractor.determine_frontend_status(self) - - -class MetricsStatus(object): - wg_evolution = { - "repo": "chaoss/wg-evolution", - "focus_area": "focus_areas", - "name": 'evolution' - } - - wg_diversity_inclusion = { - "repo": "chaoss/wg-diversity-inclusion", - "focus_area": "focus-areas", - "name": "diversity-inclusion" - } - - 
wg_value = { - "repo": "chaoss/wg-value", - "focus_area": 'focus-areas', - "name": "value" - } - - wg_common = { - "repo": "chaoss/wg-common", - "focus_area": "focus-areas", - "name": "common" - } - - wg_risk = { - "repo": "chaoss/wg-risk", - "focus_area": "focus-areas", - "name": "risk" - } - - def __init__(self, githubapi): - self.__githubapi = githubapi - self.github = Github(self.__githubapi) - - # TODO: don't hardcode this - self.groups = { - "evolution": "Evolution", - "diversity-inclusion": "Diversity and Inclusion metrics", - "value": "Value", - "risk": "Risk", - "common": "Common", - "experimental": "Experimental", - "all": "All" - } - - self.implemented_metrics = [] - - self.evo_metrics = [] - self.di_metrics = [] - self.risk_metrics = [] - self.value_metrics = [] - self.common_metrics = [] - self.experimental_metrics = [] - - self.metrics_by_group = [] - - self.metrics_status = [] - - self.data_sources = [] - self.metric_types = [] - self.tags = {} - self.metadata = [] - - def create_metrics_status(self): - - self.build_implemented_metrics() - - self.evo_metrics = self.create_grouped_metrics( - self.wg_evolution, "evolution") - self.risk_metrics = self.create_grouped_metrics(self.wg_risk, "risk") - self.common_metrics = self.create_grouped_metrics( - self.wg_common, 'common') - self.di_metrics = self.create_grouped_metrics( - self.wg_diversity_inclusion, 'diversity-inclusion') - self.value_metrics = self.create_grouped_metrics( - self.wg_value, 'value') - - self.metrics_by_group = [self.evo_metrics, self.risk_metrics, - self.common_metrics, self.di_metrics, self.value_metrics] - - self.create_experimental_metrics() - self.metrics_by_group.append(self.experimental_metrics) - # - self.copy_implemented_metrics() - - self.find_defined_metrics() - - self.build_metrics_status() - - # self.build_metadata() - - def build_implemented_metrics(self): - frontend_status_extractor = FrontendStatusExtractor() - - r = requests.get( - url='http://{}:{}/api/unstable/batch/metadata'.format( - self.config['broker_host'],self.config['broker_port'])) - data = json.loads(r.text) - - for metric in data: - if "ID" in metric.keys(): - self.implemented_metrics.append( - ImplementedMetric(metric, frontend_status_extractor)) - - def create_grouped_metrics(self, group, group_name): - metrics = self.find_metrics_from_focus_area( - group['repo'], group['focus_area']) - - remote_metrics = [] - for metric in metrics: - remote_metrics.append(GroupedMetric(metric.display_name, group['name'], metric.tag, - metric.focus_area)) - - return remote_metrics - - def find_metrics_from_focus_area(self, repo_name, focus_area_path): - focus_areas = self.github.get_repo( - repo_name).get_dir_contents(focus_area_path) - metrics = [] - for area in focus_areas: - # get focus area name from filename - # focus_area_name = re.sub('.md','',re.sub('-', ' ',area.name)) - focus_area_name = None - focus_area_name_splited = [a.capitalize() for a in re.sub( - '.md', '', re.sub('[_]|[-]', ' ', area.name)).split()] - focus_area_name = ' '.join(focus_area_name_splited) - - # extract structure :focus_area_name/readme.md - if area.type == 'dir': - tmp = self.github.get_repo( - repo_name).get_dir_contents(area.path) - readme = [a for a in tmp if 'readme' in a.name.lower()] - if len(readme) == 0: - continue - else: - area = readme[0] - elif 'readme' in area.name.lower() or 'changelog' in area.name.lower(): - continue - - # decode content; github api return encoded content - decoded_content = base64.b64decode(area.content).decode('utf-8') - 
metric_name_tag = self.parse_table( - decoded_content) or self.parse_list(decoded_content) - - for name, tag in metric_name_tag.items(): - add_metric = Metric() - add_metric.display_name = name - add_metric.tag = tag - add_metric.focus_area = focus_area_name - - metrics.append(add_metric) - - if metric_name_tag is None: - continue - - return metrics - - def parse_table(self, md_content): - # group 0 is header, group 2 is |---|--|, and group 3 is table content - tables = re.findall( - r'^(\|?[^\n]+\|[^\n]+\|?\r?\n)((?:\|?\s*:?[-]+\s*:?)+\|?)(\n(?:\|?[^\n]+\|[^\n]+\|?\r?\n?)*)?$', md_content, - re.MULTILINE) - - if not tables: - return None - - box = [] - metrics_name_tag = {} - for table in tables: - # get metric name by 'metric_name' index in column - metric_index, length_in_row = self.get_metric_index_in_table_row( - table[0]) - table_content = [x.strip() - for x in table[2].replace('\n', '|').split('|')] - # remove two empty str - table_content.pop(0) - table_content.pop() - - raw_metrics = [table_content[a] for a in range( - metric_index, len(table_content), length_in_row)] - - for raw_metric in raw_metrics: - metric_name, metric_link = self.is_has_link( - raw_metric, md_content) - metric_name = re.sub('[\[]|[\]]', '', metric_name) - if not metric_link: - metric_link = re.sub(' ', '-', metric_name).lower() - metrics_name_tag[metric_name] = self.link_to_tag( - metric_name, str(metric_link)) - - return metrics_name_tag - - def get_metric_index_in_table_row(self, row): - header_names = [x.strip().lower() for x in row.split('|')] - # print(header_names) - index = None - if 'metric' in header_names: - index = header_names.index('metric') - elif 'name' in header_names: - index = header_names.index('name') - - return index, len(header_names) - - def parse_list(self, md_content): - matched_lists = re.findall(r'[-]\s+(.+)\n', md_content) - metric_names = {} - # print(matched_lists) - for matched in matched_lists: - # print(matched) - metirc_name = re.sub(r'.+:\s', '', matched) - metirc_name, metric_link = self.is_has_link( - metirc_name, md_content) - metirc_name = re.sub('[\[]|[\]]', '', metirc_name) - metric_names[metirc_name] = self.link_to_tag( - metirc_name, metric_link) - return metric_names - - def is_has_link(self, s, md_content): - # remove leading whitespace if exist - s = s.strip() - pattern_inline = re.compile(r'\[([^\[\]]+)\]\(([^)]+)') - match = pattern_inline.match(s) - - if match: - return match.group(1), match.group(2) - - pattern_ref = re.compile(r'\[([^\[\]]+)\]\[([^]]+)') - match2 = pattern_ref.match(s) - - if match2: - link = match2.group(2) - p = re.compile(r'\n\[' + link + r'\]:\s+(.+)\n') - res = p.search(md_content, re.DOTALL) - if res: - return match2.group(1), res.group(1) - else: - return s, None - - def link_to_tag(self, name, s): - - # generate tag if undefined metric - if not s: - return re.sub(' ', '-', name.lower()) - - pattern = re.compile(r'\/?([a-zA-Z_-]+)(\.md)?$') - m = pattern.search(s) - if m: - return re.sub('_', '-', re.sub('.md', '', m.group(1).lower())) - else: - return re.sub(' ', '-', re.sub('\(s\)', 's', name)) - - def create_experimental_metrics(self): - tags = [] - for group in self.metrics_by_group: - for metric in group: - tags.append(metric.tag) - - self.experimental_metrics = [ - metric for metric in self.implemented_metrics if metric.tag not in tags] - - def copy_implemented_metrics(self): - # takes implemented metrics and copies their data to the appropriate metric object - # I am so very sorry - # TODO: burn this into the ground - for group 
in enumerate(self.metrics_by_group): - if group[1] is not self.experimental_metrics: - for grouped_metric in group[1]: - defined_implemented_metrics = [ - metric for metric in self.implemented_metrics if grouped_metric.tag == metric.tag] - if defined_implemented_metrics != []: - for metric in defined_implemented_metrics: - metric.group = group[1][0].group - metric.focus_area = grouped_metric.focus_area - group[1].append(metric) - self.implemented_metrics.remove(metric) - grouped_metric.ID = 'n/a' - self.metrics_by_group[group[0]] = [ - metric for metric in group[1] if metric.ID != 'n/a'] - - def find_defined_metrics(self): - # return map {tag: html_url} - repo_names = [self.wg_common['repo'], self.wg_evolution['repo'], - self.wg_diversity_inclusion['repo'], self.wg_risk['repo'], self.wg_value['repo']] - - md_files = {} - - for repo_name in repo_names: - repo = self.github.get_repo(repo_name) - contents = repo.get_contents("") - - while len(contents) > 1: - file_content = contents.pop(0) - if file_content.type == "dir": - contents.extend(repo.get_contents(file_content.path)) - elif '.md' in file_content.name: - name = re.sub( - '_', '-', re.sub('.md', '', file_content.name)) - md_files[name.lower()] = file_content.html_url - - for group in self.metrics_by_group: - for metric in group: - if metric.tag in md_files.keys(): - metric.is_defined = True - metric.documentation_url = md_files[metric.tag] - - def build_metrics_status(self): - for group in self.metrics_by_group: - for metric in group: - self.metrics_status.append(metric.__dict__) - - def build_metadata(self): - self.get_metric_sources() - self.get_metric_types() - self.get_metric_tags() - - self.metadata = { - "remotes": { - "diversity_inclusion_urls": self.diversity_inclusion_urls, - "growth_maturity_decline_urls": self.growth_maturity_decline_urls, - "risk_urls": self.risk_urls, - "value_urls": self.value_urls, - "activity_repo_urls": self.activity_urls - }, - "groups": self.groups, - "data_sources": self.data_sources, - "metric_types": self.metric_types, - "tags": self.tags - } - - def get_metric_sources(self): - for data_source in [metric['data_source'] for metric in self.metrics_status]: - data_source = data_source.lower() - if data_source not in self.data_sources and data_source != "none": - self.data_sources.append(data_source) - self.data_sources.append("all") - - def get_metric_types(self): - for metric_type in [metric['metric_type'] for metric in self.metrics_status]: - metric_type = metric_type.lower() - if metric_type not in self.metric_types and metric_type != "none": - self.metric_types.append(metric_type) - self.metric_types.append("all") - - def get_metric_tags(self): - for tag in [(metric['tag'], metric['group']) for metric in self.metrics_status]: - # tag[0] = tag[0].lower() - if tag[0] not in [tag[0] for tag in self.tags] and tag[0] != "none": - self.tags[tag[0]] = tag[1] \ No newline at end of file diff --git a/workers/pull_request_worker/pull_request_worker/__init__.py b/workers/pull_request_worker/__init__.py similarity index 100% rename from workers/pull_request_worker/pull_request_worker/__init__.py rename to workers/pull_request_worker/__init__.py diff --git a/workers/pull_request_worker/pull_request_worker/worker.py b/workers/pull_request_worker/pull_request_worker.py similarity index 61% rename from workers/pull_request_worker/pull_request_worker/worker.py rename to workers/pull_request_worker/pull_request_worker.py --- a/workers/pull_request_worker/pull_request_worker/worker.py +++ 
b/workers/pull_request_worker/pull_request_worker.py @@ -1,225 +1,42 @@ import ast, json, logging, os, sys, time, traceback, requests from datetime import datetime from multiprocessing import Process, Queue -from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base -from workers.standard_methods import * from sqlalchemy.sql.expression import bindparam +from workers.worker_base import Worker -class GHPullRequestWorker: +class GitHubPullRequestWorker(Worker): """ Worker that collects Pull Request related data from the Github API and stores it in our database. :param task: most recent task the broker added to the worker's queue :param config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.working_on = None - self.config = config - LOG_FORMAT = '%(levelname)s:[%(name)s]: %(message)s' - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO, format=LOG_FORMAT) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self.db = None - self.table = None - self.API_KEY = self.config['key'] - self.tool_source = 'GitHub Pull Request Worker' - self.tool_version = '0.0.1' # See __init__.py - self.data_source = 'GitHub API' - self.results_counter = 0 - self.headers = {'Authorization': f'token {self.API_KEY}'} - self.history_id = None - self.finishing_task = True - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [['github_url']], - "models":['pull_requests', 'pull_request_commits', 'pull_request_files'] - } - ], - "config": [self.config] - } + def __init__(self, config={}): - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], - self.config['port'], self.config['database'] - ) + worker_type = "pull_request_worker" - #Database connections - logging.info("Making database connections...\n") - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) + # Define what this worker can be given and know how to interpret + given = [['github_url']] + models = ['pull_requests', 'pull_request_commits', 'pull_request_files'] - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['contributors', 'pull_requests', + # Define the tables needed to insert, update, or delete on + data_tables = ['contributors', 'pull_requests', 'pull_request_assignees', 'pull_request_events', 'pull_request_labels', 'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo', 'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits', - 'pull_request_files']) - - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) + 'pull_request_files'] + operations_tables = ['worker_history', 'worker_job'] - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - 
HelperBase.prepare() - - self.contributors_table = Base.classes.contributors.__table__ - self.pull_requests_table = Base.classes.pull_requests.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.pull_request_labels_table = Base.classes.pull_request_labels.__table__ - self.pull_request_message_ref_table = Base.classes.pull_request_message_ref.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_teams_table = Base.classes.pull_request_teams.__table__ - self.message_table = Base.classes.message.__table__ - self.pull_request_commits_table = Base.classes.pull_request_commits.__table__ - self.pull_request_files_table = Base.classes.pull_request_files.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - logging.info("Querying starting ids info...\n") - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - self.pr_id_inc = get_max_id(self, 'pull_requests', 'pull_request_id') - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - self.msg_id_inc = get_max_id(self, 'message', 'msg_id') - self.pr_msg_ref_id_inc = get_max_id(self, 'pull_request_message_ref', 'pr_msg_ref_id') - self.label_id_inc = get_max_id(self, 'pull_request_labels', 'pr_label_id') - self.event_id_inc = get_max_id(self, 'pull_request_events', 'pr_event_id') - self.reviewer_id_inc = get_max_id(self, 'pull_request_reviewers', 'pr_reviewer_map_id') - self.assignee_id_inc = get_max_id(self, 'pull_request_assignees', 'pr_assignee_map_id') - self.pr_meta_id_inc = get_max_id(self, 'pull_request_meta', 'pr_repo_meta_id') - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - # self.pull_requests_graphql({ - # 'job_type': 'MAINTAIN', - # 'models': ['pull_request_files'], - # 'display_name': 'pull_request_files model for url: https://github.com/zephyrproject-rtos/actions_sandbox.git', - # 'given': { - # 'github_url': 'https://github.com/zephyrproject-rtos/actions_sandbox.git' - # } - # }, 25201) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - github_url = value['given']['github_url'] - - repo_url_SQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(github_url)) - rs = pd.read_sql(repo_url_SQL, self.db, params={}) - - try: - repo_id = int(rs.iloc[0]['repo_id']) - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - if 'focused_task' in value: - if value['focused_task'] == 1: - self.finishing_task = True - - 
except Exception as e: - logging.error(f"error: {e}, or that repo is not in our database: {value}\n") - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query all repos with repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['github_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'pull_requests': - self.pull_requests_model(message, repo_id) - elif message['models'][0] == 'pull_request_commits': - self.pull_request_commits_model(message, repo_id) - elif message['models'][0] == 'pull_request_files': - self.pull_requests_graphql(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + # Define data collection info + self.tool_source = 'GitHub Pull Request Worker' + self.tool_version = '1.0.0' + self.data_source = 'GitHub API' + def graphql_paginate(self, query, data_subjects, before_parameters=None): """ Paginate a GitHub GraphQL query backwards @@ -227,7 +44,7 @@ def graphql_paginate(self, query, data_subjects, before_parameters=None): :rtype: A Pandas DataFrame, contains all data contained in the pages """ - logging.info(f'Start paginate with params: \n{data_subjects} ' + self.logger.info(f'Start paginate with params: \n{data_subjects} ' f'\n{before_parameters}') def all_items(dictionary): @@ -262,7 +79,7 @@ def find_root_of_subject(data, key_subject): for data_subject, nest in data_subjects.items(): - logging.info(f'Beginning paginate process for field {data_subject} ' + self.logger.info(f'Beginning paginate process for field {data_subject} ' f'for query: {query}') page_count = 0 @@ -274,13 +91,13 @@ def find_root_of_subject(data, key_subject): success = False for attempt in range(num_attempts): - logging.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' + self.logger.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' f'page number {page_count}\n') response = requests.post(base_url, json={'query': query.format( **before_parameters)}, headers=self.headers) - update_gh_rate_limit(self, response) + self.update_gh_rate_limit(response) try: data = response.json() @@ -288,9 +105,9 @@ def find_root_of_subject(data, key_subject): data = json.loads(json.dumps(response.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) + self.logger.info("Error!: {}".format(data['errors'])) if data['errors'][0]['type'] == 'RATE_LIMITED': - 
update_gh_rate_limit(self, response) + self.update_gh_rate_limit(response) num_attempts -= 1 continue @@ -302,18 +119,18 @@ def find_root_of_subject(data, key_subject): data = root['edges'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url)) break if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': num_attempts -= 1 - update_gh_rate_limit(self, response, temporarily_disable=True) + self.update_gh_rate_limit(response, temporarily_disable=True) if data['message'] == 'Bad credentials': - update_gh_rate_limit(self, response, bad_credentials=True) + self.update_gh_rate_limit(response, bad_credentials=True) if not success: - logging.info('GraphQL query failed: {}'.format(query)) + self.logger.info('GraphQL query failed: {}'.format(query)) continue before_parameters.update({ @@ -323,7 +140,7 @@ def find_root_of_subject(data, key_subject): tuples += data - logging.info(f'Paged through {page_count} pages and ' + self.logger.info(f'Paged through {page_count} pages and ' f'collected {len(tuples)} data points\n') if not nest: @@ -333,9 +150,9 @@ def find_root_of_subject(data, key_subject): before_parameters=before_parameters) - def pull_requests_graphql(self, task_info, repo_id): + def pull_request_files_model(self, task_info, repo_id): - owner, repo = get_owner_repo(task_info['given']['github_url']) + owner, repo = self.get_owner_repo(task_info['given']['github_url']) # query existing PRs and the respective url we will append the commits url to pr_number_sql = s.sql.text(""" @@ -349,7 +166,7 @@ def pull_requests_graphql(self, task_info, repo_id): for index, pull_request in enumerate(pr_numbers.itertuples()): - logging.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') + self.logger.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') query = """ {{ @@ -394,26 +211,24 @@ def pull_requests_graphql(self, task_info, repo_id): WHERE pull_request_files.pull_request_id = pull_requests.pull_request_id AND repo_id = :repo_id """) - logging.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') + self.logger.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') table_values = pd.read_sql(table_values_sql, self.db, params={'repo_id': repo_id}) # Compare queried values against table values for dupes/updates if len(pr_file_rows) > 0: table_columns = pr_file_rows[0].keys() else: - logging.info(f'No rows need insertion for repo {repo_id}\n') - register_task_completion(self, task_info, repo_id, 'pull_request_files') + self.logger.info(f'No rows need insertion for repo {repo_id}\n') + self.register_task_completion(task_info, repo_id, 'pull_request_files') + return # Compare queried values against table values for dupes/updates pr_file_rows_df = pd.DataFrame(pr_file_rows) pr_file_rows_df = pr_file_rows_df.dropna(subset=['pull_request_id']) - pr_file_rows_df['need_update'] = 0 dupe_columns = ['pull_request_id', 'pr_file_path'] update_columns = ['pr_file_additions', 'pr_file_deletions'] - logging.info(f'{pr_file_rows_df}') - logging.info(f'{table_values}') need_insertion = pr_file_rows_df.merge(table_values, 
suffixes=('','_table'), how='outer', indicator=True, on=dupe_columns).loc[ lambda x : x['_merge']=='left_only'][table_columns] @@ -430,7 +245,7 @@ def pull_requests_graphql(self, task_info, repo_id): pr_file_insert_rows = need_insertion.to_dict('records') pr_file_update_rows = need_updates.to_dict('records') - logging.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' + self.logger.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' f'{len(need_updates)} updates.\n') if len(pr_file_update_rows) > 0: @@ -447,7 +262,7 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) if len(pr_file_insert_rows) > 0: @@ -460,14 +275,22 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) - register_task_completion(self, task_info, repo_id, 'pull_request_files') + self.register_task_completion(task_info, repo_id, 'pull_request_files') def pull_request_commits_model(self, task_info, repo_id): """ Queries the commits related to each pull request already inserted in the db """ + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + + # query existing PRs and the respective url we will append the commits url to pr_url_sql = s.sql.text(""" SELECT DISTINCT pr_url, pull_requests.pull_request_id @@ -484,7 +307,7 @@ def pull_request_commits_model(self, task_info, repo_id): update_col_map = {} # Use helper paginate function to iterate the commits url and check for dupes - pr_commits = paginate(self, commits_url, duplicate_col_map, update_col_map, table, table_pkey, + pr_commits = self.paginate(commits_url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="where pull_request_id = {}".format(pull_request.pull_request_id)) for pr_commit in pr_commits: # post-pagination, iterate results @@ -500,9 +323,9 @@ def pull_request_commits_model(self, task_info, repo_id): 'data_source': 'GitHub API', } result = self.db.execute(self.pull_request_commits_table.insert().values(pr_commit_row)) - logging.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") - register_task_completion(self, task_info, repo_id, 'pull_request_commits') + self.register_task_completion(task_info, repo_id, 'pull_request_commits') def pull_requests_model(self, entry_info, repo_id): """Pull Request data collection function. Query GitHub API for PhubRs. 
@@ -510,11 +333,18 @@ def pull_requests_model(self, entry_info, repo_id): :param entry_info: A dictionary consisiting of 'git_url' and 'repo_id' :type entry_info: dict """ + + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + github_url = entry_info['given']['github_url'] - logging.info('Beginning collection of Pull Requests...\n') - logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') - record_model_process(self, repo_id, 'pull_requests') + self.logger.info('Beginning collection of Pull Requests...\n') + self.logger.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') owner, repo = self.get_owner_repo(github_url) @@ -530,12 +360,12 @@ def pull_requests_model(self, entry_info, repo_id): duplicate_col_map = {'pr_src_id': 'id'} #list to hold pull requests needing insertion - prs = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, + prs = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey, where_clause='WHERE repo_id = {}'.format(repo_id), value_update_col_map={'pr_augur_contributor_id': float('nan')}) # Discover and remove duplicates before we start inserting - logging.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") + self.logger.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") for pr_dict in prs: @@ -553,7 +383,7 @@ def pull_requests_model(self, entry_info, repo_id): 'pr_src_state': pr_dict['state'], 'pr_src_locked': pr_dict['locked'], 'pr_src_title': pr_dict['title'], - 'pr_augur_contributor_id': find_id_from_login(self, pr_dict['user']['login']), + 'pr_augur_contributor_id': self.find_id_from_login(pr_dict['user']['login']), 'pr_body': pr_dict['body'], 'pr_created_at': pr_dict['created_at'], 'pr_updated_at': pr_dict['updated_at'], @@ -581,21 +411,21 @@ def pull_requests_model(self, entry_info, repo_id): } if pr_dict['flag'] == 'need_insertion': - logging.info(f'PR {pr_dict["id"]} needs to be inserted\n') + self.logger.info(f'PR {pr_dict["id"]} needs to be inserted\n') result = self.db.execute(self.pull_requests_table.insert().values(pr)) - logging.info(f"Added Pull Request: {result.inserted_primary_key}") + self.logger.info(f"Added Pull Request: {result.inserted_primary_key}") self.pr_id_inc = int(result.inserted_primary_key[0]) elif pr_dict['flag'] == 'need_update': result = self.db.execute(self.pull_requests_table.update().where( self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr)) - logging.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( + self.logger.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( pr_dict['id'])) self.pr_id_inc = pr_dict['pkey'] else: - logging.info("PR does not need to be inserted. Fetching its id from DB") + self.logger.info("PR does not need to be inserted. 
Fetching its id from DB") pr_id_sql = s.sql.text(""" SELECT pull_request_id FROM pull_requests WHERE pr_src_id={} @@ -609,16 +439,16 @@ def pull_requests_model(self, entry_info, repo_id): self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc) self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc) - logging.info(f"Inserted PR data for {owner}/{repo}") + self.logger.info(f"Inserted PR data for {owner}/{repo}") self.results_counter += 1 - register_task_completion(self, entry_info, repo_id, 'pull_requests') + self.register_task_completion(entry_info, repo_id, 'pull_requests') def query_labels(self, labels, pr_id): - logging.info('Querying PR Labels\n') + self.logger.info('Querying PR Labels\n') if len(labels) == 0: - logging.info('No new labels to add\n') + self.logger.info('No new labels to add\n') return table = 'pull_request_labels' @@ -629,12 +459,12 @@ def query_labels(self, labels, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - pr_labels_table_values = get_table_values(self, cols_query, [table]) + pr_labels_table_values = self.get_table_values(cols_query, [table]) - new_labels = assign_tuple_action(self, labels, pr_labels_table_values, update_col_map, duplicate_col_map, + new_labels = self.assign_tuple_action(labels, pr_labels_table_values, update_col_map, duplicate_col_map, table_pkey) - logging.info(f'Found {len(new_labels)} labels\n') + self.logger.info(f'Found {len(new_labels)} labels\n') for label_dict in new_labels: @@ -653,14 +483,13 @@ def query_labels(self, labels, pr_id): if label_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_labels_table.insert().values(label)) - logging.info(f"Added PR Label: {result.inserted_primary_key}\n") - logging.info(f"Inserted PR Labels data for PR with id {pr_id}\n") + self.logger.info(f"Added PR Label: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted PR Labels data for PR with id {pr_id}\n") self.results_counter += 1 - self.label_id_inc = int(result.inserted_primary_key[0]) def query_pr_events(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Events\n') + self.logger.info('Querying PR Events\n') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/events?per_page=100&page={}') @@ -674,14 +503,14 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): duplicate_col_map = {'issue_event_src_id': 'id'} #list to hold contributors needing insertion or update - pr_events = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey) + pr_events = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") + self.logger.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") for pr_event_dict in pr_events: if pr_event_dict['actor']: - cntrb_id = find_id_from_login(self, pr_event_dict['actor']['login']) + cntrb_id = self.find_id_from_login(pr_event_dict['actor']['login']) else: cntrb_id = 1 @@ -700,18 +529,17 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.pull_request_events_table.insert().values(pr_event)) - logging.info(f"Added PR Event: {result.inserted_primary_key}\n") + self.logger.info(f"Added PR Event: {result.inserted_primary_key}\n") self.results_counter += 1 - self.event_id_inc = int(result.inserted_primary_key[0]) - 
logging.info(f"Inserted PR Events data for PR with id {pr_id}\n") + self.logger.info(f"Inserted PR Events data for PR with id {pr_id}\n") def query_reviewers(self, reviewers, pr_id): - logging.info('Querying Reviewers') + self.logger.info('Querying Reviewers') if reviewers is None or len(reviewers) == 0: - logging.info('No reviewers to add') + self.logger.info('No reviewers to add') return table = 'pull_request_reviewers' @@ -722,15 +550,15 @@ def query_reviewers(self, reviewers, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - reviewers_table_values = get_table_values(self, cols_query, [table]) + reviewers_table_values = self.get_table_values(cols_query, [table]) - new_reviewers = assign_tuple_action(self, reviewers, reviewers_table_values, update_col_map, duplicate_col_map, + new_reviewers = self.assign_tuple_action(reviewers, reviewers_table_values, update_col_map, duplicate_col_map, table_pkey) for reviewers_dict in new_reviewers: if 'login' in reviewers_dict: - cntrb_id = find_id_from_login(self, reviewers_dict['login']) + cntrb_id = self.find_id_from_login(reviewers_dict['login']) else: cntrb_id = 1 @@ -744,18 +572,17 @@ def query_reviewers(self, reviewers, pr_id): if reviewers_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_reviewers_table.insert().values(reviewer)) - logging.info(f"Added PR Reviewer {result.inserted_primary_key}") + self.logger.info(f"Added PR Reviewer {result.inserted_primary_key}") - self.reviewer_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 - logging.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") + self.logger.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") def query_assignee(self, assignees, pr_id): - logging.info('Querying Assignees') + self.logger.info('Querying Assignees') if assignees is None or len(assignees) == 0: - logging.info('No assignees to add') + self.logger.info('No assignees to add') return table = 'pull_request_assignees' @@ -766,15 +593,15 @@ def query_assignee(self, assignees, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - assignee_table_values = get_table_values(self, cols_query, [table]) + assignee_table_values = self.get_table_values(cols_query, [table]) - assignees = assign_tuple_action(self, assignees, assignee_table_values, update_col_map, duplicate_col_map, + assignees = self.assign_tuple_action(assignees, assignee_table_values, update_col_map, duplicate_col_map, table_pkey) for assignee_dict in assignees: if 'login' in assignee_dict: - cntrb_id = find_id_from_login(self, assignee_dict['login']) + cntrb_id = self.find_id_from_login(assignee_dict['login']) else: cntrb_id = 1 @@ -788,15 +615,14 @@ def query_assignee(self, assignees, pr_id): if assignee_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_assignees_table.insert().values(assignee)) - logging.info(f'Added PR Assignee {result.inserted_primary_key}') + self.logger.info(f'Added PR Assignee {result.inserted_primary_key}') - self.assignee_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 - logging.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') def query_pr_meta(self, head, base, pr_id): - logging.info('Querying PR Meta') + 
self.logger.info('Querying PR Meta') table = 'pull_request_meta' duplicate_col_map = {'pr_sha': 'sha'} @@ -808,12 +634,12 @@ def query_pr_meta(self, head, base, pr_id): update_keys += list(value_update_col_map.keys()) cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - meta_table_values = get_table_values(self, cols_query, [table]) + meta_table_values = self.get_table_values(cols_query, [table]) pr_meta_dict = { - 'head': assign_tuple_action(self, [head], meta_table_values, update_col_map, duplicate_col_map, + 'head': self.assign_tuple_action([head], meta_table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map=value_update_col_map)[0], - 'base': assign_tuple_action(self, [base], meta_table_values, update_col_map, duplicate_col_map, + 'base': self.assign_tuple_action([base], meta_table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map=value_update_col_map)[0] } @@ -824,7 +650,7 @@ def query_pr_meta(self, head, base, pr_id): 'pr_src_meta_label': pr_meta_data['label'], 'pr_src_meta_ref': pr_meta_data['ref'], 'pr_sha': pr_meta_data['sha'], - 'cntrb_id': find_id_from_login(self, pr_meta_data['user']['login']) if pr_meta_data['user'] \ + 'cntrb_id': self.find_id_from_login(pr_meta_data['user']['login']) if pr_meta_data['user'] \ and 'login' in pr_meta_data['user'] else None, 'tool_source': self.tool_source, 'tool_version': self.tool_version, @@ -836,13 +662,12 @@ def query_pr_meta(self, head, base, pr_id): self.pull_request_meta_table.c.pr_sha==pr_meta['pr_sha'] and self.pull_request_meta_table.c.pr_head_or_base==pr_side ).values(pr_meta)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( - issue_dict['id'])) - self.issue_id_inc = issue_dict['pkey'] + # self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format(issue_dict['id'])) + self.pr_meta_id_inc = pr_meta_data['pkey'] elif pr_meta_data['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_meta_table.insert().values(pr_meta)) - logging.info(f'Added PR Head {result.inserted_primary_key}') + self.logger.info(f'Added PR Head {result.inserted_primary_key}') self.pr_meta_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 @@ -857,12 +682,12 @@ def query_pr_meta(self, head, base, pr_id): if pr_meta_data['repo']: self.query_pr_repo(pr_meta_data['repo'], pr_side, self.pr_meta_id_inc) else: - logging.info('No new PR Head data to add') + self.logger.info('No new PR Head data to add') - logging.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Comments') + self.logger.info('Querying PR Comments') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/comments?per_page=100&page={}') @@ -876,14 +701,14 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): duplicate_col_map = {'pr_message_ref_src_comment_id': 'id'} #list to hold contributors needing insertion or update - pr_messages = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey) + pr_messages = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") + self.logger.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") for 
pr_msg_dict in pr_messages: if pr_msg_dict['user'] and 'login' in pr_msg_dict['user']: - cntrb_id = find_id_from_login(self, pr_msg_dict['user']['login']) + cntrb_id = self.find_id_from_login(pr_msg_dict['user']['login']) else: cntrb_id = 1 @@ -901,12 +726,11 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.message_table.insert().values(msg)) - logging.info(f'Added PR Comment {result.inserted_primary_key}') - self.msg_id_inc = int(result.inserted_primary_key[0]) + self.logger.info(f'Added PR Comment {result.inserted_primary_key}') pr_msg_ref = { 'pull_request_id': pr_id, - 'msg_id': self.msg_id_inc, + 'msg_id': int(result.inserted_primary_key[0]), 'pr_message_ref_src_comment_id': pr_msg_dict['id'], 'pr_message_ref_src_node_id': pr_msg_dict['node_id'], 'tool_source': self.tool_source, @@ -917,15 +741,14 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): result = self.db.execute( self.pull_request_message_ref_table.insert().values(pr_msg_ref) ) - logging.info(f'Added PR Message Ref {result.inserted_primary_key}') - self.pr_msg_ref_id_inc = int(result.inserted_primary_key[0]) + self.logger.info(f'Added PR Message Ref {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR Message data for PR with id {pr_id}') + self.logger.info(f'Finished adding PR Message data for PR with id {pr_id}') def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): - logging.info(f'Querying PR {pr_repo_type} repo') + self.logger.info(f'Querying PR {pr_repo_type} repo') table = 'pull_request_repo' duplicate_col_map = {'pr_src_repo_id': 'id'} @@ -935,13 +758,13 @@ def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - pr_repo_table_values = get_table_values(self, cols_query, [table]) + pr_repo_table_values = self.get_table_values(cols_query, [table]) - new_pr_repo = assign_tuple_action(self, [pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, + new_pr_repo = self.assign_tuple_action([pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, table_pkey)[0] if new_pr_repo['owner'] and 'login' in new_pr_repo['owner']: - cntrb_id = find_id_from_login(self, new_pr_repo['owner']['login']) + cntrb_id = self.find_id_from_login(new_pr_repo['owner']['login']) else: cntrb_id = 1 @@ -962,20 +785,8 @@ def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): if new_pr_repo['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_repo_table.insert().values(pr_repo)) - logging.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') + self.logger.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') - - def get_owner_repo(self, github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - + self.logger.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') diff --git a/workers/pull_request_worker/pull_request_worker/runtime.py b/workers/pull_request_worker/pull_request_worker/runtime.py deleted file mode 100644 --- a/workers/pull_request_worker/pull_request_worker/runtime.py +++ /dev/null @@ -1,109 +0,0 @@ -import json, logging, os, click -import requests -from flask import Flask, 
Response, jsonify, request -from pull_request_worker.worker import GHPullRequestWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': # POST a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.gh_pr_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.gh_pr_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51239, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'pull_request_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - print("New pull request worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.pull_request_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - - app.gh_pr_worker = GHPullRequestWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + 
str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.gh_pr_worker._child is not None: - app.gh_pr_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/pull_request_worker/runtime.py b/workers/pull_request_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/pull_request_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.pull_request_worker.pull_request_worker import GitHubPullRequestWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubPullRequestWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/pull_request_worker/setup.py b/workers/pull_request_worker/setup.py --- a/workers/pull_request_worker/setup.py +++ b/workers/pull_request_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="pull_request_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'pull_request_worker_start=pull_request_worker.runtime:main', + 'pull_request_worker_start=workers.pull_request_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/repo_info_worker/repo_info_worker/__init__.py b/workers/release_worker/__init__.py similarity index 50% rename from workers/repo_info_worker/repo_info_worker/__init__.py rename to workers/release_worker/__init__.py --- a/workers/repo_info_worker/repo_info_worker/__init__.py +++ b/workers/release_worker/__init__.py @@ -1,4 +1,4 @@ -"""gh_repo_info_worker - Augur Worker that collects GitHub Repo Info data""" +"""gh_release_worker - Augur Worker that collects GitHub Repo Info data""" __version__ = '0.0.0' __author__ = 'Augur Team <[email protected]>' diff --git a/workers/release_worker/release_worker.py b/workers/release_worker/release_worker.py new file mode 100644 --- /dev/null +++ b/workers/release_worker/release_worker.py @@ -0,0 +1,154 @@ +import logging, os, sys, time, requests, json +from datetime import datetime +from multiprocessing import Process, Queue +from urllib.parse import urlparse +import pandas as pd +import sqlalchemy as s +from sqlalchemy import MetaData +from sqlalchemy.ext.automap import automap_base +from workers.worker_base import Worker + +#TODO - fully edit to match releases +class ReleaseWorker(Worker): + def __init__(self, config={}): + + worker_type = "release_worker" + + # Define what this worker can be given and know how to interpret + given = [['github_url']] + models = ['releases'] + + # Define the tables needed to insert, update, or delete on + data_tables = ['releases'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # 
Define data collection info
+        self.tool_source = 'Release Worker'
+        self.tool_version = '1.0.0'
+        self.data_source = 'GitHub API'
+
+    def releases_model(self, task, repo_id):
+
+        github_url = task['given']['github_url']
+
+        self.logger.info("Beginning filling the releases model for repo: " + github_url + "\n")
+
+        owner, repo = self.get_owner_repo(github_url)
+
+        url = 'https://api.github.com/graphql'
+
+        query = """
+            {
+                repository(owner:"%s", name:"%s"){
+                    id
+                    releases(orderBy: {field: CREATED_AT, direction: ASC}, last: %d) {
+                        edges {
+                            node {
+                                name
+                                publishedAt
+                                createdAt
+                                description
+                                id
+                                isDraft
+                                isPrerelease
+                                tagName
+                                url
+                                updatedAt
+                                author {
+                                    name
+                                    company
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        """ % (owner, repo, 10)
+
+        # Hit the graphql endpoint and retry 3 times in case of failure
+        num_attempts = 0
+        success = False
+        while num_attempts < 3:
+            self.logger.info("Hitting endpoint: {} ...\n".format(url))
+            r = requests.post(url, json={'query': query}, headers=self.headers)
+            self.update_gh_rate_limit(r)
+
+            try:
+                data = r.json()
+            except:
+                data = json.loads(json.dumps(r.text))
+
+            if 'errors' in data:
+                self.logger.info("Error!: {}".format(data['errors']))
+                if data['errors'][0]['message'] == 'API rate limit exceeded':
+                    self.update_gh_rate_limit(r)
+                    continue
+
+            if 'data' in data:
+                success = True
+                data = data['data']['repository']
+                break
+            else:
+                self.logger.info("Request returned a non-data dict: {}\n".format(data))
+                if data['message'] == 'Not Found':
+                    self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
+                    break
+                if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
+                    self.update_gh_rate_limit(r, temporarily_disable=True)
+                    continue
+                if data['message'] == 'Bad credentials':
+                    self.update_gh_rate_limit(r, bad_credentials=True)
+                    continue
+            num_attempts += 1
+        if not success:
+            self.register_task_failure(task, repo_id, "Failed to hit endpoint: {}".format(url))
+            return
+
+        if 'repository' in data:
+            if 'releases' in data['repository']:
+                if 'edges' in data['repository']['releases']:
+                    for n in data['repository']['releases']['edges']:
+                        if 'node' in n:
+                            release = n['node']
+                            self.insert_release(task, repo_id, owner, release)
+                        else:
+                            self.logger.info("There's no release to insert. Current node is not available in releases: {}\n".format(n))
+                else:
+                    self.logger.info("There are no releases to insert for current repository: {}\n".format(data))
+            else:
+                self.logger.info("Graphql response does not contain releases: {}\n".format(data))
+        else:
+            self.logger.info("Graphql response does not contain repository: {}\n".format(data))
+
+    def insert_release(self, task, repo_id, owner, release):
+        author = release['author']['name'] + '_' + release['author']['company']
+        # Put all data together in format of the table
+        self.logger.info(f'Inserting release for repo with id:{repo_id}, owner:{owner}, release name:{release["name"]}\n')
+        release_inf = {
+            'release_id': release['id'],
+            'repo_id': repo_id,
+            'release_name': release['name'],
+            'release_description': release['description'],
+            'release_author': author,
+            'release_created_at': release['createdAt'],
+            'release_published_at': release['publishedAt'],
+            'release_updated_at': release['updatedAt'],
+            'release_is_draft': release['isDraft'],
+            'release_is_prerelease': release['isPrerelease'],
+            'release_tag_name': release['tagName'],
+            'release_url': release['url'],
+            'tool_source': self.tool_source,
+            'tool_version': self.tool_version,
+            'data_source': self.data_source
+        }
+
+        result = self.db.execute(self.releases_table.insert().values(release_inf))
+        self.logger.info(f"Primary Key inserted into releases table: {result.inserted_primary_key}\n")
+        self.results_counter += 1
+
+        self.logger.info(f"Inserted info for {owner}/{repo_id}/{release['name']}\n")
+
+        # Register this task as completed
+        self.register_task_completion(task, repo_id, "releases")
+        return
+
+
diff --git a/workers/release_worker/runtime.py b/workers/release_worker/runtime.py
new file mode 100644
--- /dev/null
+++ b/workers/release_worker/runtime.py
@@ -0,0 +1,22 @@
+from flask import Flask, jsonify, request, Response
+import click, os, json, requests, logging
+from workers.release_worker.release_worker import ReleaseWorker
+from workers.util import create_server, WorkerGunicornApplication
+
+def main():
+    """ Declares singular worker and creates the server and flask app that it will be running on
+    """
+    app = Flask(__name__)
+    app.worker = ReleaseWorker()
+
+    create_server(app)
+    WorkerGunicornApplication(app).run()
+
+    if app.worker._child is not None:
+        app.worker._child.terminate()
+    try:
+        requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']})
+    except:
+        pass
+
+    os.kill(os.getpid(), 9)
diff --git a/workers/metric_status_worker/setup.py b/workers/release_worker/setup.py
similarity index 83%
rename from workers/metric_status_worker/setup.py
rename to workers/release_worker/setup.py
--- a/workers/metric_status_worker/setup.py
+++ b/workers/release_worker/setup.py
@@ -5,22 +5,20 @@
 from setuptools import find_packages
 from setuptools import setup
 
-
 def read(filename):
     filename = os.path.join(os.path.dirname(__file__), filename)
     text_type = type(u"")
     with io.open(filename, mode="r", encoding='utf-8') as fd:
         return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
 
-
 setup(
-    name="metric_status_worker",
-    version="0.1.0",
+    name="release_worker",
+    version="1.0.0",
     url="https://github.com/chaoss/augur",
     license='MIT',
     author="Augurlabs",
     author_email="[email protected]",
-    description="Augur Worker that collects GitHub data",
+    description="Augur Worker that collects data about GitHub releases",
     packages=find_packages(exclude=('tests',)),
     install_requires=[
         'flask',
@@ -30,7 +28,7 @@ def
read(filename): ], entry_points={ 'console_scripts': [ - 'metric_status_worker_start=metric_status_worker.runtime:main', + 'release_worker_start=workers.release_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/repo_info_worker/__init__.py b/workers/repo_info_worker/__init__.py new file mode 100644 diff --git a/workers/repo_info_worker/repo_info_worker/worker.py b/workers/repo_info_worker/repo_info_worker.py similarity index 58% rename from workers/repo_info_worker/repo_info_worker/worker.py rename to workers/repo_info_worker/repo_info_worker.py --- a/workers/repo_info_worker/repo_info_worker/worker.py +++ b/workers/repo_info_worker/repo_info_worker.py @@ -1,37 +1,44 @@ import logging, os, sys, time, requests, json from datetime import datetime from multiprocessing import Process, Queue -from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base from workers.worker_base import Worker +# NOTE: This worker primarily inserts rows into the REPO_INFO table, which serves the primary purposes of +# 1. Displaying discrete metadata like "number of forks" and how they change over time +# 2. Validating other workers, like those related to pull requests, issues, and commits. Our totals should be at or very near the totals in the repo_info table. + +# This table also updates the REPO table in 2 cases: +# 1. Recognizing when a repository is a forked repository by updating the "forked_from" field and +# 2. Recognizing when a repository is archived, and recording the data we observed the change in status. + class RepoInfoWorker(Worker): - def __init__(self, config): + def __init__(self, config={}): + + worker_type = "repo_info_worker" # Define what this worker can be given and know how to interpret given = [['github_url']] models = ['repo_info'] # Define the tables needed to insert, update, or delete on - data_tables = ['repo_info'] + data_tables = ['repo_info', 'repo'] operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) # Define data collection info self.tool_source = 'Repo Info Worker' - self.tool_version = '0.0.1' + self.tool_version = '1.0.0' self.data_source = 'GitHub API' def repo_info_model(self, task, repo_id): github_url = task['given']['github_url'] - logging.info("Beginning filling the repo_info model for repo: " + github_url + "\n") + self.logger.info("Beginning filling the repo_info model for repo: " + github_url + "\n") owner, repo = self.get_owner_repo(github_url) @@ -100,7 +107,7 @@ def repo_info_model(self, task, repo_id): num_attempts = 0 success = False while num_attempts < 3: - logging.info("Hitting endpoint: {} ...\n".format(url)) + self.logger.info("Hitting endpoint: {} ...\n".format(url)) r = requests.post(url, json={'query': query}, headers=self.headers) self.update_gh_rate_limit(r) @@ -110,8 +117,8 @@ def repo_info_model(self, task, repo_id): data = json.loads(json.dumps(r.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) - if data['errors']['message'] == 'API rate limit exceeded': + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': self.update_gh_rate_limit(r) continue @@ -120,9 +127,9 @@ def repo_info_model(self, task, repo_id): data = data['data']['repository'] break else: 
- logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) break if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': self.update_gh_rate_limit(r, temporarily_disable=True) @@ -132,14 +139,14 @@ def repo_info_model(self, task, repo_id): continue num_attempts += 1 if not success: - self.register_task_failure(task, repo_id, "Failed to hit endpoint: {}".format(url)) + self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url)) return # Get committers count info that requires seperate endpoint committers_count = self.query_committers_count(owner, repo) # Put all data together in format of the table - logging.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') + self.logger.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') rep_inf = { 'repo_id': repo_id, 'last_updated': data['updatedAt'] if 'updatedAt' in data else None, @@ -177,16 +184,34 @@ def repo_info_model(self, task, repo_id): } result = self.db.execute(self.repo_info_table.insert().values(rep_inf)) - logging.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") + self.logger.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") self.results_counter += 1 - logging.info(f"Inserted info for {owner}/{repo}\n") + # Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table. 
+        forked = self.is_forked(owner, repo)
+        archived = self.is_archived(owner, repo)
+        archived_date_collected = None
+        if archived is not False:
+            archived_date_collected = archived
+            archived = 1
+        else:
+            archived = 0
+
+        rep_additional_data = {
+            'forked_from': forked,
+            'repo_archived': archived,
+            'repo_archived_date_collected': archived_date_collected
+        }
+        result = self.db.execute(self.repo_table.update().where(self.repo_table.c.repo_id==repo_id).values(rep_additional_data))
+        self.logger.info(f"Updated repo table with fork and archive data for repo id: {repo_id}\n")
+
+        self.logger.info(f"Inserted info for {owner}/{repo}\n")
 
-        #Register this task as completed
-        self.register_task_completion(task, repo_id, "repo_info")
+        # Register this task as completed
+        self.register_task_completion(self.task, repo_id, "repo_info")
 
     def query_committers_count(self, owner, repo):
-        logging.info('Querying committers count\n')
+        self.logger.info('Querying committers count\n')
         url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100'
         committers = 0
 
@@ -201,7 +226,67 @@ def query_committers_count(self, owner, repo):
             else:
                 url = r.links['next']['url']
         except Exception:
-            logging.exception('An error occured while querying contributor count\n')
+            self.logger.exception('An error occured while querying contributor count\n')
 
         return committers
 
+    def is_forked(self, owner, repo): #/repos/:owner/:repo parent
+        self.logger.info('Querying parent info to verify if the repo is forked\n')
+        url = f'https://api.github.com/repos/{owner}/{repo}'
+
+        r = requests.get(url, headers=self.headers)
+        self.update_gh_rate_limit(r)
+
+        data = self.get_repo_data(url, r)
+
+        if 'fork' in data:
+            if 'parent' in data:
+                return data['parent']['full_name']
+            return 'Parent not available'
+
+        return False
+
+    def is_archived(self, owner, repo):
+        self.logger.info('Querying repo to check whether it is archived\n')
+        url = f'https://api.github.com/repos/{owner}/{repo}'
+
+        r = requests.get(url, headers=self.headers)
+        self.update_gh_rate_limit(r)
+
+        data = self.get_repo_data(url, r)
+
+        if 'archived' in data:
+            if data['archived']:
+                if 'updated_at' in data:
+                    return data['updated_at']
+                return 'Date not available'
+            return False
+
+        return False
+
+    def get_repo_data(self, url, response):
+        success = False
+        try:
+            data = response.json()
+        except:
+            data = json.loads(json.dumps(response.text))
+
+        if 'errors' in data:
+            self.logger.info("Error!: {}".format(data['errors']))
+            if data['errors'][0]['message'] == 'API rate limit exceeded':
+                self.update_gh_rate_limit(response)
+
+        if 'id' in data:
+            success = True
+        else:
+            self.logger.info("Request returned a non-data dict: {}\n".format(data))
+            if data['message'] == 'Not Found':
+                self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
+            if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
+                self.update_gh_rate_limit(response, temporarily_disable=True)
+            if data['message'] == 'Bad credentials':
+                self.update_gh_rate_limit(response, bad_credentials=True)
+        if not success:
+            self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url))
+
+        return data
diff --git a/workers/repo_info_worker/repo_info_worker/runtime.py b/workers/repo_info_worker/repo_info_worker/runtime.py
deleted file mode 100644
--- a/workers/repo_info_worker/repo_info_worker/runtime.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from flask import Flask, jsonify, request, Response
-import click, os, json, requests, logging
-from repo_info_worker.worker import RepoInfoWorker
-from workers.util import read_config, create_server
-
[email protected]()
[email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL')
[email protected]('--host', default='localhost', help='Host')
[email protected]('--port', default=51237, help='Port')
-def main(augur_url, host, port):
-    """ Declares singular worker and creates the server and flask app that it will be running on
-    """
-    app = Flask(__name__)
-
-    #load credentials
-    broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0")
-    broker_port = read_config("Server", "port", "AUGUR_PORT", 5000)
-    database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host')
-    worker_info = read_config('Workers', 'repo_info_worker', None, None)
-
-    worker_port = worker_info['port'] if 'port' in worker_info else port
-
-    while True:
-        try:
-            r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json()
-            if 'status' in r:
-                if r['status'] == 'alive':
-                    worker_port += 1
-        except:
-            break
-
-    logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO)
-
-    config = {
-        "id": "com.augurlabs.core.repo_info_worker.{}".format(worker_port),
-        'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port),
-        'gh_api_key': read_config('Database', 'key', 'AUGUR_GITHUB_API_KEY', 'key')
-    }
-
-    #create instance of the worker
-    app.gh_repo_info_worker = RepoInfoWorker(config) # declares the worker that will be running on this server with specified config
-
-    create_server(app, None)
-    logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...")
-    app.run(debug=app.debug, host=host, port=worker_port)
-    if app.gh_repo_info_worker._child is not None:
-        app.gh_repo_info_worker._child.terminate()
-    try:
-        requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']})
-    except:
-        pass
-
-    logging.info("Killing Flask App: " + str(os.getpid()))
-    os.kill(os.getpid(), 9)
-
diff --git a/workers/repo_info_worker/runtime.py b/workers/repo_info_worker/runtime.py
new file mode 100644
--- /dev/null
+++ b/workers/repo_info_worker/runtime.py
@@ -0,0 +1,22 @@
+from flask import Flask, jsonify, request, Response
+import click, os, json, requests, logging
+from workers.repo_info_worker.repo_info_worker import RepoInfoWorker
+from workers.util import create_server, WorkerGunicornApplication
+
+def main():
+    """ Declares singular worker and creates the server and flask app that it will be running on
+    """
+    app = Flask(__name__)
+    app.worker = RepoInfoWorker()
+
+    create_server(app)
+    WorkerGunicornApplication(app).run()
+
+    if app.worker._child is not None:
+        app.worker._child.terminate()
+    try:
+
requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/repo_info_worker/setup.py b/workers/repo_info_worker/setup.py --- a/workers/repo_info_worker/setup.py +++ b/workers/repo_info_worker/setup.py @@ -13,22 +13,21 @@ def read(filename): setup( name="repo_info_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", author_email="[email protected]", description="Augur Worker that collects general data about a repo on GitHub", - packages=find_packages(exclude=('tests',)), + packages=find_packages(), install_requires=[ 'flask', 'requests', - 'psycopg2-binary', - 'click' + 'psycopg2-binary' ], entry_points={ 'console_scripts': [ - 'repo_info_worker_start=repo_info_worker.runtime:main', + 'repo_info_worker_start=workers.repo_info_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/standard_methods.py b/workers/standard_methods.py deleted file mode 100644 --- a/workers/standard_methods.py +++ /dev/null @@ -1,712 +0,0 @@ -""" Helper methods constant across all workers """ -import requests, datetime, time, traceback, json, os, sys, math -import sqlalchemy as s -import pandas as pd -import os -import sys, logging -from urllib.parse import urlparse - -def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): - """ map objects => { *our db col* : *gh json key*} """ - need_insertion_count = 0 - need_update_count = 0 - for i, obj in enumerate(new_data): - if type(obj) != dict: - logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) - continue - - obj['flag'] = 'none' # default of no action needed - existing_tuple = None - for db_dupe_key in list(duplicate_col_map.keys()): - - if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): - if table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'): - existing_tuple = table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] - continue - - logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) - obj['flag'] = 'need_insertion' - need_insertion_count += 1 - break - - if obj['flag'] == 'need_insertion': - logging.info('Already determined that current tuple needs insertion, skipping checking updates. ' - 'Moving to next tuple.\n') - continue - - # If we need to check the values of the existing tuple to determine if an update is needed - for augur_col, value_check in value_update_col_map.items(): - not_nan_check = not (pd.isna(value_check) and pd.isna(existing_tuple[augur_col])) if value_check is not None else True - if existing_tuple[augur_col] != value_check and not_nan_check: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - if obj['flag'] == 'need_update': - logging.info('Already determined that current tuple needs update, skipping checking further updates. 
' - 'Moving to next tuple.\n') - continue - - # Now check the existing tuple's values against the response values to determine if an update is needed - for col in update_col_map.keys(): - if update_col_map[col] not in obj: - continue - if obj[update_col_map[col]] == existing_tuple[col]: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + - "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count)) - return new_data - -def check_duplicates(new_data, table_values, key): - need_insertion = [] - for obj in new_data: - if type(obj) == dict: - if not table_values.isin([obj[key]]).any().any(): - need_insertion.append(obj) - # else: - # logging.info("Tuple with github's {} key value already".format(key) + - # "exists in our db: {}\n".format(str(obj[key]))) - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + - "was reduced to {} tuples.\n".format(str(len(need_insertion)))) - return need_insertion - -def connect_to_broker(self): - connected = False - for i in range(5): - try: - logging.info("attempt {}\n".format(i)) - if i > 0: - time.sleep(10) - requests.post('http://{}:{}/api/unstable/workers'.format( - self.config['broker_host'],self.config['broker_port']), json=self.specs) - logging.info("Connection to the broker was successful\n") - connected = True - break - except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker. Trying again...\n') - if not connected: - sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n') - -def dump_queue(queue): - """ - Empties all pending items in a queue and returns them in a list. 
- """ - result = [] - queue.put("STOP") - for i in iter(queue.get, 'STOP'): - result.append(i) - # time.sleep(.1) - return result - -def find_id_from_login(self, login): - idSQL = s.sql.text(""" - SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' - """.format(login)) - rs = pd.read_sql(idSQL, self.db, params={}) - data_list = [list(row) for row in rs.itertuples(index=False)] - try: - return data_list[0][0] - except: - logging.info("contributor needs to be added...") - - cntrb_url = ("https://api.github.com/users/" + login) - logging.info("Hitting endpoint: {} ...\n".format(cntrb_url)) - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'] if 'login' in contributor else None, - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, - "cntrb_canonical": None, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - return find_id_from_login(self, login) - -def get_owner_repo(github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - -def get_max_id(self, table, column, default=25150, operations_table=False): - maxIdSQL = s.sql.text(""" - SELECT max({0}.{1}) AS {1} - FROM {0} - """.format(table, column)) - db = self.db if not operations_table else self.helper_db - rs = pd.read_sql(maxIdSQL, db, params={}) - if rs.iloc[0][column] is not None: - max_id = int(rs.iloc[0][column]) + 1 - logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) - else: - max_id = default - logging.info("Could not find max id for {} column in the {} table... 
using default set to: \ - {}\n".format(column, table, max_id)) - return max_id - -def get_table_values(self, cols, tables, where_clause=""): - table_str = tables[0] - del tables[0] - - col_str = cols[0] - del cols[0] - - for table in tables: - table_str += ", " + table - for col in cols: - col_str += ", " + col - - tableValuesSQL = s.sql.text(""" - SELECT {} FROM {} {} - """.format(col_str, table_str, where_clause)) - logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL)) - values = pd.read_sql(tableValuesSQL, self.db, params={}) - return values - -def init_oauths(self): - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauthSQL = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(self.config['key'])) - for oauth in [{'oauth_id': 0, 'access_token': self.config['key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}\n".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - -def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}): - # Paginate backwards through all the tuples but get first page in order - # to determine if there are multiple pages and if the 1st page covers all - update_keys = list(update_col_map.keys()) if update_col_map else [] - update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] - cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - table_values = get_table_values(self, cols_query, [table], where_clause) - - i = 1 - multiple_pages = False - tuples = [] - while True: - num_attempts = 0 - success = False - while num_attempts < 3: - logging.info("Hitting endpoint: " + url.format(i) + " ...\n") - r = requests.get(url=url.format(i), headers=self.headers) - update_gh_rate_limit(self, r) - logging.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) - - try: - j = r.json() - except: - j = json.loads(json.dumps(r.text)) - - if type(j) != dict and type(j) != str: - success = True - break - elif type(j) == dict: - logging.info("Request returned a dict: {}\n".format(j)) - if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) - break - if j['message'] == 'You have 
triggered an abuse detection mechanism. Please wait a few minutes before you try again.': - num_attempts -= 1 - update_gh_rate_limit(self, r, temporarily_disable=True) - if j['message'] == 'Bad credentials': - update_gh_rate_limit(self, r, bad_credentials=True) - elif type(j) == str: - logging.info("J was string: {}\n".format(j)) - if '<!DOCTYPE html>' in j: - logging.info("HTML was returned, trying again...\n") - elif len(j) == 0: - logging.info("Empty string, trying again...\n") - else: - try: - j = json.loads(j) - success = True - break - except: - pass - num_attempts += 1 - if not success: - break - - # Find last page so we can decrement from there - if 'last' in r.links and not multiple_pages and not self.finishing_task: - param = r.links['last']['url'][-6:] - i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") - multiple_pages = True - elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") - elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ..." - " excess rate limit requests will be made\n") - - if len(j) == 0: - logging.info("Response was empty, breaking from pagination.\n") - break - - # Checking contents of requests with what we already have in the db - j = assign_tuple_action(self, j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) - if not j: - logging.info("Assigning tuple action failed, moving to next page.\n") - i = i + 1 if self.finishing_task else i - 1 - continue - try: - to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none'] - except Exception as e: - logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) - i = i + 1 if self.finishing_task else i - 1 - continue - if len(to_add) == 0 and multiple_pages and 'last' in r.links: - logging.info("{}".format(r.links['last'])) - if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown tuples, breaking from pagination.\n") - break - tuples += to_add - - i = i + 1 if self.finishing_task else i - 1 - - # Since we already wouldve checked the first page... 
break - if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") - break - - return tuples - -def query_github_contributors(self, entry_info, repo_id): - - """ Data collection function - Query the GitHub API for contributors - """ - logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n") - - github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] - - # Extract owner/repo from the url for the endpoint - path = urlparse(github_url) - split = path[2].split('/') - - owner = split[1] - name = split[2] - - # Handles git url case by removing the extension - if ".git" in name: - name = name[:-4] - - # Set the base of the url and place to hold contributors to insert - contributors_url = ("https://api.github.com/repos/" + owner + "/" + - name + "/contributors?per_page=100&page={}") - - # Get contributors that we already have stored - # Set our duplicate and update column map keys (something other than PK) to - # check dupicates/needed column updates with - table = 'contributors' - table_pkey = 'cntrb_id' - update_col_map = {'cntrb_email': 'email'} - duplicate_col_map = {'cntrb_login': 'login'} - - #list to hold contributors needing insertion or update - contributors = paginate(self, contributors_url, duplicate_col_map, update_col_map, table, table_pkey) - - logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") - - for repo_contributor in contributors: - try: - # Need to hit this single contributor endpoint to get extra data including... - # `created at` - # i think that's it - cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - canonical_email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'], - "cntrb_created_at": contributor['created_at'], - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - # "cntrb_type": , dont have a use for this as of now ... 
let it default to null - "cntrb_canonical": canonical_email, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - - # Commit insertion to table - if repo_contributor['flag'] == 'need_update': - result = self.db.execute(self.contributors_table.update().where( - self.history_table.c.cntrb_email==email).values(cntrb)) - logging.info("Updated tuple in the contributors table with existing email: {}".format(email)) - self.cntrb_id_inc = repo_contributor['pkey'] - elif repo_contributor['flag'] == 'need_insertion': - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) - self.results_counter += 1 - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - # Increment our global track of the cntrb id for the possibility of it being used as a FK - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - except Exception as e: - logging.info("Caught exception: {}".format(e)) - logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) - continue - -def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): - """ - Read a variable in specified section of the config file, unless provided an environment variable - - :param section: location of given variable - :param name: name of variable - """ - config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path) - _config_file_name = 'augur.config.json' - _config_bad = False - _already_exported = {} - _runtime_location = 'runtime/' - _default_config = {} - _config_file = None - - try: - _config_file = open(config_file_path, 'r+') - except: - print('Couldn\'t open {}'.format(_config_file_name)) - - # Load the config file - try: - config_text = _config_file.read() - _config = json.loads(config_text) - except json.decoder.JSONDecodeError as e: - if not _config_bad: - _using_config_file = False - print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: {}'.format(config_file_path, str(e))) - _config = _default_config - - value = None - if environment_variable is not None: - value = os.getenv(environment_variable) - if value is None: - try: - if name is not None: - value = _config[section][name] - else: - value = _config[section] - except Exception as e: - value = default - if not section in _config: - _config[section] = {} - - return value - - -def record_model_process(self, repo_id, model): - - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Stopped", - "total_results": self.results_counter - } - if self.finishing_task: - result = self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - self.history_id += 1 - else: - result = self.helper_db.execute(self.history_table.insert().values(task_history)) - logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) - self.history_id = int(result.inserted_primary_key[0]) - -def register_task_completion(self, task, repo_id, model): - # Task to send back to broker - task_completed = { - 'worker_id': self.config['id'], - 'job_type': "MAINTAIN", - 'repo_id': repo_id, - 'job_model': model - } - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] if 'git_url' in task['given'] else "INVALID_GIVEN" - if key == 'INVALID_GIVEN': - register_task_failure(self, task, repo_id, "INVALID_GIVEN: not github nor git url") - return - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Success", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job completion for: " + str(task_completed) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where( - self.job_table.c.job_model==model).values(updated_job)) - logging.info("Updated job process for model: " + model + "\n") - - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - - # Reset results counter for next task - self.results_counter = 0 - -def register_task_failure(self, task, repo_id, e): - - logging.info("Worker ran into an error for task: {}\n".format(task)) - logging.info("Printing traceback...\n") - tb = traceback.format_exc() - logging.info(tb) - - logging.info(f'This task inserted {self.results_counter} tuples before failure.\n') - logging.info("Notifying broker and logging task failure in database...\n") - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - url = task['given'][key] - - """ Query 
all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(url)) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - task['worker_id'] = self.config['id'] - try: - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=task) - except requests.exceptions.ConnectionError: - logging.error('Could not send task failure message to the broker\n') - except Exception: - logging.exception('An error occured while informing broker about task failure\n') - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": task['models'][0], - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error in the history table for: " + str(task) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==task['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + task['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - -def retrieve_tuple(self, key_values, tables): - table_str = tables[0] - del tables[0] - - key_values_items = list(key_values.items()) - for col, value in [key_values_items[0]]: - where_str = col + " = '" + value + "'" - del key_values_items[0] - - for col, value in key_values_items: - where_str += ' AND ' + col + " = '" + value + "'" - for table in tables: - table_str += ", " + table - - retrieveTupleSQL = s.sql.text(""" - SELECT * FROM {} WHERE {} - """.format(table_str, where_str)) - values = json.loads(pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")) - return values - -def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): - # Try to get rate limit from request headers, sometimes it does not work (GH's issue) - # In that case we just decrement from last recieved header count - if bad_credentials and len(self.oauths) > 1: - logging.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) - del self.oauths[0] - - if temporarily_disable: - logging.info("Github thinks we are abusing their api. 
Preventing use of this key until it resets...\n") - self.oauths[0]['rate_limit'] = 0 - else: - try: - self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - logging.info("Recieved rate limit from headers\n") - except: - self.oauths[0]['rate_limit'] -= 1 - logging.info("Headers did not work, had to decrement\n") - logging.info("Updated rate limit, you have: " + - str(self.oauths[0]['rate_limit']) + " requests remaining.\n") - if self.oauths[0]['rate_limit'] <= 0: - try: - reset_time = response.headers['X-RateLimit-Reset'] - except Exception as e: - logging.info("Could not get reset time from headers because of error: {}".format(e)) - logging.info('Headers: {}'.format(response.headers)) - reset_time = 3600 - time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() - logging.info("Rate limit exceeded, checking for other available keys to use.\n") - - # We will be finding oauth with the highest rate limit left out of our list of oauths - new_oauth = self.oauths[0] - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] - for oauth in other_oauths: - logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - response = requests.get(url=url, headers=self.headers) - oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() - - # Update oauth to switch to if a higher limit is found - if oauth['rate_limit'] > new_oauth['rate_limit']: - logging.info("Higher rate limit found in oauth: {}\n".format(oauth)) - new_oauth = oauth - elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: - logging.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) - new_oauth = oauth - - if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: - logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) - time.sleep(new_oauth['seconds_to_reset']) - - # Make new oauth the 0th element in self.oauths so we know which one is in use - index = self.oauths.index(new_oauth) - self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] - logging.info("Using oauth: {}\n".format(self.oauths[0])) - - # Change headers to be using the new oauth's key - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} diff --git a/workers/template_worker/__init__.py b/workers/template_worker/__init__.py new file mode 100644 diff --git a/workers/template_worker/runtime.py b/workers/template_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/template_worker/runtime.py @@ -0,0 +1,23 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.template_worker.template_worker import TemplateWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ + Creates the Flask app and data collection worker, then starts the Gunicorn server + """ + app = Flask(__name__) + app.worker = TemplateWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + 
app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/template_worker/setup.py b/workers/template_worker/setup.py --- a/workers/template_worker/setup.py +++ b/workers/template_worker/setup.py @@ -19,16 +19,15 @@ def read(filename): author="Augur Team", author_email="[email protected]", description="Template worker to be used as an example", - packages=find_packages(exclude=('tests',)), + packages=find_packages(), install_requires=[ 'flask', 'requests', - 'psycopg2-binary', - 'click' + 'psycopg2-binary' ], entry_points={ 'console_scripts': [ - 'template_worker_start=template_worker.runtime:main', + 'template_worker_start=workers.template_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/template_worker/template_worker/worker.py b/workers/template_worker/template_worker.py similarity index 76% rename from workers/template_worker/template_worker/worker.py rename to workers/template_worker/template_worker.py --- a/workers/template_worker/template_worker/worker.py +++ b/workers/template_worker/template_worker.py @@ -6,12 +6,16 @@ from workers.worker_base import Worker class TemplateWorker(Worker): - def __init__(self, config): + def __init__(self, config={}): - # Define what this worker can be given and know how to interpret + # Define the worker's type, which will be used for self identification. + # Should be unique among all workers and is the same key used to define + # this worker's settings in the configuration file. + worker_type = "template_worker" + # Define what this worker can be given and know how to interpret # given is usually either [['github_url']] or [['git_url']] (depending if your - # worker is exclusive to repos that are on the GitHub platform) + # worker is exclusive to repos that are on the GitHub platform) given = [[]] # The name the housekeeper/broker use to distinguish the data model this worker can fill @@ -28,7 +32,14 @@ def __init__(self, config): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Do any additional configuration after the general initialization has been run + self.config.update(config) + + # If you need to do some preliminary interactions with the database, these MUST go + # in the model method. The database connection is instantiated only inside of each + # data collection process # Define data collection info self.tool_source = 'Fake Template Worker' @@ -54,8 +65,11 @@ def fake_data_model(self, task, repo_id): } :param repo_id: the collect() method queries the repo_id given the git/github url and passes it along to make things easier. An int such as: 27869 + """ + # Any initial database instructions, like finding the last tuple inserted or generate the next ID value + # Collection and insertion of data happens here # ... 
diff --git a/workers/template_worker/template_worker/runtime.py b/workers/template_worker/template_worker/runtime.py deleted file mode 100644 --- a/workers/template_worker/template_worker/runtime.py +++ /dev/null @@ -1,58 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from template_worker.worker import TemplateWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'repo_info_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.template_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port) - } - - #create instance of the worker - app.template_worker = TemplateWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - - if app.template_worker._child is not None: - app.template_worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - - - diff --git a/workers/util.py b/workers/util.py --- a/workers/util.py +++ b/workers/util.py @@ -1,5 +1,6 @@ import os, json, requests, logging from flask import Flask, Response, jsonify, request +import gunicorn.app.base def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): """ @@ -47,7 +48,7 @@ def read_config(section, name=None, environment_variable=None, default=None, con return value -def create_server(app, worker): +def create_server(app, worker=None): """ Consists of AUGWOP endpoints for the broker to communicate to this worker Can post a new task to be added to the workers queue Can retrieve current status of the worker @@ -83,4 +84,28 @@ def heartbeat(): def augwop_config(): """ Retrieve worker's config """ - return app.worker.config \ No newline at end of file + return app.worker.config + +class WorkerGunicornApplication(gunicorn.app.base.BaseApplication): + + def __init__(self, app): + self.options = { + 'bind': '%s:%s' % (app.worker.config["host"], app.worker.config["port"]), + 'workers': 1, + 'errorlog': app.worker.config['server_logfile'], + 'accesslog': 
app.worker.config['server_logfile'], + 'loglevel': app.worker.config['log_level'], + 'capture_output': app.worker.config['capture_output'] + } + + self.application = app + super().__init__() + + def load_config(self): + config = {key: value for key, value in self.options.items() + if key in self.cfg.settings and value is not None} + for key, value in config.items(): + self.cfg.set(key.lower(), value) + + def load(self): + return self.application diff --git a/workers/value_worker/__init__.py b/workers/value_worker/__init__.py new file mode 100644 diff --git a/workers/value_worker/runtime.py b/workers/value_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/value_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.value_worker.value_worker import ValueWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ValueWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) \ No newline at end of file diff --git a/workers/value_worker/setup.py b/workers/value_worker/setup.py --- a/workers/value_worker/setup.py +++ b/workers/value_worker/setup.py @@ -5,33 +5,23 @@ from setuptools import find_packages from setuptools import setup - -def read(filename): - filename = os.path.join(os.path.dirname(__file__), filename) - text_type = type(u"") - with io.open(filename, mode="r", encoding='utf-8') as fd: - return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) - - setup( name="value_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', - author="Augurlabs", author_email="[email protected]", - description="Augur Worker that gathers value data", - long_description=read("README.md"), - packages=find_packages(exclude=('tests',)), - - install_requires=['flask', 'requests', 'psycopg2-binary', 'click'], - + install_requires=[ + 'flask', + 'requests', + 'psycopg2-binary', + ], entry_points={ 'console_scripts': [ - 'value_worker_start=value_worker.runtime:main', + 'value_worker_start=workers.value_worker.runtime:main', ], }, diff --git a/workers/value_worker/value_worker.py b/workers/value_worker/value_worker.py new file mode 100644 --- /dev/null +++ b/workers/value_worker/value_worker.py @@ -0,0 +1,94 @@ +import os, subprocess +from datetime import datetime +import logging +import requests +import json +from urllib.parse import quote +from multiprocessing import Process, Queue + +import pandas as pd +import sqlalchemy as s +from sqlalchemy.ext.automap import automap_base +from sqlalchemy import MetaData +from workers.worker_base import Worker + +class ValueWorker(Worker): + def __init__(self, config={}): + + worker_type = "value_worker" + + # Define what this worker can be given and know how to interpret + given = [['git_url']] + models = ['value'] + + # Define the tables needed to insert, update, or delete on + data_tables = ['repo_labor'] + operations_tables = ['worker_history', 'worker_job'] + + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, 
operations_tables) + + self.config.update({ + 'repo_directory': self.augur_config.get_value('Workers', 'facade_worker')['repo_directory'] + }) + + self.tool_source = 'Value Worker' + self.tool_version = '1.0.0' + self.data_source = 'SCC' + + def value_model(self, entry_info, repo_id): + """ Data collection and storage method + """ + self.logger.info(entry_info) + self.logger.info(repo_id) + + repo_path_sql = s.sql.text(""" + SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path + FROM repo + WHERE repo_id = :repo_id + """) + + relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1] + absolute_repo_path = self.config['repo_directory'] + relative_repo_path + + try: + self.generate_value_data(repo_id, absolute_repo_path) + except Exception as e: + self.logger.error(e) + + self.register_task_completion(entry_info, repo_id, "value") + + def generate_value_data(self, repo_id, path): + """Runs scc on repo and stores data in database + + :param repo_id: Repository ID + :param path: Absolute path of the Repostiory + """ + self.logger.info('Running `scc`....') + self.logger.info(f'Repo ID: {repo_id}, Path: {path}') + + output = subprocess.check_output([self.config['scc_bin'], '-f', 'json', path]) + records = json.loads(output.decode('utf8')) + + for record in records: + for file in record['Files']: + repo_labor = { + 'repo_id': repo_id, + 'rl_analysis_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'), + 'programming_language': file['Language'], + 'file_path': file['Location'], + 'file_name': file['Filename'], + 'total_lines': file['Lines'], + 'code_lines': file['Code'], + 'comment_lines': file['Comment'], + 'blank_lines': file['Blank'], + 'code_complexity': file['Complexity'], + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source, + 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') + } + + result = self.db.execute(self.repo_labor_table.insert().values(repo_labor)) + self.logger.info(f"Added Repo Labor Data: {result.inserted_primary_key}") diff --git a/workers/value_worker/value_worker/__init__.py b/workers/value_worker/value_worker/__init__.py deleted file mode 100644 --- a/workers/value_worker/value_worker/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""value_worker - Augur Worker that collects value data""" - -__tool_source__ = 'Value Worker' -__tool_version__ = '0.1.0' -__data_source__ = 'SCC' - -__author__ = 'Augur Team <[email protected]>' -__all__ = [] - diff --git a/workers/value_worker/value_worker/runtime.py b/workers/value_worker/value_worker/runtime.py deleted file mode 100644 --- a/workers/value_worker/value_worker/runtime.py +++ /dev/null @@ -1,122 +0,0 @@ -import json -import logging -import os -import subprocess -import sys - -import click -import requests -from flask import Flask, Response, jsonify, request - -from value_worker.worker import ValueWorker - -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - # POST a task to be added to the queue - if request.method == 'POST': - logging.info("Sending to 
work on task: {}".format(str(request.json))) - app.value_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.value_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51239, help='Port') [email protected]('--scc-bin', default=f'{os.environ["HOME"]}/go/bin/scc', help='scc binary') -def main(augur_url, host, port, scc_bin): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'value_worker', None, - { - "port": 37300, - "scc_bin": "/home/sean/go/bin/scc" - }) - - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.value_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - 'scc_bin': worker_info['scc_bin'], - 'repo_directory': read_config('Workers', 'facade_worker', None, None)['repo_directory'], - } - - # Create the worker that will be running on this server with specified config - app.value_worker = ValueWorker(config) - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - - app.run(debug=app.debug, host=host, port=worker_port) - if app.value_worker._child is not None: - app.value_worker._child.terminate() - try: - requests.post(f'http://{server["host"]}:{server["port"]}/api/unstable/workers/remove', json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/value_worker/value_worker/worker.py b/workers/value_worker/value_worker/worker.py deleted file mode 100644 --- a/workers/value_worker/value_worker/worker.py +++ /dev/null @@ -1,267 +0,0 @@ -import os, 
subprocess -from datetime import datetime -import logging -import requests -import json -from urllib.parse import quote -from multiprocessing import Process, Queue - -from value_worker import __data_source__, __tool_source__, __tool_version__ -import pandas as pd -import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData -from workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class ValueWorker: - def __init__(self, config, task=None): - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.config = config - - self.db = None - self.value_table = None - - self._task = task - self._queue = Queue() - self._child = None - - self.history_id = None - self.finishing_task = False - self.working_on = None - self.results_counter = 0 - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["value"] - } - ], - "config": [self.config] - } - - self._db_str = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], - self.config['password'], - self.config['host'], - self.config['port'], - self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self._db_str, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self._db_str, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - logging.info("Database connection established...") - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['repo_labor']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.repo_labor_table = Base.classes.repo_labor.__table__ - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - logging.info("ORM setup complete...") - - # Organize different api keys/oauths available - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauth_sql = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(0)) - - for oauth in [{'oauth_id': 0, 'access_token': 0}] + json.loads(pd.read_sql(oauth_sql, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': 
(datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) \ - - datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - - # Send broker hello message - connect_to_broker(self) - logging.info("Connected to the broker...\n") - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "key": "", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['github_api_key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - - self._task = value - self.run() - - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def value_model(self, entry_info, repo_id): - """ Data collection and storage method - """ - logging.info(entry_info) - logging.info(repo_id) - - repo_path_sql = s.sql.text(""" - SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path - FROM repo - WHERE repo_id = :repo_id - """) - - relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1] - absolute_repo_path = self.config['repo_directory'] + relative_repo_path - - try: - self.generate_value_data(repo_id, absolute_repo_path) - except Exception as e: - logging.error(e) - - register_task_completion(self, entry_info, repo_id, "value") - - def generate_value_data(self, repo_id, path): - """Runs scc on repo and stores data in database - - :param repo_id: Repository ID - :param path: Absolute path of the Repostiory - """ - logging.info('Running `scc`....') - logging.info(f'Repo ID: {repo_id}, Path: {path}') - - output = subprocess.check_output([self.config['scc_bin'], '-f', 'json', path]) - records = json.loads(output.decode('utf8')) - - for record in records: - for file in record['Files']: - repo_labor = { - 'repo_id': repo_id, - 'rl_analysis_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'), - 'programming_language': file['Language'], - 'file_path': file['Location'], - 'file_name': file['Filename'], - 'total_lines': file['Lines'], - 'code_lines': file['Code'], - 'comment_lines': file['Comment'], - 'blank_lines': file['Blank'], - 'code_complexity': file['Complexity'], - 'tool_source': __tool_source__, - 'tool_version': __tool_version__, - 'data_source': __data_source__, - 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') - } - - result = self.db.execute(self.repo_labor_table.insert().values(repo_labor)) - 
logging.info(f"Added Repo Labor Data: {result.inserted_primary_key}") - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - """ Query all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'value': - self.value_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() diff --git a/workers/worker_base.py b/workers/worker_base.py --- a/workers/worker_base.py +++ b/workers/worker_base.py @@ -1,47 +1,92 @@ """ Helper methods constant across all workers """ -import requests, datetime, time, traceback, json, os, sys, math +import requests, datetime, time, traceback, json, os, sys, math, logging +from logging import FileHandler, Formatter, StreamHandler from multiprocessing import Process, Queue import sqlalchemy as s import pandas as pd -import os -import sys, logging -from urllib.parse import urlparse -from workers.util import read_config +from pathlib import Path +from urllib.parse import urlparse, quote from sqlalchemy import MetaData from sqlalchemy.ext.automap import automap_base +from augur.config import AugurConfig +from augur.logging import AugurLogging class Worker(): - def __init__(self, config={}, given=[], models=[], data_tables=[], operations_tables=[]): + ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[], platform="github"): + + self.worker_type = worker_type self._task = None # task currently being worked on (dict) self._child = None # process of currently running task (multiprocessing process) self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes) + self.data_tables = data_tables + self.operations_tables = operations_tables + self._root_augur_dir = Worker.ROOT_AUGUR_DIR + self.platform = platform # count of tuples inserted in the database (to store stats for each task in op tables) self.results_counter = 0 - # if we are finishing a previous task, certain operations work differenty + # if we are finishing a previous task, certain operations work differently self.finishing_task = False - # Update config with options that are general and not specific to any worker - self.config = config + self.augur_config = AugurConfig(self._root_augur_dir) + + self.config = { + 'worker_type': self.worker_type, + 'host': self.augur_config.get_value("Server", "host"), + 'gh_api_key': self.augur_config.get_value('Database', 'key'), + 'offline_mode': False + } + 
self.config.update(self.augur_config.get_section("Logging")) + + try: + worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']] + self.config.update(worker_defaults) + except KeyError as e: + logging.warn('Could not get default configuration for {}'.format(self.config['worker_type'])) + + worker_info = self.augur_config.get_value('Workers', self.config['worker_type']) + self.config.update(worker_info) + + worker_port = self.config['port'] + while True: + try: + r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format( + self.config['host'], worker_port)).json() + if 'status' in r: + if r['status'] == 'alive': + worker_port += 1 + except: + break + self.config.update({ - 'port_broker': read_config('Server', 'port', 'AUGUR_PORT', 5000), - 'host_broker': read_config('Server', 'host', 'AUGUR_HOST', '0.0.0.0'), - 'host_database': read_config('Database', 'host', 'AUGUR_DB_HOST', 'host'), - 'port_database': read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - 'user_database': read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - 'name_database': read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - 'password_database': read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password') + "port": worker_port, + "id": "workers.{}.{}".format(self.worker_type, worker_port), + "capture_output": False, + 'location': 'http://{}:{}'.format(self.config["host"], worker_port), + 'port_broker': self.augur_config.get_value('Server', 'port'), + 'host_broker': self.augur_config.get_value('Server', 'host'), + 'host_database': self.augur_config.get_value('Database', 'host'), + 'port_database': self.augur_config.get_value('Database', 'port'), + 'user_database': self.augur_config.get_value('Database', 'user'), + 'name_database': self.augur_config.get_value('Database', 'name'), + 'password_database': self.augur_config.get_value('Database', 'password') }) + self.config.update(config) + + # Initialize logging in the main process + self.initialize_logging() + + # Clear log contents from previous runs + open(self.config["server_logfile"], "w").close() + open(self.config["collection_logfile"], "w").close() - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format( - self.config['id'].split('.')[len(self.config['id'].split('.')) - 1] - ), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) + # Get configured collection logger + self.logger = logging.getLogger(self.config["id"]) + self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) self.given = given self.models = models @@ -56,28 +101,100 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta ], 'config': self.config } - + + # Send broker hello message + if self.config["offline_mode"] is False: + self.connect_to_broker() + + try: + self.tool_source + self.tool_version + self.data_source + except: + self.tool_source = 'Augur Worker Testing' + self.tool_version = '0.0.0' + self.data_source = 'Augur Worker Testing' + + def __repr__(self): + return f"{self.config['id']}" + + def initialize_logging(self): + self.config["log_level"] = self.config["log_level"].upper() + if self.config["debug"]: + self.config["log_level"] = "DEBUG" + + if self.config["verbose"]: + format_string = AugurLogging.verbose_format_string + else: + format_string = AugurLogging.simple_format_string + + formatter = 
Formatter(fmt=format_string) + error_formatter = Formatter(fmt=AugurLogging.error_format_string) + + worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/" + Path(worker_dir).mkdir(exist_ok=True) + logfile_dir = worker_dir + f"/{self.worker_type}/" + Path(logfile_dir).mkdir(exist_ok=True) + + server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"]) + collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"]) + collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"]) + self.config.update({ + "logfile_dir": logfile_dir, + "server_logfile": server_logfile, + "collection_logfile": collection_logfile, + "collection_errorfile": collection_errorfile + }) + + collection_file_handler = FileHandler(filename=self.config["collection_logfile"], mode="a") + collection_file_handler.setFormatter(formatter) + collection_file_handler.setLevel(self.config["log_level"]) + + collection_errorfile_handler = FileHandler(filename=self.config["collection_errorfile"], mode="a") + collection_errorfile_handler.setFormatter(error_formatter) + collection_errorfile_handler.setLevel(logging.WARNING) + + logger = logging.getLogger(self.config["id"]) + logger.handlers = [] + logger.addHandler(collection_file_handler) + logger.addHandler(collection_errorfile_handler) + logger.setLevel(self.config["log_level"]) + logger.propagate = False + + if self.config["debug"]: + self.config["log_level"] = "DEBUG" + console_handler = StreamHandler() + console_handler.setFormatter(formatter) + console_handler.setLevel(self.config["log_level"]) + logger.addHandler(console_handler) + + if self.config["quiet"]: + logger.disabled = True + + self.logger = logger + + def initialize_database_connections(self): DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database'] ) # Create an sqlalchemy engine for both database schemas - logging.info("Making database connections... 
{}".format(DB_STR)) + self.logger.info("Making database connections") db_schema = 'augur_data' - self.db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, + self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(db_schema)}) helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, + self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(helper_schema)}) metadata = MetaData() helper_metadata = MetaData() # Reflect only the tables we will use for each schema's metadata object - metadata.reflect(self.db, only=data_tables) - helper_metadata.reflect(self.helper_db, only=operations_tables) + metadata.reflect(self.db, only=self.data_tables) + helper_metadata.reflect(self.helper_db, only=self.operations_tables) Base = automap_base(metadata=metadata) HelperBase = automap_base(metadata=helper_metadata) @@ -86,28 +203,27 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta HelperBase.prepare() # So we can access all our tables when inserting, updating, etc - for table in data_tables: + for table in self.data_tables: setattr(self, '{}_table'.format(table), Base.classes[table].__table__) try: - logging.info(HelperBase.classes.keys()) + self.logger.info(HelperBase.classes.keys()) except: pass - for table in operations_tables: + for table in self.operations_tables: try: setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__) except Exception as e: - logging.info("Error setting attribute for table: {} : {}".format(table, e)) + self.logger.error("Error setting attribute for table: {} : {}".format(table, e)) # Increment so we are ready to insert the 'next one' of each of these most recent ids self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 # Organize different api keys/oauths available - if 'gh_api_key' in self.config: - self.init_oauths() - - # Send broker hello message - self.connect_to_broker() + if 'gh_api_key' in self.config or 'gitlab_api_key' in self.config: + self.init_oauths(self.platform) + else: + self.oauths = [{'oauth_id': 0}] @property def task(self): @@ -128,7 +244,7 @@ def task(self, value): # This setting is set by the housekeeper and is attached to the task before it gets sent here if 'focused_task' in value: if value['focused_task'] == 1: - logging.info("Focused task is ON\n") + self.logger.debug("Focused task is ON\n") self.finishing_task = True self._task = value @@ -143,21 +259,23 @@ def run(self): """ Kicks off the processing of the queue if it is not already being processed Gets run whenever a new task is added """ - logging.info("Running...\n") # Spawn a subprocess to handle message reading and performing the tasks self._child = Process(target=self.collect, args=()) self._child.start() - + def collect(self): """ Function to process each entry in the worker's task queue Determines what action to take based off the message type """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() while True: if not self._queue.empty(): message = self._queue.get() # Get the task off our MP queue else: break - logging.info("Popped off message: {}\n".format(str(message))) + self.logger.info("Popped off message: {}\n".format(str(message))) if message['job_type'] == 'STOP': break @@ -172,13 
+290,13 @@ def collect(self): SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' """.format(message['given'][self.given[0][0]])) repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - + self.logger.info("repo_id for which data collection is being initiated: {}".format(str(repo_id))) # Call method corresponding to model sent in task try: model_method = getattr(self, '{}_model'.format(message['models'][0])) self.record_model_process(repo_id, 'repo_info') except Exception as e: - logging.info('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) + + self.logger.error('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) + 'must have name of {}_model'.format(message['models'][0])) self.register_task_failure(message, repo_id, e) break @@ -186,18 +304,53 @@ def collect(self): # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught # and worker can move onto the next task without stopping try: + self.logger.info("Calling model method {}_models".format(message['models'][0])) model_method(message, repo_id) - except Exception as e: + except Exception as e: # this could be a custom exception, might make things easier self.register_task_failure(message, repo_id, e) - pass + break + + self.logger.debug('Closing database connections\n') + self.db.dispose() + self.helper_db.dispose() + self.logger.info("Collection process finished") def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): - """ map objects => { *our db col* : *gh json key*} """ + """ Include an extra key-value pair on each element of new_data that represents + the action that should be taken with this element (i.e. 'need_insertion') + + :param new_data: List of dictionaries, data to be assigned an action to + :param table_values: Pandas DataFrame, existing data in the database to check + what action should be taken on the new_data depending on the presence of + each element in this DataFrame + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. + Example: {'id': 'gh_issue_id'} + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. Example: {'id': 'gh_issue_id'} + :param table_pkey: String, the field name of the primary key of the table in + the database that we are checking the table_values for. + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. 
+ Example: {'cntrb_id': None} + :return: List of dictionaries, contains all the same elements of new_data, except + each element now has an extra key-value pair with the key being 'flag', and + the value being 'need_insertion', 'need_update', or 'none' + """ need_insertion_count = 0 need_update_count = 0 for i, obj in enumerate(new_data): if type(obj) != dict: - logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) + self.logger.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) continue obj['flag'] = 'none' # default of no action needed @@ -206,31 +359,37 @@ def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_ if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): continue - logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) + self.logger.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) obj['flag'] = 'need_insertion' need_insertion_count += 1 break if obj['flag'] == 'need_insertion': - logging.info('Already determined that current tuple needs insertion, skipping checking updates. ' + self.logger.info('Already determined that current tuple needs insertion, skipping checking updates. ' 'Moving to next tuple.\n') continue - existing_tuple = table_values[table_values[db_dupe_key].isin( + try: + existing_tuple = table_values[table_values[db_dupe_key].isin( [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] + except Exception as e: + self.logger.info('Special case assign_tuple_action error') + self.logger.info(f'Error: {e}') + self.logger.info(f'Related vars: {table_values}, ' + + f'{table_values[db_dupe_key].isin([obj[duplicate_col_map[db_dupe_key]]])}') # If we need to check the values of the existing tuple to determine if an update is needed for augur_col, value_check in value_update_col_map.items(): not_nan_check = not (math.isnan(value_check) and math.isnan(existing_tuple[augur_col])) if value_check is not None else True if existing_tuple[augur_col] != value_check and not_nan_check: continue - logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) + self.logger.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) obj['flag'] = 'need_update' obj['pkey'] = existing_tuple[table_pkey] need_update_count += 1 if obj['flag'] == 'need_update': - logging.info('Already determined that current tuple needs update, skipping checking further updates. ' + self.logger.info('Already determined that current tuple needs update, skipping checking further updates. 
' 'Moving to next tuple.\n') continue @@ -240,25 +399,34 @@ def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_ continue if obj[update_col_map[col]] == existing_tuple[col]: continue - logging.info("Found a tuple that needs an update for column: {}\n".format(col)) + self.logger.info("Found a tuple that needs an update for column: {}\n".format(col)) obj['flag'] = 'need_update' obj['pkey'] = existing_tuple[table_pkey] need_update_count += 1 - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + + self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count)) return new_data - def check_duplicates(new_data, table_values, key): + def check_duplicates(self, new_data, table_values, key): + """ Filters what items of the new_data json (list of dictionaries) that are not + present in the table_values df + + :param new_data: List of dictionaries, new data to filter duplicates out of + :param table_values: Pandas DataFrame, existing data to check what data is already + present in the database + :param key: String, key of each dict in new_data whose value we are checking + duplicates with + :return: List of dictionaries, contains elements of new_data that are not already + present in the database + """ need_insertion = [] for obj in new_data: - if type(obj) == dict: - if not table_values.isin([obj[key]]).any().any(): - need_insertion.append(obj) - # else: - # logging.info("Tuple with github's {} key value already".format(key) + - # "exists in our db: {}\n".format(str(obj[key]))) - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + + if type(obj) != dict: + continue + if not table_values.isin([obj[key]]).any().any(): + need_insertion.append(obj) + self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + "was reduced to {} tuples.\n".format(str(len(need_insertion)))) return need_insertion @@ -266,16 +434,16 @@ def connect_to_broker(self): connected = False for i in range(5): try: - logging.info("attempt {}\n".format(i)) + self.logger.debug("Connecting to broker, attempt {}\n".format(i)) if i > 0: time.sleep(10) requests.post('http://{}:{}/api/unstable/workers'.format( self.config['host_broker'],self.config['port_broker']), json=self.specs) - logging.info("Connection to the broker was successful\n") + self.logger.info("Connection to the broker was successful\n") connected = True break except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker. Trying again...\n') + self.logger.error('Cannot connect to the broker. Trying again...\n') if not connected: sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n') @@ -290,23 +458,39 @@ def dump_queue(queue): # time.sleep(.1) return result - def find_id_from_login(self, login): + def find_id_from_login(self, login, platform='github'): + """ + Retrieves our contributor table primary key value for the contributor with + the given GitHub login credentials, if this contributor is not there, then + they get inserted. 
+ + :param login: String, the GitHub login username to find the primary key id for + :return: Integer, the id of the row in our database with the matching GitHub login + """ idSQL = s.sql.text(""" - SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' - """.format(login)) + SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' \ + AND LOWER(data_source) = '{} api' + """.format(login, platform)) + + self.logger.info(idSQL) + rs = pd.read_sql(idSQL, self.db, params={}) data_list = [list(row) for row in rs.itertuples(index=False)] try: return data_list[0][0] except: - logging.info("contributor needs to be added...") + self.logger.info('contributor needs to be added...') - cntrb_url = ("https://api.github.com/users/" + login) - logging.info("Hitting endpoint: {} ...\n".format(cntrb_url)) + if platform == 'github': + cntrb_url = ("https://api.github.com/users/" + login) + elif platform == 'gitlab': + cntrb_url = ("https://gitlab.com/api/v4/users?username=" + login ) + self.logger.info("Hitting endpoint: {} ...\n".format(cntrb_url)) r = requests.get(url=cntrb_url, headers=self.headers) - self.update_gh_rate_limit(r) + self.update_rate_limit(r) contributor = r.json() + company = None location = None email = None @@ -317,46 +501,83 @@ def find_id_from_login(self, login): if 'email' in contributor: email = contributor['email'] - cntrb = { - "cntrb_login": contributor['login'] if 'login' in contributor else None, - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, - "cntrb_canonical": None, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } + if platform == 'github': + cntrb = { + "cntrb_login": contributor['login'] if 'login' in contributor else None, + "cntrb_email": email, + "cntrb_company": company, + "cntrb_location": location, + "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, + "cntrb_canonical": None, + "gh_user_id": contributor['id'], + "gh_login": contributor['login'], + "gh_url": contributor['url'], + "gh_html_url": contributor['html_url'], + "gh_node_id": contributor['node_id'], + "gh_avatar_url": contributor['avatar_url'], + "gh_gravatar_id": contributor['gravatar_id'], + "gh_followers_url": contributor['followers_url'], + "gh_following_url": contributor['following_url'], + "gh_gists_url": contributor['gists_url'], + "gh_starred_url": contributor['starred_url'], + "gh_subscriptions_url": contributor['subscriptions_url'], + "gh_organizations_url": contributor['organizations_url'], + "gh_repos_url": contributor['repos_url'], + "gh_events_url": 
contributor['events_url'], + "gh_received_events_url": contributor['received_events_url'], + "gh_type": contributor['type'], + "gh_site_admin": contributor['site_admin'], + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + + elif platform == 'gitlab': + cntrb = { + "cntrb_login": contributor[0]['username'] if 'username' in contributor[0] else None, + "cntrb_email": email, + "cntrb_company": company, + "cntrb_location": location, + "cntrb_created_at": contributor[0]['created_at'] if 'created_at' in contributor[0] else None, + "cntrb_canonical": None, + "gh_user_id": contributor[0]['id'], + "gh_login": contributor[0]['username'], + "gh_url": contributor[0]['web_url'], + "gh_html_url": None, + "gh_node_id": None, + "gh_avatar_url": contributor[0]['avatar_url'], + "gh_gravatar_id": None, + "gh_followers_url": None, + "gh_following_url": None, + "gh_gists_url": None, + "gh_starred_url": None, + "gh_subscriptions_url": None, + "gh_organizations_url": None, + "gh_repos_url": None, + "gh_events_url": None, + "gh_received_events_url": None, + "gh_type": None, + "gh_site_admin": None, + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted contributor: " + contributor['login'] + "\n") + self.logger.info("Inserted contributor: " + contributor[0]['username'] + "\n") - return self.find_id_from_login(login) + return self.find_id_from_login(login, platform) - def get_owner_repo(self, github_url): - split = github_url.split('/') + def get_owner_repo(self, git_url): + """ Gets the owner and repository names of a repository from a git url + + :param git_url: String, the git url of a repository + :return: Tuple, includes the owner and repository names in that order + """ + split = git_url.split('/') owner = split[-2] repo = split[-1] @@ -367,6 +588,19 @@ def get_owner_repo(self, github_url): return owner, repo def get_max_id(self, table, column, default=25150, operations_table=False): + """ Gets the max value (usually used for id/pk's) of any Integer column + of any table + + :param table: String, the table that consists of the column you want to + query a max value for + :param column: String, the column that you want to query the max value for + :param default: Integer, if there are no values in the + specified column, the value of this parameter will be returned + :param operations_table: Boolean, if True, this signifies that the table/column + that is wanted to be queried is in the augur_operations schema rather than + the augur_data schema. 
Default False + :return: Integer, the max value of the specified column/table + """ maxIdSQL = s.sql.text(""" SELECT max({0}.{1}) AS {1} FROM {0} @@ -375,14 +609,24 @@ def get_max_id(self, table, column, default=25150, operations_table=False): rs = pd.read_sql(maxIdSQL, db, params={}) if rs.iloc[0][column] is not None: max_id = int(rs.iloc[0][column]) + 1 - logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) + self.logger.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) else: max_id = default - logging.info("Could not find max id for {} column in the {} table... using default set to: \ - {}\n".format(column, table, max_id)) + self.logger.warning('Could not find max id for {} column in the {} table... ' + + 'using default set to: {}\n'.format(column, table, max_id)) return max_id def get_table_values(self, cols, tables, where_clause=""): + """ Can query all values of any column(s) from any table(s) + with an optional where clause + + :param cols: List of Strings, column(s) that user wants to query + :param tables: List of Strings, table(s) that user wants to query + :param where_clause: String, optional where clause to filter the values + queried + :return: Pandas DataFrame, contains all values queried in the columns, tables, and + optional where clause provided + """ table_str = tables[0] del tables[0] @@ -394,46 +638,103 @@ def get_table_values(self, cols, tables, where_clause=""): for col in cols: col_str += ", " + col - tableValuesSQL = s.sql.text(""" + table_values_sql = s.sql.text(""" SELECT {} FROM {} {} """.format(col_str, table_str, where_clause)) - logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL)) - values = pd.read_sql(tableValuesSQL, self.db, params={}) + self.logger.info('Getting table values with the following PSQL query: \n{}\n'.format( + table_values_sql)) + values = pd.read_sql(table_values_sql, self.db, params={}) return values - def init_oauths(self): + def init_oauths(self, platform="github"): self.oauths = [] self.headers = None - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - # Make a list of api key in the config combined w keys stored in the database - oauthSQL = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(self.config['gh_api_key'])) - for oauth in [{'oauth_id': 0, 'access_token': self.config['gh_api_key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}\n".format(oauth)) + # Select endpoint to hit solely to retrieve rate limit information from headers of the response + # Adjust header keys needed to fetch rate limit information from the API responses + if platform == "github": + url = "https://api.github.com/users/gabe-heim" + oauthSQL = s.sql.text(""" + SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'github' + """.format(self.config['gh_api_key'])) + key_name = "gh_api_key" + rate_limit_header_key = "X-RateLimit-Remaining" + rate_limit_reset_header_key = "X-RateLimit-Reset" + elif platform == "gitlab": + url = "https://gitlab.com/api/v4/version" + oauthSQL = s.sql.text(""" + SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'gitlab' + """.format(self.config['gitlab_api_key'])) + key_name = 
"gitlab_api_key" + rate_limit_header_key = "ratelimit-remaining" + rate_limit_reset_header_key = "ratelimit-reset" + + for oauth in [{'oauth_id': 0, 'access_token': self.config[key_name]}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): + if platform == "github": + self.headers = {'Authorization': 'token %s' % oauth['access_token']} + elif platform == "gitlab": + self.headers = {'Authorization': 'Bearer %s' % oauth['access_token']} + self.logger.info("Getting rate limit info for oauth: {}\n".format(oauth)) response = requests.get(url=url, headers=self.headers) self.oauths.append({ 'oauth_id': oauth['oauth_id'], 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() + 'rate_limit': int(response.headers[rate_limit_header_key]), + 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers[rate_limit_reset_header_key])) - datetime.datetime.now()).total_seconds() }) - logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) + self.logger.debug("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") + self.logger.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") # First key to be used will be the one specified in the config (first element in # self.oauths array will always be the key in use) + if platform == "github": + self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + elif platform == "gitlab": + self.headers = {'Authorization': 'Bearer %s' % self.oauths[0]['access_token']} + self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + self.logger.info("OAuth initialized") + + def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}, platform="github"): + """ Paginate either backwards or forwards (depending on the value of the worker's + finishing_task attribute) through all the GitHub or GitLab api endpoint pages. + + :param url: String, the url of the API endpoint we are paginating through, expects + a curly brace string formatter within the string to format the Integer + representing the page number that is wanted to be returned + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. Example: {'id': 'gh_issue_id'} + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. 
+ Example: {'id': 'gh_issue_id'} + :param table: String, the name of the table that holds the values to check for + duplicates/updates against + :param table_pkey: String, the field name of the primary key of the table in + the database that we are getting the values for to cross-reference to check + for duplicates. + :param where_clause: String, optional where clause to filter the values + that are queried when preparing the values that will be cross-referenced + for duplicates/updates + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. + Example: {'cntrb_id': None} + :return: List of dictionaries, all data points from the pages of the specified API endpoint + each with a 'flag' key-value pair representing the required action to take with that + data point (i.e. 'need_insertion', 'need_update', 'none') + """ - def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}): - # Paginate backwards through all the tuples but get first page in order - # to determine if there are multiple pages and if the 1st page covers all update_keys = list(update_col_map.keys()) if update_col_map else [] update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] @@ -446,10 +747,18 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh num_attempts = 0 success = False while num_attempts < 3: - logging.info("Hitting endpoint: " + url.format(i) + " ...\n") + self.logger.info(f'Hitting endpoint: {url.format(i)}...\n') r = requests.get(url=url.format(i), headers=self.headers) - self.update_gh_rate_limit(r) - logging.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) + + self.update_rate_limit(r, platform=platform) + if 'last' not in r.links: + last_page = None + else: + if platform == "github": + last_page = r.links['last']['url'][-6:].split('=')[1] + elif platform == "gitlab": + last_page = r.links['last']['url'].split('&')[2].split("=")[1] + self.logger.info("Analyzing page {} of {}\n".format(i, int(last_page) + 1 if last_page is not None else '*last page not known*')) try: j = r.json() @@ -460,21 +769,23 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh success = True break elif type(j) == dict: - logging.info("Request returned a dict: {}\n".format(j)) + self.logger.info("Request returned a dict: {}\n".format(j)) if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.warning("Github repo was not found or does not exist for endpoint: {}\n".format(url)) break if j['message'] == 'You have triggered an abuse detection mechanism. 
Please wait a few minutes before you try again.': num_attempts -= 1 - self.update_gh_rate_limit(r, temporarily_disable=True) + self.logger.info("rate limit update code goes here") + self.update_rate_limit(r, temporarily_disable=True,platform=platform) if j['message'] == 'Bad credentials': - self.update_gh_rate_limit(r, bad_credentials=True) + self.logger.info("rate limit update code goes here") + self.update_rate_limit(r, bad_credentials=True, platform=platform) elif type(j) == str: - logging.info("J was string: {}\n".format(j)) + self.logger.info(f'J was string: {j}\n') if '<!DOCTYPE html>' in j: - logging.info("HTML was returned, trying again...\n") + self.logger.info('HTML was returned, trying again...\n') elif len(j) == 0: - logging.info("Empty string, trying again...\n") + self.logger.warning('Empty string, trying again...\n') else: try: j = json.loads(j) @@ -488,44 +799,52 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh # Find last page so we can decrement from there if 'last' in r.links and not multiple_pages and not self.finishing_task: - param = r.links['last']['url'][-6:] - i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + if platform == "github": + param = r.links['last']['url'][-6:] + i = int(param.split('=')[1]) + 1 + elif platform == "gitlab": + i = int(r.links['last']['url'].split('&')[2].split("=")[1]) + 1 + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") multiple_pages = True elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") + self.logger.info("Only 1 page of request\n") elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ..." + self.logger.info("Finishing a previous task, paginating forwards ..." " excess rate limit requests will be made\n") - + if len(j) == 0: - logging.info("Response was empty, breaking from pagination.\n") + self.logger.info("Response was empty, breaking from pagination.\n") break - + # Checking contents of requests with what we already have in the db j = self.assign_tuple_action(j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) if not j: - logging.info("Assigning tuple action failed, moving to next page.\n") + self.logger.error("Assigning tuple action failed, moving to next page.\n") i = i + 1 if self.finishing_task else i - 1 continue try: - to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none'] + to_add = [obj for obj in j if obj not in tuples and (obj['flag'] != 'none')] except Exception as e: - logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) + self.logger.error("Failure accessing data of page: {}. 
Moving to next page.\n".format(e)) i = i + 1 if self.finishing_task else i - 1 continue if len(to_add) == 0 and multiple_pages and 'last' in r.links: - logging.info("{}".format(r.links['last'])) - if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown tuples, breaking from pagination.\n") + self.logger.info("{}".format(r.links['last'])) + if platform == "github": + page_number = int(r.links['last']['url'][-6:].split('=')[1]) + elif platform == "gitlab": + page_number = int(r.links['last']['url'].split('&')[2].split("=")[1]) + if i - 1 != page_number: + self.logger.info("No more pages with unknown tuples, breaking from pagination.\n") break + tuples += to_add i = i + 1 if self.finishing_task else i - 1 # Since we already wouldve checked the first page... break if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") + self.logger.info("No more pages to check, breaking from pagination.\n") break return tuples @@ -535,24 +854,16 @@ def query_github_contributors(self, entry_info, repo_id): """ Data collection function Query the GitHub API for contributors """ - logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n") + self.logger.info(f'Querying contributors with given entry info: {entry_info}\n') github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] # Extract owner/repo from the url for the endpoint - path = urlparse(github_url) - split = path[2].split('/') - - owner = split[1] - name = split[2] - - # Handles git url case by removing the extension - if ".git" in name: - name = name[:-4] + owner, name = self.get_owner_repo(github_url) # Set the base of the url and place to hold contributors to insert - contributors_url = ("https://api.github.com/repos/" + owner + "/" + - name + "/contributors?per_page=100&page={}") + contributors_url = (f'https://api.github.com/repos/{owner}/{name}/' + + 'contributors?per_page=100&page={}') # Get contributors that we already have stored # Set our duplicate and update column map keys (something other than PK) to @@ -565,7 +876,7 @@ def query_github_contributors(self, entry_info, repo_id): #list to hold contributors needing insertion or update contributors = self.paginate(contributors_url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") + self.logger.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") for repo_contributor in contributors: try: @@ -573,7 +884,7 @@ def query_github_contributors(self, entry_info, repo_id): # `created at` # i think that's it cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) self.update_gh_rate_limit(r) contributor = r.json() @@ -624,69 +935,118 @@ def query_github_contributors(self, entry_info, repo_id): if repo_contributor['flag'] == 'need_update': result = self.db.execute(self.contributors_table.update().where( self.worker_history_table.c.cntrb_email==email).values(cntrb)) - logging.info("Updated tuple in the contributors table with existing email: {}".format(email)) + self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email)) 
self.cntrb_id_inc = repo_contributor['pkey'] elif repo_contributor['flag'] == 'need_insertion': result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: " + contributor['login'] + "\n") + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") # Increment our global track of the cntrb id for the possibility of it being used as a FK self.cntrb_id_inc = int(result.inserted_primary_key[0]) except Exception as e: - logging.info("Caught exception: {}".format(e)) - logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) + self.logger.error("Caught exception: {}".format(e)) + self.logger.error("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) continue - def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): - """ - Read a variable in specified section of the config file, unless provided an environment variable + def query_gitlab_contribtutors(self, entry_info, repo_id): - :param section: location of given variable - :param name: name of variable - """ - config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path) - _config_file_name = 'augur.config.json' - _config_bad = False - _already_exported = {} - _runtime_location = 'runtime/' - _default_config = {} - _config_file = None + gitlab_url = entry_info['given']['gitlab_url'] if 'gitlab_url' in entry_info['given'] else entry_info['given']['git_url'] - try: - _config_file = open(config_file_path, 'r+') - except: - print('Couldn\'t open {}'.format(_config_file_name)) + self.logger.info("Querying contributors with given entry info: " + str(entry_info) + "\n") - # Load the config file - try: - config_text = _config_file.read() - _config = json.loads(config_text) - except json.decoder.JSONDecodeError as e: - if not _config_bad: - _using_config_file = False - print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: {}'.format(config_file_path, str(e))) - _config = _default_config - - value = None - if environment_variable is not None: - value = os.getenv(environment_variable) - if value is None: + path = urlparse(gitlab_url) + split = path[2].split('/') + + owner = split[1] + name = split[2] + + # Handles git url case by removing the extension + if ".git" in name: + name = name[:-4] + + url_encoded_format = quote(owner + '/' + name, safe='') + + table = 'contributors' + table_pkey = 'cntrb_id' + update_col_map = {'cntrb_email': 'email'} + duplicate_col_map = {'cntrb_login': 'email'} + + # list to hold contributors needing insertion or update + contributors = self.paginate("https://gitlab.com/api/v4/projects/" + url_encoded_format + "/repository/contributors?per_page=100&page={}", duplicate_col_map, update_col_map, table, table_pkey, platform='gitlab') + + for repo_contributor in contributors: try: - if name is not None: - value = _config[section][name] - else: - value = _config[section] - except Exception as e: - value = default - if not section in _config: - _config[section] = {} + cntrb_compressed_url = ("https://gitlab.com/api/v4/users?search=" + repo_contributor['email']) + self.logger.info("Hitting endpoint: " + cntrb_compressed_url + " ...\n") + r = requests.get(url=cntrb_compressed_url, headers=self.headers) + contributor_compressed = r.json() + + email = repo_contributor['email'] + if len(contributor_compressed) == 0 or "id" not in contributor_compressed[0]: + continue + + self.logger.info("Fetching for user: " + str(contributor_compressed[0]["id"])) + + cntrb_url = ("https://gitlab.com/api/v4/users/" + str(contributor_compressed[0]["id"])) + self.logger.info("Hitting end point to get complete contributor info now: " + cntrb_url + "...\n") + r = requests.get(url=cntrb_url, headers=self.headers) + contributor = r.json() - return value + cntrb = { + "cntrb_login": contributor.get('username', None), + "cntrb_created_at": contributor.get('created_at', None), + "cntrb_email": email, + "cntrb_company": contributor.get('organization', None), + "cntrb_location": contributor.get('location', None), + # "cntrb_type": , dont have a use for this as of now ... 
let it default to null + "cntrb_canonical": contributor.get('public_email', None), + "gh_user_id": contributor.get('id', None), + "gh_login": contributor.get('username', None), + "gh_url": contributor.get('web_url', None), + "gh_html_url": contributor.get('web_url', None), + "gh_node_id": None, + "gh_avatar_url": contributor.get('avatar_url', None), + "gh_gravatar_id": None, + "gh_followers_url": None, + "gh_following_url": None, + "gh_gists_url": None, + "gh_starred_url": None, + "gh_subscriptions_url": None, + "gh_organizations_url": None, + "gh_repos_url": None, + "gh_events_url": None, + "gh_received_events_url": None, + "gh_type": None, + "gh_site_admin": None, + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + + # Commit insertion to table + if repo_contributor['flag'] == 'need_update': + result = self.db.execute(self.contributors_table.update().where( + self.worker_history_table.c.cntrb_email == email).values(cntrb)) + self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email)) + self.cntrb_id_inc = repo_contributor['pkey'] + elif repo_contributor['flag'] == 'need_insertion': + result = self.db.execute(self.contributors_table.insert().values(cntrb)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.results_counter += 1 + self.logger.info("Inserted contributor: " + contributor['username'] + "\n") + + # Increment our global track of the cntrb id for the possibility of it being used as a FK + self.cntrb_id_inc = int(result.inserted_primary_key[0]) + + except Exception as e: + self.logger.info("Caught exception: {}".format(e)) + self.logger.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) + continue def record_model_process(self, repo_id, model): @@ -705,7 +1065,7 @@ def record_model_process(self, repo_id, model): self.history_id += 1 else: result = self.helper_db.execute(self.worker_history_table.insert().values(task_history)) - logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) + self.logger.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) self.history_id = int(result.inserted_primary_key[0]) def register_task_completion(self, task, repo_id, model): @@ -716,10 +1076,12 @@ def register_task_completion(self, task, repo_id, model): 'repo_id': repo_id, 'job_model': model } - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] if 'git_url' in task['given'] else "INVALID_GIVEN" + key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \ + 'gitlab_url' if 'gitlab_url' in task['given'] else 'INVALID_GIVEN' + task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] \ + if 'git_url' in task['given'] else task['given']['gitlab_url'] if 'gitlab_url' in task['given'] else 'INVALID_GIVEN' if key == 'INVALID_GIVEN': - self.register_task_failure(task, repo_id, "INVALID_GIVEN: not github nor git url") + self.register_task_failure(task, repo_id, "INVALID_GIVEN: Not a github/gitlab/git url.") return # Add to history table @@ -735,7 +1097,7 @@ def register_task_completion(self, task, repo_id, model): 
self.helper_db.execute(self.worker_history_table.update().where( self.worker_history_table.c.history_id==self.history_id).values(task_history)) - logging.info("Recorded job completion for: " + str(task_completed) + "\n") + self.logger.info("Recorded job completion for: " + str(task_completed) + "\n") # Update job process table updated_job = { @@ -746,28 +1108,31 @@ def register_task_completion(self, task, repo_id, model): } self.helper_db.execute(self.worker_job_table.update().where( self.worker_job_table.c.job_model==model).values(updated_job)) - logging.info("Updated job process for model: " + model + "\n") + self.logger.info("Updated job process for model: " + model + "\n") - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") + if self.config["offline_mode"] is False: + + # Notify broker of completion + self.logger.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + + "This task inserted: " + str(self.results_counter) + " tuples.\n") - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['host_broker'],self.config['port_broker']), json=task_completed) + requests.post('http://{}:{}/api/unstable/completed_task'.format( + self.config['host_broker'],self.config['port_broker']), json=task_completed) # Reset results counter for next task self.results_counter = 0 def register_task_failure(self, task, repo_id, e): - logging.info("Worker ran into an error for task: {}\n".format(task)) - logging.info("Printing traceback...\n") + self.logger.error("Worker ran into an error for task: {}\n".format(task)) + self.logger.error("Printing traceback...\n") tb = traceback.format_exc() - logging.info(tb) + self.logger.error(tb) - logging.info(f'This task inserted {self.results_counter} tuples before failure.\n') - logging.info("Notifying broker and logging task failure in database...\n") - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" + self.logger.info(f'This task inserted {self.results_counter} tuples before failure.\n') + self.logger.info("Notifying broker and logging task failure in database...\n") + key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \ + 'gitlab_url' if 'gitlab_url' in task['given'] else 'INVALID_GIVEN' url = task['given'][key] """ Query all repos with repo url of given task """ @@ -781,9 +1146,11 @@ def register_task_failure(self, task, repo_id, e): requests.post("http://{}:{}/api/unstable/task_error".format( self.config['host_broker'],self.config['port_broker']), json=task) except requests.exceptions.ConnectionError: - logging.error('Could not send task failure message to the broker\n') + self.logger.error('Could not send task failure message to the broker\n') + self.logger.error(e) except Exception: - logging.exception('An error occured while informing broker about task failure\n') + self.logger.error('An error occured while informing broker about task failure\n') + self.logger.error(e) # Add to history table task_history = { @@ -797,7 +1164,7 @@ def register_task_failure(self, task, repo_id, e): } self.helper_db.execute(self.worker_history_table.update().where(self.worker_history_table.c.history_id==self.history_id).values(task_history)) - logging.info("Recorded job error in the history table for: " + str(task) + "\n") + self.logger.error("Recorded job error in the history table 
for: " + str(task) + "\n") # Update job process table updated_job = { @@ -807,7 +1174,7 @@ def register_task_failure(self, task, repo_id, e): "analysis_state": 0 } self.helper_db.execute(self.worker_job_table.update().where(self.worker_job_table.c.job_model==task['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + task['models'][0] + "\n") + self.logger.info("Updated job process for model: " + task['models'][0] + "\n") # Reset results counter for next task self.results_counter = 0 @@ -830,35 +1197,97 @@ def retrieve_tuple(self, key_values, tables): SELECT * FROM {} WHERE {} """.format(table_str, where_str)) values = json.loads(pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")) - return values + return values + + def update_gitlab_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): + # Try to get rate limit from request headers, sometimes it does not work (GH's issue) + # In that case we just decrement from last recieved header count + if bad_credentials and len(self.oauths) > 1: + self.logger.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) + del self.oauths[0] + + if temporarily_disable: + self.logger.info("Gitlab rate limit reached. Temp. disabling...\n") + self.oauths[0]['rate_limit'] = 0 + else: + try: + self.oauths[0]['rate_limit'] = int(response.headers['RateLimit-Remaining']) + self.logger.info("Recieved rate limit from headers\n") + except: + self.oauths[0]['rate_limit'] -= 1 + self.logger.info("Headers did not work, had to decrement\n") + self.logger.info("Updated rate limit, you have: " + + str(self.oauths[0]['rate_limit']) + " requests remaining.\n") + if self.oauths[0]['rate_limit'] <= 0: + try: + reset_time = response.headers['RateLimit-Reset'] + except Exception as e: + self.logger.info("Could not get reset time from headers because of error: {}".format(e)) + reset_time = 3600 + time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() + self.logger.info("Rate limit exceeded, checking for other available keys to use.\n") + + # We will be finding oauth with the highest rate limit left out of our list of oauths + new_oauth = self.oauths[0] + # Endpoint to hit solely to retrieve rate limit information from headers of the response + url = "https://gitlab.com/api/v4/version" + + other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] + for oauth in other_oauths: + self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) + self.headers = {"PRIVATE-TOKEN" : oauth['access_token']} + response = requests.get(url=url, headers=self.headers) + oauth['rate_limit'] = int(response.headers['RateLimit-Remaining']) + oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() + + # Update oauth to switch to if a higher limit is found + if oauth['rate_limit'] > new_oauth['rate_limit']: + self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth)) + new_oauth = oauth + elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: + self.logger.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) + new_oauth = oauth + + if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: + self.logger.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) + 
time.sleep(new_oauth['seconds_to_reset']) + + # Make new oauth the 0th element in self.oauths so we know which one is in use + index = self.oauths.index(new_oauth) + self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] + self.logger.info("Using oauth: {}\n".format(self.oauths[0])) + + # Change headers to be using the new oauth's key + self.headers = {"PRIVATE-TOKEN" : self.oauths[0]['access_token']} + def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): # Try to get rate limit from request headers, sometimes it does not work (GH's issue) # In that case we just decrement from last recieved header count if bad_credentials and len(self.oauths) > 1: - logging.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) + self.logger.warning("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) del self.oauths[0] if temporarily_disable: - logging.info("Github thinks we are abusing their api. Preventing use of this key until it resets...\n") + self.logger.debug("Github thinks we are abusing their api. Preventing use of this key until it resets...\n") self.oauths[0]['rate_limit'] = 0 else: try: self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - logging.info("Recieved rate limit from headers\n") + self.logger.info("Recieved rate limit from headers\n") except: self.oauths[0]['rate_limit'] -= 1 - logging.info("Headers did not work, had to decrement\n") - logging.info("Updated rate limit, you have: " + + self.logger.info("Headers did not work, had to decrement\n") + self.logger.info("Updated rate limit, you have: " + str(self.oauths[0]['rate_limit']) + " requests remaining.\n") if self.oauths[0]['rate_limit'] <= 0: try: reset_time = response.headers['X-RateLimit-Reset'] except Exception as e: - logging.info("Could not get reset time from headers because of error: {}".format(error)) + self.logger.error("Could not get reset time from headers because of error: {}".format(e)) reset_time = 3600 time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() - logging.info("Rate limit exceeded, checking for other available keys to use.\n") + self.logger.info("Rate limit exceeded, checking for other available keys to use.\n") # We will be finding oauth with the highest rate limit left out of our list of oauths new_oauth = self.oauths[0] @@ -867,7 +1296,7 @@ def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disa other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] for oauth in other_oauths: - logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) + self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) self.headers = {'Authorization': 'token %s' % oauth['access_token']} response = requests.get(url=url, headers=self.headers) oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) @@ -875,20 +1304,28 @@ def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disa # Update oauth to switch to if a higher limit is found if oauth['rate_limit'] > new_oauth['rate_limit']: - logging.info("Higher rate limit found in oauth: {}\n".format(oauth)) + self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth)) new_oauth = oauth elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: - logging.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) + 
self.logger.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) new_oauth = oauth if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: - logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) + self.logger.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) time.sleep(new_oauth['seconds_to_reset']) # Make new oauth the 0th element in self.oauths so we know which one is in use index = self.oauths.index(new_oauth) self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] - logging.info("Using oauth: {}\n".format(self.oauths[0])) + self.logger.info("Using oauth: {}\n".format(self.oauths[0])) # Change headers to be using the new oauth's key self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + + def update_rate_limit(self, response, bad_credentials=False, temporarily_disable=False, platform="gitlab"): + if platform == 'gitlab': + return self.update_gitlab_rate_limit(response, bad_credentials=bad_credentials, + temporarily_disable=temporarily_disable) + elif platform == 'github': + return self.update_gh_rate_limit(response, bad_credentials=bad_credentials, + temporarily_disable=temporarily_disable) \ No newline at end of file
diff --git a/test/api/test_experimental_routes.py b/test/api/test_experimental_routes.py deleted file mode 100644 --- a/test/api/test_experimental_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_insight_routes.py b/test/api/test_insight_routes.py deleted file mode 100644 --- a/test/api/test_insight_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_message_routes.py b/test/api/test_message_routes.py deleted file mode 100644 --- a/test/api/test_message_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_platform_routes.py b/test/api/test_platform_routes.py deleted file mode 100644 --- a/test/api/test_platform_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_util_routes.py b/test/api/test_util_routes.py deleted file mode 100644 --- a/test/api/test_util_routes.py +++ /dev/null @@ -1,31 +0,0 @@ -import requests -import pytest - [email protected](scope="session") -def metrics(): - pass - -def test_common(endpoint="http://localhost:5000/api/unstable/repos"): - response = requests.get(endpoint) - data = response.json() - assert response.status_code == 200 - assert len(data) >= 1 - -def test_get_all_repos(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repos') - -def test_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_repos_in_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_get_repo_for_dosocs(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/dosocs/repos') - -def test_aggregate_summary_by_repo(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/repos/25430/aggregate-summary') - -def test_aggregate_summary_by_group(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/aggregate-summary') - diff --git a/test/metrics/test_experimental_metrics.py b/test/metrics/test_experimental_metrics.py deleted file mode 100644 --- a/test/metrics/test_experimental_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_insight_metrics.py b/test/metrics/test_insight_metrics.py deleted file mode 100644 --- a/test/metrics/test_insight_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_message_metrics.py b/test/metrics/test_message_metrics.py deleted file mode 100644 --- a/test/metrics/test_message_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_platform_metrics.py 
b/test/metrics/test_platform_metrics.py deleted file mode 100644 --- a/test/metrics/test_platform_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_util_metrics.py b/test/metrics/test_util_metrics.py deleted file mode 100644 --- a/test/metrics/test_util_metrics.py +++ /dev/null @@ -1,14 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - -# def test_get_repos_for_dosocs(metrics): -# assert metrics.get_repos_for_dosocs().isin( -# ['/home/sean/git-repos/25430/github.com/rails/rails-dom-testing']).any().any() - diff --git a/augur/housekeeper/__init__.py b/tests/__init__.py similarity index 100% rename from augur/housekeeper/__init__.py rename to tests/__init__.py diff --git a/tests/test_application.py b/tests/test_application.py new file mode 100644 --- /dev/null +++ b/tests/test_application.py @@ -0,0 +1,20 @@ +import pytest +import augur.application +import sqlalchemy as s +import json + +from augur.application import Application + +def test_init_augur_regular(): + augur_app = Application(disable_logs=True) + assert augur_app is not None + +def test_connect_to_database(monkeypatch): + def mock_fail_connection(self): + raise(s.exc.OperationalError("fake", "error", "message")) + + monkeypatch.setattr(s.engine.Engine, "connect", mock_fail_connection) + monkeypatch.setenv("AUGUR_LOG_QUIET", "1") + + with pytest.raises(s.exc.OperationalError): + augur_app = Application(disable_logs=True) diff --git a/test/metrics/test_commit_metrics.py b/tests/test_metrics/test_commit_metrics.py similarity index 90% rename from test/metrics/test_commit_metrics.py rename to tests/test_metrics/test_commit_metrics.py --- a/test/metrics/test_commit_metrics.py +++ b/tests/test_metrics/test_commit_metrics.py @@ -2,12 +2,6 @@ import pytest [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_annual_commit_count_ranked_by_repo_in_repo_group(metrics): assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10).iloc[0].net > 0 assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10, 25430).iloc[0].net > 0 diff --git a/test/metrics/test_contributor_metrics.py b/tests/test_metrics/test_contributor_metrics.py similarity index 91% rename from test/metrics/test_contributor_metrics.py rename to tests/test_metrics/test_contributor_metrics.py --- a/test/metrics/test_contributor_metrics.py +++ b/tests/test_metrics/test_contributor_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_contributors(metrics): # repo group assert metrics.contributors(20).iloc[0]['total'] > 0 diff --git a/test/metrics/test_issue_metrics.py b/tests/test_metrics/test_issue_metrics.py similarity index 97% rename from test/metrics/test_issue_metrics.py rename to tests/test_metrics/test_issue_metrics.py --- a/test/metrics/test_issue_metrics.py +++ b/tests/test_metrics/test_issue_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def 
test_issues_new(metrics): #repo_id assert metrics.issues_new(10, 25430, period='year').iloc[0]['issues'] > 0 diff --git a/test/metrics/test_pull_request_metrics.py b/tests/test_metrics/test_pull_request_metrics.py similarity index 91% rename from test/metrics/test_pull_request_metrics.py rename to tests/test_metrics/test_pull_request_metrics.py --- a/test/metrics/test_pull_request_metrics.py +++ b/tests/test_metrics/test_pull_request_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_pull_requests_merge_contributor_new(metrics): # repo id assert metrics.pull_requests_merge_contributor_new(10, repo_id=25430, period='year').isin( diff --git a/test/metrics/test_repo_meta_metrics.py b/tests/test_metrics/test_repo_meta_metrics.py similarity index 96% rename from test/metrics/test_repo_meta_metrics.py rename to tests/test_metrics/test_repo_meta_metrics.py --- a/test/metrics/test_repo_meta_metrics.py +++ b/tests/test_metrics/test_repo_meta_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_code_changes(metrics): #repo_id assert metrics.code_changes(10, 25430, period='year').isin([pd.Timestamp('2019-01-01T00:00:00+00:00'), 2]).any().any() diff --git a/test/api/runner.py b/tests/test_routes/runner.py similarity index 84% rename from test/api/runner.py rename to tests/test_routes/runner.py --- a/test/api/runner.py +++ b/tests/test_routes/runner.py @@ -9,9 +9,10 @@ start = subprocess.Popen(["augur", "run", "--disable-housekeeper", "--skip-cleanup"], stdout=FNULL, stderr=subprocess.STDOUT) print("Waiting for the server to start...") time.sleep(5) -process = subprocess.run(["pytest", "-ra", "--tb=short", "-x", "test/metrics"]) + +process = subprocess.run(["pytest", "tests/test_routes/"]) time.sleep(2) + subprocess.Popen(["augur", "util", "kill"], stdout=FNULL, stderr=subprocess.STDOUT) print("Server successfully shutdown.") - sys.exit(process.returncode) diff --git a/test/api/test_commit_routes.py b/tests/test_routes/test_commit_routes.py similarity index 97% rename from test/api/test_commit_routes.py rename to tests/test_routes/test_commit_routes.py --- a/test/api/test_commit_routes.py +++ b/tests/test_routes/test_commit_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_annual_commit_count_ranked_by_new_repo_in_repo_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/annual-commit-count-ranked-by-new-repo-in-repo-group/') data = response.json() diff --git a/test/api/test_contributor_routes.py b/tests/test_routes/test_contributor_routes.py similarity index 95% rename from test/api/test_contributor_routes.py rename to tests/test_routes/test_contributor_routes.py --- a/test/api/test_contributor_routes.py +++ b/tests/test_routes/test_contributor_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_contributors_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/contributors') data = response.json() diff --git a/test/api/test_issue_routes.py b/tests/test_routes/test_issue_routes.py similarity index 99% rename from test/api/test_issue_routes.py rename to 
tests/test_routes/test_issue_routes.py --- a/test/api/test_issue_routes.py +++ b/tests/test_routes/test_issue_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_issues_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/issues-new') data = response.json() diff --git a/test/api/test_pull_request_routes.py b/tests/test_routes/test_pull_request_routes.py similarity index 94% rename from test/api/test_pull_request_routes.py rename to tests/test_routes/test_pull_request_routes.py --- a/test/api/test_pull_request_routes.py +++ b/tests/test_routes/test_pull_request_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_pull_requests_merge_contributor_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/pull-requests-merge-contributor-new') data = response.json() diff --git a/test/api/test_repo_meta_routes.py b/tests/test_routes/test_repo_meta_routes.py similarity index 98% rename from test/api/test_repo_meta_routes.py rename to tests/test_routes/test_repo_meta_routes.py --- a/test/api/test_repo_meta_routes.py +++ b/tests/test_routes/test_repo_meta_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_code_changes_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/code-changes') data = response.json() @@ -51,7 +47,6 @@ def test_sub_projects_by_repo(metrics): def test_cii_best_practices_badge_by_repo(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/repos/25430/cii-best-practices-badge') - print(response) data = response.json() assert response.status_code == 200 assert len(data) >= 1 diff --git a/tests/test_routes/test_util_routes.py b/tests/test_routes/test_util_routes.py new file mode 100644 --- /dev/null +++ b/tests/test_routes/test_util_routes.py @@ -0,0 +1,20 @@ +import requests +import pytest + +from conftest import create_full_routes + +util_routes = [\ +"repos",\ +"repo-groups",\ +"repo-groups",\ +"dosocs/repos",\ +"repo-groups/<default_repo_group_id>/aggregate-summary",\ +"repo-groups/<default_repo_group_id>/repos/<default_repo_id>/aggregate-summary",\ +] + [email protected]("endpoint", create_full_routes(util_routes)) +def test_base_test(client, endpoint): + response = client.get(endpoint) + data = response.get_json() + assert response.status_code == 200 + assert len(data) >= 1 diff --git a/tests/test_workers/test_repo_info_worker.py b/tests/test_workers/test_repo_info_worker.py new file mode 100644 --- /dev/null +++ b/tests/test_workers/test_repo_info_worker.py @@ -0,0 +1,29 @@ +import pytest +from time import sleep + +from workers.repo_info_worker.repo_info_worker import RepoInfoWorker + [email protected] +def test_task(): + return { + "given": { + "github_url": "https://github.com/chaoss/augur.git" + }, + "models": ["repo_info"], + "job_type": "MAINTAIN", + "display_name": "repo_info model for url: https://github.com/chaoss/augur.git", + "focused_task": 1 + } + [email protected] +def repo_info_worker(): + config = { + "offline_mode": True, + "quiet": True + } + + repo_info_worker = RepoInfoWorker(config=config) + return repo_info_worker + +def test_repo_info_worker(repo_info_worker, test_task): + assert repo_info_worker is not None diff --git a/test/__init__.py b/workers/contributor_worker/__init__.py 
similarity index 100% rename from test/__init__.py rename to workers/contributor_worker/__init__.py diff --git a/test/test_model.py b/workers/github_worker/__init__.py similarity index 100% rename from test/test_model.py rename to workers/github_worker/__init__.py diff --git a/workers/metric_status_worker/tests/tests_worker.py b/workers/metric_status_worker/tests/tests_worker.py deleted file mode 100644 --- a/workers/metric_status_worker/tests/tests_worker.py +++ /dev/null @@ -1,16 +0,0 @@ -import requests -import pytest - -from metric_status_worker.worker import MetricsStatus - -def test_get_metric_index_in_table_row(): - row = "metric |sTatuS|TestString" - metric_status = MetricsStatus("api.github.com") - result = metric_status.get_metric_index_in_table_row(row) - print(result) - assert result == (0, 3) - -def test_is_has_link(): - metric_status = MetricsStatus("api.github.com") - re_result = metric_status.is_has_link(" [oss](augur" , None) - assert re_result == ('oss', 'augur') diff --git a/workers/tests/test_standard_methods.py b/workers/tests/test_standard_methods.py deleted file mode 100644 --- a/workers/tests/test_standard_methods.py +++ /dev/null @@ -1,28 +0,0 @@ -# Sample Test passing with nose and pytest -import pandas as pd -import pytest -from workers.standard_methods import check_duplicates, dump_queue, read_config -from queue import Queue - - -def test_check_duplicates(): - obj = {"website":["walmart.com"]} - new_data = [obj] - table_values = pd.read_csv("augur/data/companies.csv") - assert check_duplicates(new_data, table_values, "website") == [obj] - -def test_dump_queues(): - sample_queue = Queue() - list_sample = ["[email protected]", "[email protected]", "[email protected]"] - for list_item in list_sample: - sample_queue.put(list_item) - queue_to_list = dump_queue(sample_queue) - assert queue_to_list == ["[email protected]", "[email protected]", "[email protected]"] - -def test_read_config_no_exception(): - db_name = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur',config_file_path="augur.config.json") - assert db_name == "augur" - -def test_read_config_exception(): - with pytest.raises(AttributeError): - db_name = read_config('Server', 'username')
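The `paginate` helper documented earlier in this patch is the core of the workers' collection loop: it walks an API endpoint page by page, cross-references each record against what is already stored in the target table, and tags each record with a flag describing the action to take. Below is a minimal sketch of how a worker method might call it, modeled on the contributor queries shown in the patch; the column maps and owner/repo values are illustrative assumptions, not taken from a real configuration.

```python
def query_contributors_sketch(self, owner='chaoss', name='augur'):
    # Illustrative values: these mirror the contributor examples in the patch above.
    table = 'contributors'
    table_pkey = 'cntrb_id'
    update_col_map = {'cntrb_email': 'email'}      # DB column -> API field checked to trigger updates
    duplicate_col_map = {'cntrb_login': 'login'}   # DB column -> API field used to detect duplicates (assumed)

    contributors_url = (f'https://api.github.com/repos/{owner}/{name}/'
                        'contributors?per_page=100&page={}')

    # paginate() returns every record from the paged endpoint, each tagged with
    # 'need_insertion', 'need_update', or 'none'.
    contributors = self.paginate(contributors_url, duplicate_col_map, update_col_map,
                                 table, table_pkey, platform='github')

    for record in contributors:
        if record['flag'] == 'need_insertion':
            pass  # build a cntrb dict and insert it into the contributors table
        elif record['flag'] == 'need_update':
            pass  # update the existing row identified by record['pkey']
```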
repo_info worker: dev/test branch

Please help us help you by filling out the following sections as thoroughly as you can.

**Description:**

Looks like the new Fork information collection has a mismatch between the method signature and the arguments being passed to it:

```
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
  File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
    model_method(message, repo_id)
  File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
    forked = self.is_forked(owner, repo)
  File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
    data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
```

If the log does not provide enough info, let me know.
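The traceback above points at the call in `is_forked`: `get_repo_data` is invoked on `self`, so it is already a bound method, and passing `self` explicitly supplies a fourth positional argument to a three-parameter method. A minimal sketch of the kind of one-line change that would resolve the error follows, assuming `get_repo_data` takes a URL and a response object; the signature is inferred from the traceback, not taken from the worker's source.

```python
# Hypothetical method signature, inferred from the traceback; parameter names are illustrative.
def get_repo_data(self, url, response):
    ...

# Failing call from the log: `self` is bound implicitly, so this passes four positional
# arguments (self, self, url, r) to a method that accepts only three.
data = self.get_repo_data(self, url, r)

# Likely fix: drop the explicit `self` so the argument count matches the signature.
data = self.get_repo_data(url, r)
```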
2020-06-21T13:54:53Z
[]
[]
chaoss/augur
792
chaoss__augur-792
[ "737" ]
5f927b73ab4fae059b40f38df7fd9799bfcbd34b
diff --git a/augur/__init__.py b/augur/__init__.py --- a/augur/__init__.py +++ b/augur/__init__.py @@ -1,10 +1,4 @@ #SPDX-License-Identifier: MIT -import logging -import coloredlogs - -coloredlogs.install() -logger = logging.getLogger('augur') - -# Classes -from .application import Application, logger +import os +ROOT_AUGUR_DIRECTORY = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) diff --git a/augur/application.py b/augur/application.py --- a/augur/application.py +++ b/augur/application.py @@ -4,72 +4,52 @@ """ import os -import time -import multiprocessing as mp +from pathlib import Path import logging +from logging import FileHandler, Formatter import coloredlogs import json -import pkgutil from beaker.cache import CacheManager from beaker.util import parse_cache_config_options import sqlalchemy as s import psycopg2 -from augur import logger +from augur import ROOT_AUGUR_DIRECTORY from augur.metrics import Metrics -from augur.cli.configure import default_config +from augur.config import AugurConfig +from augur.logging import AugurLogging -class Application(object): +logger = logging.getLogger(__name__) + +class Application(): """Initalizes all classes from Augur using a config file or environment variables""" - def __init__(self): + def __init__(self, given_config={}, disable_logs=False, offline_mode=False): """ Reads config, creates DB session, and initializes cache """ - self.config_file_name = 'augur.config.json' - self.__shell_config = None - self.__export_file = None - self.__env_file = None - self.config = default_config - self.env_config = {} - self.root_augur_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - default_config_path = self.root_augur_dir + '/' + self.config_file_name - using_config_file = False - - - config_locations = [self.config_file_name, default_config_path, f"/opt/augur/{self.config_file_name}"] - if os.getenv('AUGUR_CONFIG_FILE') is not None: - config_file_path = os.getenv('AUGUR_CONFIG_FILE') - using_config_file = True - else: - for index, location in enumerate(config_locations): - try: - f = open(location, "r+") - config_file_path = os.path.abspath(location) - using_config_file = True - f.close() - break - except FileNotFoundError: - pass - - if using_config_file: - try: - with open(config_file_path, 'r+') as config_file_handle: - self.config = json.loads(config_file_handle.read()) - except json.decoder.JSONDecodeError as e: - logger.warning('%s could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: %s', config_file_path, str(e)) - else: - logger.warning('%s could not be parsed, using defaults.') - - self.load_env_configuration() - - logger.setLevel(self.read_config("Development", "log_level")) + self.logging = AugurLogging(disable_logs=disable_logs) + self.root_augur_dir = ROOT_AUGUR_DIRECTORY + self.config = AugurConfig(self.root_augur_dir, given_config) + + # we need these for later + self.housekeeper = None + self.manager = None + + self.gunicorn_options = { + 'bind': '%s:%s' % (self.config.get_value("Server", "host"), self.config.get_value("Server", "port")), + 'workers': int(self.config.get_value('Server', 'workers')), + 'timeout': int(self.config.get_value('Server', 'timeout')) + } + self.logging.configure_logging(self.config) + self.gunicorn_options.update(self.logging.gunicorn_logging_options) self.cache_config = { 'cache.type': 'file', 'cache.data_dir': 'runtime/cache/', 'cache.lock_dir': 'runtime/cache/' } + if not os.path.exists(self.cache_config['cache.data_dir']): os.makedirs(self.cache_config['cache.data_dir']) if not os.path.exists(self.cache_config['cache.lock_dir']): @@ -77,75 +57,56 @@ def __init__(self): cache_parsed = parse_cache_config_options(self.cache_config) self.cache = CacheManager(**cache_parsed) - self.database = self.__connect_to_database() - self.spdx_db = self.__connect_to_database(include_spdx=True) + if offline_mode is False: + logger.debug("Running in online mode") + self.database, self.operations_database, self.spdx_database = self._connect_to_database() - self.metrics = Metrics(self) + self.metrics = Metrics(self) - def __connect_to_database(self, include_spdx=False): - user = self.read_config('Database', 'user') - host = self.read_config('Database', 'host') - port = self.read_config('Database', 'port') - dbname = self.read_config('Database', 'name') + def _connect_to_database(self): + logger.debug("Testing database connections") + user = self.config.get_value('Database', 'user') + host = self.config.get_value('Database', 'host') + port = self.config.get_value('Database', 'port') + dbname = self.config.get_value('Database', 'name') database_connection_string = 'postgresql://{}:{}@{}:{}/{}'.format( - user, self.read_config('Database', 'password'), host, port, dbname + user, self.config.get_value('Database', 'password'), host, port, dbname ) csearch_path_options = 'augur_data' - if include_spdx == True: - csearch_path_options += ',spdx' engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, connect_args={'options': f'-csearch_path={csearch_path_options}'}, pool_pre_ping=True) + csearch_path_options += ',spdx' + spdx_engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, + connect_args={'options': f'-csearch_path={csearch_path_options}'}, pool_pre_ping=True) + + helper_engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, + connect_args={'options': f'-csearch_path=augur_operations'}, pool_pre_ping=True) + try: - test_connection = engine.connect() - test_connection.close() - return engine + engine.connect().close() + helper_engine.connect().close() + spdx_engine.connect().close() + return engine, helper_engine, spdx_engine except s.exc.OperationalError as e: - logger.fatal(f"Unable to connect to the database. Terminating...") - exit() + logger.error("Unable to connect to the database. 
Terminating...") + raise(e) - def read_config(self, section, name=None): - """ - Read a variable in specified section of the config file, unless provided an environment variable + def shutdown(self): + if self.logging.stop_event is not None: + logger.debug("Stopping housekeeper logging listener...") + self.logging.stop_event.set() - :param section: location of given variable - :param name: name of variable - """ - if name is not None: - try: - value = self.config[section][name] - except KeyError as e: - value = default_config[section][name] - else: - try: - value = self.config[section] - except KeyError as e: - value = default_config[section] - - return value - - def load_env_configuration(self): - self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') - self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') - self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') - self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') - self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') - self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') - self.set_env_value(section='Development', name='log_level', environment_variable='AUGUR_LOG_LEVEL') - - def set_env_value(self, section, name, environment_variable, sub_config=None): - """ - Sets names and values of specified config section according to their environment variables. - """ - # using sub_config lets us grab values from nested config blocks - if sub_config is None: - sub_config = self.config + if self.housekeeper is not None: + logger.debug("Shutting down housekeeper updates...") + self.housekeeper.shutdown_updates() + self.housekeeper = None - env_value = os.getenv(environment_variable) + if self.manager is not None: + logger.debug("Shutting down manager...") + self.manager.shutdown() + self.manager = None - if env_value is not None: - self.env_config[environment_variable] = env_value - sub_config[section][name] = env_value diff --git a/augur/cli/__init__.py b/augur/cli/__init__.py --- a/augur/cli/__init__.py +++ b/augur/cli/__init__.py @@ -0,0 +1,34 @@ +import click +from functools import update_wrapper + +from augur.application import Application +from augur.config import AugurConfig +from augur.logging import AugurLogging, ROOT_AUGUR_DIRECTORY + +def pass_application(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application() + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def pass_config(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application(offline_mode=True).config + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def pass_logs_dir(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + config = AugurConfig(ROOT_AUGUR_DIRECTORY) + ctx.obj = AugurLogging.get_log_directories(config, reset_logfiles=False) + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def initialize_logging(f): + def new_func(*args, **kwargs): + AugurLogging(reset_logfiles=False) + return f(*args, **kwargs) + return update_wrapper(new_func, f) \ No newline at end of file diff --git a/augur/runtime.py b/augur/cli/_multicommand.py similarity index 63% rename from augur/runtime.py rename to augur/cli/_multicommand.py --- a/augur/runtime.py +++ 
b/augur/cli/_multicommand.py @@ -6,16 +6,14 @@ import os import sys import click +import importlib import augur.application CONTEXT_SETTINGS = dict(auto_envvar_prefix='AUGUR') class AugurMultiCommand(click.MultiCommand): - def __commands_folder(self): - return os.path.abspath( - os.path.join(os.path.dirname(__file__), 'cli') - ) + return os.path.abspath(os.path.dirname(__file__)) def list_commands(self, ctx): rv = [] @@ -26,13 +24,8 @@ def list_commands(self, ctx): return rv def get_command(self, ctx, name): - # try: - if sys.version_info[0] == 2: - name = name.encode('ascii', 'replace') - mod = __import__('augur.cli.' + name, - None, None, ['cli']) - - return mod.cli + module = importlib.import_module('.' + name, 'augur.cli') + return module.cli @click.command(cls=AugurMultiCommand, context_settings=CONTEXT_SETTINGS) @click.pass_context @@ -40,11 +33,4 @@ def run(ctx): """ Augur is an application for open source community health analytics """ - - app = augur.application.Application() - ctx.obj = app - return ctx.obj - - -if __name__ == '__main__': - run() + return ctx diff --git a/augur/cli/configure.py b/augur/cli/configure.py --- a/augur/cli/configure.py +++ b/augur/cli/configure.py @@ -6,190 +6,15 @@ import os import click import json +import logging -from augur import logger +from augur.config import default_config, ENVVAR_PREFIX +from augur.cli import initialize_logging +from augur.logging import ROOT_AUGUR_DIRECTORY +logger = logging.getLogger(__name__) ENVVAR_PREFIX = "AUGUR_" -default_config = { - "Database": { - "name": "augur", - "host": "localhost", - "key": "key", - "password": "augur", - "port": 5432, - "user": "augur" - }, - "Housekeeper": { - "jobs": [ - { - "all_focused": 1, - "delay": 150000, - "given": [ - "github_url" - ], - "model": "issues", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "pull_request_commits", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "repo_info", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "repo_group" - ], - "model": "commits", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "github_url" - ], - "model": "pull_requests", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "contributors", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "insights", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "badges", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "value", - "repo_group_id": 0 - }, - { - "delay": 100000, - "given": [ - "github_url" - ], - "model": "pull_request_files", - "repo_group_id": 0 - } - ] - }, - "Workers": { - "facade_worker": { - "port": 50100, - "repo_directory": "repos/", - "switch": 1, - "workers": 1 - }, - "github_worker": { - "port": 50200, - "switch": 1, - "workers": 1 - }, - "insight_worker": { - "port": 50300, - "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"}, - "contamination": 0.041, - "switch": 0, - "workers": 1, - "training_days": 365, - "anomaly_days": 2 - }, - "linux_badge_worker": { - "port": 50400, - "switch": 1, - "workers": 1 - }, - "metric_status_worker": { - "port": 50500, - "switch": 0, - "workers": 1 - }, - "pull_request_worker": { - "port": 50600, - "switch": 1, - "workers": 1 - }, - 
"repo_info_worker": { - "port": 50700, - "switch": 1, - "workers": 1 - }, - "value_worker": { - "port": 50800, - "scc_bin": "scc", - "switch": 0, - "workers": 1 - }, - "contributor_worker": { - "port": 50900, - "switch": 1, - "workers": 1 - } - }, - "Facade": { - "check_updates": 1, - "clone_repos": 1, - "create_xlsx_summary_files": 1, - "delete_marked_repos": 0, - "fix_affiliations": 1, - "force_analysis": 1, - "force_invalidate_caches": 1, - "force_updates": 1, - "limited_run": 0, - "multithreaded": 0, - "nuke_stored_affiliations": 0, - "pull_repos": 1, - "rebuild_caches": 1, - "run_analysis": 1 - }, - "Server": { - "cache_expire": "3600", - "host": "0.0.0.0", - "port": "5000", - "workers": 4, - "timeout": 60 - }, - "Frontend": { - "host": "0.0.0.0", - "port": "5000" - }, - "Development": { - "log_level": "INFO" - } - } @click.group('configure', short_help='Generate an augur.config.json') def cli(): @@ -204,7 +29,9 @@ def cli(): @click.option('--github_api_key', help="GitHub API key for data collection from the GitHub API", envvar=ENVVAR_PREFIX + 'GITHUB_API_KEY') @click.option('--facade_repo_directory', help="Directory on the database server where Facade should clone repos", envvar=ENVVAR_PREFIX + 'FACADE_REPO_DIRECTORY') @click.option('--rc-config-file', help="File containing existing config whose values will be used as the defaults", type=click.Path(exists=True)) -def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file): [email protected]('--gitlab_api_key', help="GitLab API key for data collection from the GitLab API", envvar=ENVVAR_PREFIX + 'GITLAB_API_KEY') +@initialize_logging +def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file, gitlab_api_key): """ Generate an augur.config.json """ @@ -250,11 +77,13 @@ def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, fa config['Database']['password'] = db_password if github_api_key is not None: config['Database']['key'] = github_api_key + if gitlab_api_key is not None: + config['Database']['gitlab_api_key'] = gitlab_api_key if facade_repo_directory is not None: config['Workers']['facade_worker']['repo_directory'] = facade_repo_directory try: - with open(os.path.abspath('augur.config.json'), 'w') as f: + with open(os.path.abspath(ROOT_AUGUR_DIRECTORY + '/augur.config.json'), 'w') as f: json.dump(config, f, indent=4) logger.info('augur.config.json successfully created') except Exception as e: diff --git a/augur/cli/db.py b/augur/cli/db.py --- a/augur/cli/db.py +++ b/augur/cli/db.py @@ -1,5 +1,6 @@ from os import walk, chdir, environ, chmod, path import os +import logging from sys import exit import stat from collections import OrderedDict @@ -12,7 +13,9 @@ import pandas as pd from sqlalchemy import exc -from augur import logger +from augur.cli import pass_config, pass_application + +logger = logging.getLogger(__name__) @click.group('db', short_help='Database utilities') def cli(): @@ -20,14 +23,12 @@ def cli(): @cli.command('add-repos') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repos(ctx, filename): +@pass_application +def add_repos(augur_app, filename): """ Add repositories to Augur's database """ - app = ctx.obj - - df = app.database.execute(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups")) + df = augur_app.database.execute(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups")) repo_group_IDs = [group[0] for group in 
df.fetchall()] insertSQL = s.sql.text(""" @@ -41,33 +42,29 @@ def add_repos(ctx, filename): for row in data: logger.info(f"Inserting repo with Git URL `{row[1]}` into repo group {row[0]}") if int(row[0]) in repo_group_IDs: - result = app.database.execute(insertSQL, repo_group_id=int(row[0]), repo_git=row[1]) + result = augur_app.database.execute(insertSQL, repo_group_id=int(row[0]), repo_git=row[1]) else: - logger.warn(f"Invalid repo group id specified for {row[1]}, skipping.") + logger.warning(f"Invalid repo group id specified for {row[1]}, skipping.") @cli.command('get-repo-groups') [email protected]_context -def get_repo_groups(ctx): +@pass_application +def get_repo_groups(augur_app): """ List all repo groups and their associated IDs """ - app = ctx.obj - - df = pd.read_sql(s.sql.text("SELECT repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), app.database) + df = pd.read_sql(s.sql.text("SELECT repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), augur_app.database) print(df) return df @cli.command('add-repo-groups') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repo_groups(ctx, filename): +@pass_application +def add_repo_groups(augur_app, filename): """ Create new repo groups in Augur's database """ - app = ctx.obj - - df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), app.database) + df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), augur_app.database) repo_group_IDs = df['repo_group_id'].values.tolist() insert_repo_group_sql = s.sql.text(""" @@ -80,51 +77,48 @@ def add_repo_groups(ctx, filename): logger.info(f"Inserting repo group with name {row[1]} and ID {row[0]}...") if int(row[0]) not in repo_group_IDs: repo_group_IDs.append(int(row[0])) - app.database.execute(insert_repo_group_sql, repo_group_id=int(row[0]), repo_group_name=row[1]) + augur_app.database.execute(insert_repo_group_sql, repo_group_id=int(row[0]), repo_group_name=row[1]) else: logger.info(f"Repo group with ID {row[1]} for repo group {row[1]} already exists, skipping...") @cli.command('update-repo-directory') @click.argument('repo_directory') [email protected]_context -def update_repo_directory(ctx, repo_directory): +@pass_application +def update_repo_directory(augur_app, repo_directory): """ Update Facade worker repo cloning directory """ - app = ctx.obj - updateRepoDirectorySQL = s.sql.text(""" UPDATE augur_data.settings SET VALUE = :repo_directory WHERE setting='repo_directory'; """) - app.database.execute(updateRepoDirectorySQL, repo_directory=repo_directory) + augur_app.database.execute(updateRepoDirectorySQL, repo_directory=repo_directory) logger.info(f"Updated Facade repo directory to: {repo_directory}") # get_db_version is a helper function to print_db_version and upgrade_db_version -def get_db_version(app): +def get_db_version(augur_app): db_version_sql = s.sql.text(""" SELECT * FROM augur_operations.augur_settings WHERE setting = 'augur_data_version' """) - return int(app.database.execute(db_version_sql).fetchone()[2]) + return int(augur_app.database.execute(db_version_sql).fetchone()[2]) @cli.command('print-db-version') [email protected]_context -def print_db_version(ctx): +@pass_application +def print_db_version(augur_app): """ Get the version of the configured database """ - print(get_db_version(ctx.obj)) + print(get_db_version(augur_app)) @cli.command('upgrade-db-version') [email protected]_context -def upgrade_db_version(ctx): +@pass_application +def 
upgrade_db_version(augur_app): """ Upgrade the configured database to the latest version """ - app = ctx.obj - check_pgpass_credentials(app.config) - current_db_version = get_db_version(app) + check_pgpass_credentials(augur_app.config.get_raw_config()) + current_db_version = get_db_version(augur_app) update_scripts_filenames = [] for (_, _, filenames) in walk('schema/generate'): @@ -143,23 +137,22 @@ def upgrade_db_version(ctx): if current_db_version == most_recent_version: logger.info("Your database is already up to date. ") elif current_db_version > most_recent_version: - logger.info(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") + logger.error(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") for target_version, script_location in target_version_script_map.items(): if target_version == current_db_version + 1: logger.info(f"Upgrading from {current_db_version} to {target_version}") - run_psql_command_in_database(app, '-f', f"schema/generate/{script_location}") + run_psql_command_in_database(augur_app, '-f', f"schema/generate/{script_location}") current_db_version += 1 @cli.command('check-for-upgrade') [email protected]_context -def check_for_upgrade(ctx): +@pass_application +def check_for_upgrade(augur_app): """ Upgrade the configured database to the latest version """ - app = ctx.obj - check_pgpass_credentials(app.config) - current_db_version = get_db_version(app) + check_pgpass_credentials(augur_app.config.get_raw_config()) + current_db_version = get_db_version(augur_app) update_scripts_filenames = [] for (_, _, filenames) in walk('schema/generate'): @@ -180,18 +173,17 @@ def check_for_upgrade(ctx): elif current_db_version < most_recent_version: logger.info(f"Current database version: v{current_db_version}\nPlease upgrade to the most recent version (v{most_recent_version}) with augur db upgrade-db-version.") elif current_db_version > most_recent_version: - logger.warn(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") + logger.error(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. 
Please contact your system administrator to resolve this error.") @cli.command('create-schema') [email protected]_context -def create_schema(ctx): +@pass_application +def create_schema(augur_app): """ Create schema in the configured database """ - app = ctx.obj - check_pgpass_credentials(app.config) - run_psql_command_in_database(app, '-f', 'schema/create_schema.sql') + check_pgpass_credentials(augur_app.config.get_raw_config()) + run_psql_command_in_database(augur_app, '-f', 'schema/create_schema.sql') def generate_key(length): return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length)) @@ -202,46 +194,40 @@ def generate_api_key(ctx): """ Generate and set a new Augur API key """ - app = ctx.obj key = generate_key(32) ctx.invoke(update_api_key, api_key=key) print(key) @cli.command('update-api-key') @click.argument("api_key") [email protected]_context -def update_api_key(ctx, api_key): +@pass_application +def update_api_key(augur_app, api_key): """ Update the API key in the database to the given key """ - app = ctx.obj - update_api_key_sql = s.sql.text(""" UPDATE augur_operations.augur_settings SET VALUE = :api_key WHERE setting='augur_api_key'; """) - app.database.execute(update_api_key_sql, api_key=api_key) - logger.info(f"Update Augur API key to: {api_key}") + augur_app.database.execute(update_api_key_sql, api_key=api_key) + logger.info(f"Updated Augur API key to: {api_key}") @cli.command('get-api-key') [email protected]_context -def get_api_key(ctx): - app = ctx.obj - +@pass_application +def get_api_key(augur_app): get_api_key_sql = s.sql.text(""" SELECT value FROM augur_operations.augur_settings WHERE setting='augur_api_key'; """) try: - print(app.database.execute(get_api_key_sql).fetchone()[0]) + print(augur_app.database.execute(get_api_key_sql).fetchone()[0]) except TypeError: - logger.warn("No Augur API key found.") + logger.error("No Augur API key found.") @cli.command('check-pgpass', short_help="Check the ~/.pgpass file for Augur's database credentials") [email protected]_context -def check_pgpass(ctx): - app = ctx.obj - check_pgpass_credentials(app.config) +@pass_config +def check_pgpass(config): + check_pgpass_credentials(config.get_raw_config()) @cli.command('init-database') @click.option('--default-db-name', default='postgres') @@ -252,12 +238,10 @@ def check_pgpass(ctx): @click.option('--target-password', default='augur') @click.option('--host', default='localhost') @click.option('--port', default='5432') [email protected]_context -def init_database(ctx, default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port): +def init_database(default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port): """ Create database with the given credentials using the given maintenance database """ - app = ctx.obj config = { 'Database': { 'name': default_db_name, @@ -276,15 +260,15 @@ def init_database(ctx, default_db_name, default_user, default_password, target_d def run_db_creation_psql_command(host, port, user, name, command): call(['psql', '-h', host, '-p', port, '-U', user, '-d', name, '-a', '-w', '-c', command]) -def run_psql_command_in_database(app, target_type, target): +def run_psql_command_in_database(augur_app, target_type, target): if target_type not in ['-f', '-c']: - logger.fatal("Invalid target type. Exiting...") + logger.error("Invalid target type. 
Exiting...") exit(1) - call(['psql', '-h', app.read_config('Database', 'host'),\ - '-d', app.read_config('Database', 'name'),\ - '-U', app.read_config('Database', 'user'),\ - '-p', str(app.read_config('Database', 'port')),\ + call(['psql', '-h', augur_app.config.get_value('Database', 'host'),\ + '-d', augur_app.config.get_value('Database', 'name'),\ + '-U', augur_app.config.get_value('Database', 'user'),\ + '-p', str(augur_app.config.get_value('Database', 'port')),\ '-a', '-w', target_type, target ]) @@ -292,14 +276,14 @@ def check_pgpass_credentials(config): pgpass_file_path = environ['HOME'] + '/.pgpass' if not path.isfile(pgpass_file_path): - logger.debug("~/.pgpass does not exist, creating.") + logger.info("~/.pgpass does not exist, creating.") open(pgpass_file_path, 'w+') chmod(pgpass_file_path, stat.S_IWRITE | stat.S_IREAD) pgpass_file_mask = oct(os.stat(pgpass_file_path).st_mode & 0o777) if pgpass_file_mask != '0o600': - logger.debug("Updating ~/.pgpass file permissions.") + logger.info("Updating ~/.pgpass file permissions.") chmod(pgpass_file_path, stat.S_IWRITE | stat.S_IREAD) with open(pgpass_file_path, 'a+') as pgpass_file: diff --git a/augur/cli/logging.py b/augur/cli/logging.py new file mode 100644 --- /dev/null +++ b/augur/cli/logging.py @@ -0,0 +1,89 @@ +import click +import os +from os import walk + +from augur.cli import pass_logs_dir + [email protected]("logging", short_help="View Augur's log files") +def cli(): + pass + [email protected]("directory") +@pass_logs_dir +def directory(logs_dir): + """ + Print the location of Augur's logs directory + """ + print(logs_dir) + [email protected]("tail") [email protected]("lines", default=20) +@pass_logs_dir +def tail(logs_dir, lines): + """ + Output the last n lines of the main Augur and worker logfiles + """ + root_log_dir = logs_dir + worker_log_dir = logs_dir + "/workers/" + if lines is None: + lines = 20 + + files = [] + directories = [] + for (_, _, filenames) in walk(root_log_dir): + for file in filenames: + result = _tail(open(root_log_dir + "/" + file), lines) + print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + break + + files = [] + directories = [] + for (dirpath, dirnames, filenames) in walk(worker_log_dir): + directories.extend(dirnames) + break + + for directory in directories: + specific_worker_log_dir = worker_log_dir + directory + for (_, _, filenames) in walk(specific_worker_log_dir): + files.extend(filenames) + + for file in [file for file in filenames if "collection" in file]: + result = _tail(open(specific_worker_log_dir + "/" + file), lines) + print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + break + +def _tail(f, lines=20, _buffer=4098): + lines_found = [] + + # block counter will be multiplied by buffer + # to get the block size from the end + block_counter = -1 + + # loop until we find X lines + while len(lines_found) < lines: + try: + f.seek(block_counter * _buffer, os.SEEK_END) + except IOError: # either file is too small, or too many lines requested + f.seek(0) + lines_found = f.readlines() + break + + lines_found = f.readlines() + + # we found enough lines, get out + # Removed this line because it was redundant the while will catch + # it, I left it for history + # if len(lines_found) > lines: + # break + + # decrement the block counter to get the + # next X bytes + block_counter -= 1 + + return lines_found[-lines:] \ No newline at end of file diff --git a/augur/cli/run.py b/augur/cli/run.py --- a/augur/cli/run.py +++ 
b/augur/cli/run.py @@ -4,187 +4,143 @@ """ from copy import deepcopy -import os, time, atexit, subprocess, click +import os, time, atexit, subprocess, click, atexit, logging, sys import multiprocessing as mp import gunicorn.app.base -from gunicorn.six import iteritems from gunicorn.arbiter import Arbiter -from augur.housekeeper.housekeeper import Housekeeper -from augur import logger +from augur.housekeeper import Housekeeper from augur.server import Server - from augur.cli.util import kill_processes -import time +from augur.application import Application + +logger = logging.getLogger("augur") @click.command("run") @click.option("--disable-housekeeper", is_flag=True, default=False, help="Turns off the housekeeper") @click.option("--skip-cleanup", is_flag=True, default=False, help="Disables the old process cleanup that runs before Augur starts") [email protected]_context -def cli(ctx, disable_housekeeper, skip_cleanup): +def cli(disable_housekeeper, skip_cleanup): """ Start Augur's backend server """ + augur_app = Application() + logger.info("Augur application initialized") if not skip_cleanup: - logger.info("Cleaning up old Augur processes. Just a moment please...") - ctx.invoke(kill_processes) + logger.debug("Cleaning up old Augur processes...") + kill_processes() time.sleep(2) else: - logger.info("Skipping cleanup processes.") - - def get_process_id(name): - """Return process ids found by name or command - """ - child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False) - response = child.communicate()[0] - return [int(pid) for pid in response.split()] + logger.debug("Skipping process cleanup") - app = ctx.obj + master = initialize_components(augur_app, disable_housekeeper) + logger.info('Starting Gunicorn server in the background...') + if not disable_housekeeper: + logger.info('Housekeeper update process logs will now take over.') + else: + logger.info("Gunicorn server logs will be written to gunicorn.log") + logger.info("Augur is still running...don't close this process!") + Arbiter(master).run() - mp.set_start_method('forkserver', force=True) +def initialize_components(augur_app, disable_housekeeper): master = None - manager = None broker = None housekeeper = None - - logger.info("Booting broker and its manager...") - manager = mp.Manager() - broker = manager.dict() - - controller = app.read_config('Workers') - worker_pids = [] worker_processes = [] + mp.set_start_method('forkserver', force=True) if not disable_housekeeper: - if not controller: - return + logger.info("Booting manager") + manager = mp.Manager() + + logger.info("Booting broker") + broker = manager.dict() + + housekeeper = Housekeeper(broker=broker, augur_app=augur_app) + + controller = augur_app.config.get_section('Workers') + for worker in controller.keys(): - if not controller[worker]['switch']: - continue - logger.info("Your config has the option set to automatically boot {} instances of the {}".format(controller[worker]['workers'], worker)) - pids = get_process_id("/bin/sh -c cd workers/{} && {}_start".format(worker, worker)) - worker_pids += pids - if len(pids) > 0: - worker_pids.append(pids[0] + 1) - pids.append(pids[0] + 1) - logger.info("Found and preparing to kill previous {} worker pids: {}".format(worker,pids)) - for pid in pids: - try: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - - @atexit.register - def exit(): - try: - for pid in worker_pids: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - 
for process in worker_processes: - logger.info("Shutting down worker process with pid: {} ...".format(process.pid)) - process.terminate() + if controller[worker]['switch']: + for i in range(controller[worker]['workers']): + logger.info("Booting {} #{}".format(worker, i + 1)) + worker_process = mp.Process(target=worker_start, name=f"{worker}_{i}", kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) + worker_processes.append(worker_process) + worker_process.start() - if master is not None: - master.halt() - logger.info("Shutting down housekeeper updates...") - if housekeeper is not None: - housekeeper.shutdown_updates() - - # if hasattr(manager, "shutdown"): - # wait for the spawner and the worker threads to go down - # - if manager is not None: - manager.shutdown() - # check if it is still alive and kill it if necessary - # if manager._process.is_alive(): - manager._process.terminate() - - # Prevent multiprocessing's atexit from conflicting with gunicorn - logger.info("Killing main augur process with PID: {}".format(os.getpid())) - os.kill(os.getpid(), 9) - os._exit(0) + augur_app.manager = manager + augur_app.broker = broker + augur_app.housekeeper = housekeeper - if not disable_housekeeper: - logger.info("Booting housekeeper...") - jobs = deepcopy(app.read_config('Housekeeper', 'jobs')) - try: - housekeeper = Housekeeper( - jobs, - broker, - broker_host=app.read_config('Server', 'host'), - broker_port=app.read_config('Server', 'port'), - user=app.read_config('Database', 'user'), - password=app.read_config('Database', 'password'), - host=app.read_config('Database', 'host'), - port=app.read_config('Database', 'port'), - dbname=app.read_config('Database', 'name') - ) - except KeyboardInterrupt as e: - exit() - - logger.info("Housekeeper has finished booting.") - - if controller: - for worker in controller.keys(): - if controller[worker]['switch']: - for i in range(controller[worker]['workers']): - logger.info("Booting {} #{}".format(worker, i + 1)) - worker_process = mp.Process(target=worker_start, kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) - worker_process.start() - worker_processes.append(worker_process) - - host = app.read_config('Server', 'host') - port = app.read_config('Server', 'port') - workers = int(app.read_config('Server', 'workers')) - timeout = int(app.read_config('Server', 'timeout')) - options = { - 'bind': '%s:%s' % (host, port), - 'workers': workers, - 'accesslog': '-', - 'access_log_format': '%(h)s - %(t)s - %(r)s', - 'timeout': timeout - } - logger.info('Starting server...') - master = Arbiter(AugurGunicornApp(options, manager=manager, broker=broker, housekeeper=housekeeper)).run() + atexit._clear() + atexit.register(exit, augur_app, worker_processes, master) + return AugurGunicornApp(augur_app.gunicorn_options, augur_app=augur_app) def worker_start(worker_name=None, instance_number=0, worker_port=None): - time.sleep(120 * instance_number) - destination = subprocess.DEVNULL try: - destination = open("workers/{}/worker_{}.log".format(worker_name, worker_port), "a+") - except IOError as e: - logger.error("Error opening log file for auto-started worker {}: {}".format(worker_name, e)) - process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) - logger.info("{} booted.".format(worker_name)) + time.sleep(30 * instance_number) + destination = subprocess.DEVNULL + 
process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) + logger.info("{} #{} booted.".format(worker_name,instance_number+1)) + except KeyboardInterrupt as e: + pass + +def exit(augur_app, worker_processes, master): + + logger.info("Shutdown started for this Gunicorn worker...") + augur_app.shutdown() + + if worker_processes: + for process in worker_processes: + logger.debug("Shutting down worker process with pid: {}...".format(process.pid)) + process.terminate() + + if master is not None: + logger.debug("Shutting down Gunicorn server") + master.halt() + master = None + + logger.info("Shutdown complete") + sys.exit(0) class AugurGunicornApp(gunicorn.app.base.BaseApplication): """ Loads configurations, initializes Gunicorn, loads server """ - def __init__(self, options=None, manager=None, broker=None, housekeeper=None): - self.options = options or {} - self.manager = manager - self.broker = broker - self.housekeeper = housekeeper + def __init__(self, options={}, augur_app=None): + self.options = options + self.augur_app = augur_app + self.manager = self.augur_app.manager + self.broker = self.augur_app.broker + self.housekeeper = self.augur_app.housekeeper + self.server = None + logger.debug(f"Gunicorn will start {self.options['workers']} worker processes") super(AugurGunicornApp, self).__init__() - # self.cfg.pre_request.set(pre_request) def load_config(self): """ Sets the values for configurations """ - config = dict([(key, value) for key, value in iteritems(self.options) - if key in self.cfg.settings and value is not None]) - for key, value in iteritems(config): + config = {key: value for key, value in self.options.items() + if key in self.cfg.settings and value is not None} + for key, value in config.items(): self.cfg.set(key.lower(), value) - def load(self): + def get_augur_app(self): """ Returns the loaded server """ - server = Server(manager=self.manager, broker=self.broker, housekeeper=self.housekeeper) - return server.app + self.load() + return self.server.augur_app + def load(self): + """ + Returns the loaded server + """ + if self.server is None: + try: + self.server = Server(augur_app=self.augur_app) + except Exception as e: + logger.error(f"An error occured when Gunicorn tried to load the server: {e}") + return self.server.app diff --git a/augur/cli/util.py b/augur/cli/util.py --- a/augur/cli/util.py +++ b/augur/cli/util.py @@ -5,6 +5,7 @@ import os import signal +import logging from subprocess import call, run import psutil @@ -12,36 +13,38 @@ import pandas as pd import sqlalchemy as s -from augur import logger -from augur.cli.configure import default_config +from augur.cli import initialize_logging, pass_config, pass_application + +logger = logging.getLogger(__name__) @click.group('util', short_help='Miscellaneous utilities') def cli(): pass @cli.command('export-env') [email protected]_context -def export_env(ctx): +@pass_config +def export_env(config): """ Exports your GitHub key and database credentials """ - app = ctx.obj export_file = open(os.getenv('AUGUR_EXPORT_FILE', 'augur_export_env.sh'), 'w+') export_file.write('#!/bin/bash') export_file.write('\n') env_file = open(os.getenv('AUGUR_ENV_FILE', 'docker_env.txt'), 'w+') - for env_var in app.env_config.items(): - export_file.write('export ' + env_var[0] + '="' + str(env_var[1]) + '"\n') - env_file.write(env_var[0] + '=' + str(env_var[1]) + '\n') + for env_var in config.get_env_config().items(): + if "LOG" not in env_var[0]: + 
logger.info(f"Exporting {env_var[0]}") + export_file.write('export ' + env_var[0] + '="' + str(env_var[1]) + '"\n') + env_file.write(env_var[0] + '=' + str(env_var[1]) + '\n') export_file.close() env_file.close() @cli.command('kill') [email protected]_context -def kill_processes(ctx): +@initialize_logging +def cli_kill_processes(): """ Terminates all currently running backend Augur processes, including any workers. Will only work in a virtual environment. """ @@ -49,14 +52,26 @@ def kill_processes(ctx): if processes != []: for process in processes: if process.pid != os.getpid(): - # logger.info(f"Killing {process.pid}: {' '.join(process.info['cmdline'][1:])}") logger.info(f"Killing process {process.pid}") try: process.send_signal(signal.SIGTERM) except psutil.NoSuchProcess as e: pass +def kill_processes(): + logger = logging.getLogger("augur") + processes = get_augur_processes() + if processes != []: + for process in processes: + if process.pid != os.getpid(): + logger.info(f"Killing process {process.pid}") + try: + process.send_signal(signal.SIGTERM) + except psutil.NoSuchProcess as e: + logger.warning(e) + @cli.command('list',) +@initialize_logging def list_processes(): """ Outputs the name and process ID (PID) of all currently running backend Augur processes, including any workers. Will only work in a virtual environment. @@ -78,13 +93,11 @@ def get_augur_processes(): return processes @cli.command('repo-reset') [email protected]_context -def repo_reset(ctx): +@pass_application +def repo_reset(augur_app): """ Refresh repo collection to force data collection """ - app = ctx.obj - - app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") + augur_app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") logger.info("Repos successfully reset") diff --git a/augur/config.py b/augur/config.py new file mode 100644 --- /dev/null +++ b/augur/config.py @@ -0,0 +1,349 @@ +import os +import json +import logging + +ENVVAR_PREFIX = "AUGUR_" + +default_config = { + "version": 1, + "Database": { + "name": "augur", + "host": "localhost", + "key": "key", + "password": "augur", + "port": 5432, + "user": "augur", + "gitlab_api_key":"gitlab_api_key" + }, + "Housekeeper": { + "jobs": [ + { + "all_focused": 1, + "delay": 150000, + "given": [ + "github_url" + ], + "model": "issues", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "pull_request_commits", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "repo_info", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "repo_group" + ], + "model": "commits", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "github_url" + ], + "model": "pull_requests", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "contributors", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "insights", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "badges", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "value", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "pull_request_files", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "releases", + 
"repo_group_id": 0 + } + ] + }, + "Workers": { + "facade_worker": { + "port": 50100, + "repo_directory": "repos/", + "switch": 1, + "workers": 1 + }, + "github_worker": { + "port": 50200, + "switch": 1, + "workers": 1 + }, + "insight_worker": { + "port": 50300, + "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", + "reviews": "pull_requests", "contributors-new": "new_contributors"}, + "confidence_interval": 95, + "contamination": 0.041, + "switch": 0, + "workers": 1, + "training_days": 365, + "anomaly_days": 2 + }, + "linux_badge_worker": { + "port": 50400, + "switch": 1, + "workers": 1 + }, + "metric_status_worker": { + "port": 50500, + "switch": 0, + "workers": 1 + }, + "pull_request_worker": { + "port": 50600, + "switch": 1, + "workers": 1 + }, + "repo_info_worker": { + "port": 50700, + "switch": 1, + "workers": 1 + }, + "value_worker": { + "port": 50800, + "scc_bin": "scc", + "switch": 0, + "workers": 1 + }, + "contributor_worker": { + "port": 50900, + "switch": 1, + "workers": 1 + }, + "gitlab_issues_worker": { + "port": 51000, + "switch": 1, + "workers": 1 + }, + "release_worker": { + "port": 51100, + "switch": 1, + "workers": 1 + }, + "gitlab_merge_request_worker": { + "port": 51200, + "switch": 1, + "workers": 1 + } + }, + "Facade": { + "check_updates": 1, + "clone_repos": 1, + "create_xlsx_summary_files": 1, + "delete_marked_repos": 0, + "fix_affiliations": 1, + "force_analysis": 1, + "force_invalidate_caches": 1, + "force_updates": 1, + "limited_run": 0, + "multithreaded": 0, + "nuke_stored_affiliations": 0, + "pull_repos": 1, + "rebuild_caches": 1, + "run_analysis": 1 + }, + "Server": { + "cache_expire": "3600", + "host": "0.0.0.0", + "port": "5000", + "workers": 4, + "timeout": 60 + }, + "Frontend": { + "host": "0.0.0.0", + "port": "5000" + }, + "Logging": { + "logs_directory": "logs/", + "log_level": "INFO", + "verbose": 0, + "quiet": 0, + "debug": 0 + } + } + +logger = logging.getLogger(__name__) + +class AugurConfig(): + """docstring for AugurConfig""" + def __init__(self, root_augur_dir, given_config={}): + self._default_config_file_name = 'augur.config.json' + self._root_augur_dir = root_augur_dir + self._default_config = default_config + self._env_config = {} + self.load_config() + self.version = self.get_version() + self._config.update(given_config) + + def get_section(self, section_name): + try: + return self._config[section_name] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name} not found in loaded config. Checking default config") + try: + return self._default_config[section_name] + except KeyError as e: + logger.error(f"No defaults found for {section_name}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}") + + def get_version(self): + try: + return self._config["version"] + except KeyError as e: + logger.warning("No config version found. Setting version to 0.") + return 0 + + def get_value(self, section_name, value): + try: + return self._config[section_name][value] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name}:{value} not found in loaded config. 
Checking default config") + try: + return self._default_config[section_name][value] + except KeyError as e: + logger.error(f"No defaults found for {section_name}:{value}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}:{value}") + + def load_config(self): + self._config = None + self.using_default_config = False + + logger.debug("Attempting to load config file") + try: + config_file_path = self.discover_config_file() + try: + with open(config_file_path, 'r+') as config_file_handle: + self._config = json.loads(config_file_handle.read()) + logger.debug("Config file loaded successfully") + except json.decoder.JSONDecodeError as e: + logger.warning("Unable to parse config. Using default configuration") + self.using_default_config = True + self._config = default_config + except AugurConfigFileNotFoundException as e: + logger.warning("Config file not found. Using default configuration") + self.using_default_config = True + self._config = default_config + + self.load_env_configuration() + + def discover_config_file(self): + default_config_path = self._root_augur_dir + '/' + self._default_config_file_name + config_file_path = None + + config_locations = [self._default_config_file_name, default_config_path + , f"/opt/augur/{self._default_config_file_name}"] + if os.getenv('AUGUR_CONFIG_FILE', None) is not None: + config_file_path = os.getenv('AUGUR_CONFIG_FILE') + else: + for location in config_locations: + try: + f = open(location, "r+") + config_file_path = os.path.abspath(location) + f.close() + break + except FileNotFoundError: + pass + if config_file_path: + return config_file_path + else: + raise(AugurConfigFileNotFoundException(message=f"{self._default_config_file_name} not found", errors=None)) + + def load_env_configuration(self): + self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') + self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') + self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') + self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') + self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') + self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') + self.set_env_value(section='Logging', name='log_level', environment_variable='AUGUR_LOG_LEVEL') + self.set_env_value(section='Logging', name='quiet', environment_variable='AUGUR_LOG_QUIET') + self.set_env_value(section='Logging', name='debug', environment_variable='AUGUR_LOG_DEBUG') + self.set_env_value(section='Logging', name='verbose', environment_variable='AUGUR_LOG_VERBOSE') + + def set_env_value(self, section, name, environment_variable, sub_config=None): + """ + Sets names and values of specified config section according to their environment variables. 
+ """ + # using sub_config lets us grab values from nested config blocks + if sub_config is None: + sub_config = self._config + + env_value = os.getenv(environment_variable) + + if env_value is not None: + self._env_config[environment_variable] = env_value + sub_config[section][name] = env_value + # logger.info(f"{section}:[\"{name}\"] set to {env_value} by: {environment_variable}") + else: + self._env_config[environment_variable] = self.get_value(section, name) + + def get_raw_config(self): + return self._config + + def get_default_config(self): + return self._default_config + + def get_env_config(self): + return self._env_config + +class AugurConfigFileNotFoundException(Exception): + def __init__(self, message, errors): + super().__init__(message) diff --git a/augur/housekeeper/housekeeper.py b/augur/housekeeper.py similarity index 81% rename from augur/housekeeper/housekeeper.py rename to augur/housekeeper.py --- a/augur/housekeeper/housekeeper.py +++ b/augur/housekeeper.py @@ -1,69 +1,85 @@ """ Keeps data up to date """ +import coloredlogs +from copy import deepcopy import logging, os, time, requests -from multiprocessing import Process +import logging.config +from multiprocessing import Process, get_start_method from sqlalchemy.ext.automap import automap_base import sqlalchemy as s import pandas as pd from sqlalchemy import MetaData -logging.basicConfig(filename='housekeeper.log') + +from augur.logging import AugurLogging + +import warnings +warnings.filterwarnings('ignore') + +logger = logging.getLogger(__name__) class Housekeeper: - def __init__(self, jobs, broker, broker_host, broker_port, user, password, host, port, dbname): + def __init__(self, broker, augur_app): + logger.info("Booting housekeeper") - self.broker_host = broker_host - self.broker_port = broker_port + self._processes = [] + self.augur_logging = augur_app.logging + self.jobs = deepcopy(augur_app.config.get_value("Housekeeper", "jobs")) + self.broker_host = augur_app.config.get_value("Server", "host") + self.broker_port = augur_app.config.get_value("Server", "port") self.broker = broker - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - user, password, host, port, dbname - ) - - dbschema='augur_data' - self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + self.db = augur_app.database + self.helper_db = augur_app.operations_database helper_metadata = MetaData() helper_metadata.reflect(self.helper_db, only=['worker_job']) HelperBase = automap_base(metadata=helper_metadata) HelperBase.prepare() - self.job_table = HelperBase.classes.worker_job.__table__ repoUrlSQL = s.sql.text(""" SELECT repo_git FROM repo """) - rs = pd.read_sql(repoUrlSQL, self.db, params={}) - all_repos = rs['repo_git'].values.tolist() # List of tasks that need periodic updates - self.__updatable = self.prep_jobs(jobs) + self.schedule_updates() + + def schedule_updates(self): + """ + Starts update processes + """ + self.prep_jobs() + self.augur_logging.initialize_housekeeper_logging_listener() + logger.info("Scheduling update processes") + for job in self.jobs: + process = Process(target=self.updater_process, name=job["model"], args=(self.broker_host, self.broker_port, self.broker, job, (self.augur_logging.housekeeper_job_config, self.augur_logging.get_config()))) + self._processes.append(process) + 
process.start() - self.__processes = [] - self.__updater() @staticmethod - def updater_process(broker_host, broker_port, broker, job): + def updater_process(broker_host, broker_port, broker, job, logging_config): """ Controls a given plugin's update process - :param name: name of object to be updated - :param delay: time needed to update - :param shared: shared object that is to also be updated + """ - + logging.config.dictConfig(logging_config[0]) + logger = logging.getLogger(f"augur.jobs.{job['model']}") + coloredlogs.install(level=logging_config[1]["log_level"], logger=logger, fmt=logging_config[1]["format_string"]) + + if logging_config[1]["quiet"]: + logger.disabled + if 'repo_group_id' in job: repo_group_id = job['repo_group_id'] - logging.info('Housekeeper spawned {} model updater process for repo group id {} with PID {}\n'.format(job['model'], repo_group_id, os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo group id {}'.format(job['model'], repo_group_id)) else: repo_group_id = None - logging.info('Housekeeper spawned {} model updater process for repo ids {} with PID {}\n'.format(job['model'], job['repo_ids'], os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo ids {}'.format(job['model'], job['repo_ids'])) try: compatible_worker_found = False @@ -76,10 +92,10 @@ def updater_process(broker_host, broker_port, broker, job): time.sleep(3) continue - logging.info("Housekeeper recognized that the broker has a worker that " + - "can handle the {} model... beginning to distribute maintained tasks\n".format(job['model'])) + logger.info("Housekeeper recognized that the broker has a worker that " + + "can handle the {} model... beginning to distribute maintained tasks".format(job['model'])) while True: - logging.info('Housekeeper updating {} model with given {}...\n'.format( + logger.info('Housekeeper updating {} model with given {}...'.format( job['model'], job['given'][0])) if job['given'][0] == 'git_url' or job['given'][0] == 'github_url': @@ -100,9 +116,9 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.error("Error encountered: {}".format(e)) - logging.info(task) + logger.debug(task) time.sleep(15) @@ -119,61 +135,33 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.error("Error encountered: {}".format(e)) - logging.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)\n".format(len(job['repos']))) + logger.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)".format(len(job['repos']))) time.sleep(job['delay']) - - except KeyboardInterrupt: - os.kill(os.getpid(), 9) - os._exit(0) - except: - raise - def __updater(self, jobs=None): - """ - Starts update processes - """ - logging.info("Starting update processes...") - if jobs is None: - jobs = self.__updatable - for job in jobs: - up = Process(target=self.updater_process, args=(self.broker_host, self.broker_port, self.broker, job), daemon=True) - up.start() - self.__processes.append(up) - - def update_all(self): - """ - Updates all plugins - """ - for updatable in self.__updatable: - 
updatable['update']() - - def schedule_updates(self): - """ - Schedules updates - """ - # don't use this, - logging.debug('Scheduling updates...') - self.__updater() + except KeyboardInterrupt as e: + pass def join_updates(self): """ Join to the update processes """ - for process in self.__processes: + for process in self._processes: + logger.debug(f"Joining {process.name} update process") process.join() def shutdown_updates(self): """ Ends all running update processes """ - for process in self.__processes: + for process in self._processes: + # logger.debug(f"Terminating {process.name} update process") process.terminate() - def prep_jobs(self, jobs): - - for job in jobs: + def prep_jobs(self): + logger.info("Preparing housekeeper jobs") + for job in self.jobs: if 'repo_group_id' in job or 'repo_ids' in job: # If RG id is 0 then it just means to query all repos where_and = 'AND' if job['model'] == 'issues' and 'repo_group_id' in job else 'WHERE' @@ -269,7 +257,7 @@ def prep_jobs(self, jobs): reorganized_repos = pd.read_sql(repo_url_sql, self.db, params={}) if len(reorganized_repos) == 0: - logging.info("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) + logger.warning("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) job['repos'] = [] continue @@ -290,7 +278,7 @@ def prep_jobs(self, jobs): 'oauth_id': 0 } result = self.helper_db.execute(self.job_table.insert().values(job_tuple)) - logging.info("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) + logger.debug("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) # If a last id is not recorded, start from beginning of repos # (first id is not necessarily 0) @@ -347,5 +335,3 @@ def prep_jobs(self, jobs): job['repos'] = rs # time.sleep(120) - return jobs - diff --git a/augur/logging.py b/augur/logging.py new file mode 100644 --- /dev/null +++ b/augur/logging.py @@ -0,0 +1,305 @@ +import logging +import logging.config +import logging.handlers +from logging import FileHandler, StreamHandler, Formatter +from multiprocessing import Process, Queue, Event, current_process +from time import sleep +import os +from pathlib import Path +import atexit +import shutil +import coloredlogs +from copy import deepcopy + +from augur import ROOT_AUGUR_DIRECTORY + +logger = logging.getLogger(__name__) + +class AugurLogging(): + + simple_format_string = "[%(process)d] %(name)s [%(levelname)s] %(message)s" + verbose_format_string = "%(asctime)s,%(msecs)dms [PID: %(process)d] %(name)s [%(levelname)s] %(message)s" + cli_format_string = "CLI: [%(module)s.%(funcName)s] [%(levelname)s] %(message)s" + config_format_string = "[%(levelname)s] %(message)s" + error_format_string = "%(asctime)s [PID: %(process)d] %(name)s [%(funcName)s() in %(filename)s:L%(lineno)d] [%(levelname)s]: %(message)s" + + @staticmethod + def get_log_directories(augur_config, reset_logfiles=True): + LOGS_DIRECTORY = augur_config.get_value("Logging", "logs_directory") + + if LOGS_DIRECTORY[0] != "/": + LOGS_DIRECTORY = ROOT_AUGUR_DIRECTORY + "/" + LOGS_DIRECTORY + + if LOGS_DIRECTORY[-1] != "/": + LOGS_DIRECTORY += "/" + + if reset_logfiles is True: + try: + shutil.rmtree(LOGS_DIRECTORY) + except FileNotFoundError as e: + pass + + Path(LOGS_DIRECTORY).mkdir(exist_ok=True) + + return LOGS_DIRECTORY + + def __init__(self, disable_logs=False, 
reset_logfiles=True): + self.stop_event = None + self.LOGS_DIRECTORY = None + self.WORKER_LOGS_DIRECTORY = None + self.LOG_LEVEL = None + self.VERBOSE = None + self.QUIET = None + self.DEGBUG = None + + self.logfile_config = None + self.housekeeper_job_config = None + + self._reset_logfiles = reset_logfiles + + self.formatters = { + "simple": { + "class": "logging.Formatter", + "format": AugurLogging.simple_format_string + }, + "verbose": { + "class": "logging.Formatter", + "format": AugurLogging.verbose_format_string + }, + "cli": { + "class": "logging.Formatter", + "format": AugurLogging.cli_format_string + }, + "config": { + "class": "logging.Formatter", + "format": AugurLogging.config_format_string + }, + "error": { + "class": "logging.Formatter", + "format": AugurLogging.error_format_string + } + } + + self._configure_cli_logger() + + level = logging.INFO + config_handler = StreamHandler() + config_handler.setFormatter(Formatter(fmt=AugurLogging.config_format_string)) + config_handler.setLevel(level) + + config_initialization_logger = logging.getLogger("augur.config") + config_initialization_logger.setLevel(level) + config_initialization_logger.handlers = [] + config_initialization_logger.addHandler(config_handler) + config_initialization_logger.propagate = False + + coloredlogs.install(level=level, logger=config_initialization_logger, fmt=AugurLogging.config_format_string) + + if disable_logs: + self._disable_all_logging() + + + def _disable_all_logging(self): + for logger in ["augur", "augur.application", "augur.housekeeper", "augur.config", "augur.cli", "root"]: + lg = logging.getLogger(logger) + lg.disabled = True + + def _configure_cli_logger(self): + cli_handler = StreamHandler() + cli_handler.setLevel(logging.INFO) + + cli_logger = logging.getLogger("augur.cli") + cli_logger.setLevel(logging.INFO) + cli_logger.handlers = [] + cli_logger.addHandler(cli_handler) + cli_logger.propagate = False + + coloredlogs.install(level=logging.INFO, logger=cli_logger, fmt=AugurLogging.cli_format_string) + + def _set_config(self, augur_config): + self.LOGS_DIRECTORY = AugurLogging.get_log_directories(augur_config, self._reset_logfiles) + self.LOG_LEVEL = augur_config.get_value("Logging", "log_level") + self.QUIET = int(augur_config.get_value("Logging", "quiet")) + self.DEBUG = int(augur_config.get_value("Logging", "debug")) + self.VERBOSE = int(augur_config.get_value("Logging", "verbose")) + # self.JOB_NAMES = [job["model"] for job in deepcopy(augur_config.get_value("Housekeeper", "jobs"))] + + if self.QUIET: + self._disable_all_logging() + + if self.DEBUG: + self.LOG_LEVEL = "DEBUG" + self.VERBOSE = True + + if self.VERBOSE: + self.FORMATTER = "verbose" + else: + self.FORMATTER = "simple" + self.format_string = self.formatters[self.FORMATTER]["format"] + + def configure_logging(self, augur_config): + self._set_config(augur_config) + self._configure_logfiles() + self._configure_cli_logger() + self._configure_gunicorn_logging() + logger.debug("Loggers are fully configured") + + def _configure_logfiles(self): + self.logfile_config = { + "version": 1, + "disable_existing_loggers": True, + "formatters": self.formatters, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "formatter": self.FORMATTER, + "level": self.LOG_LEVEL + }, + "logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "augur.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "errorfile": { + "class": "logging.FileHandler", + "filename": 
self.LOGS_DIRECTORY + "augur.err", + "mode": "a", + "level": logging.WARNING, + "formatter": "error" + }, + "server_logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "gunicorn.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "housekeeper_logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "housekeeper.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "housekeeper_errorfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "housekeeper.err", + "mode": "a", + "level": logging.WARNING, + "formatter": "error", + }, + }, + "loggers": { + "augur": { + "handlers": ["console", "logfile", "errorfile"], + "level": self.LOG_LEVEL + }, + "augur.server": { + "handlers": ["server_logfile"], + "level": self.LOG_LEVEL, + "propagate": False + }, + "augur.housekeeper": { + "handlers": ["housekeeper_logfile", "housekeeper_errorfile"], + "level": self.LOG_LEVEL, + }, + "augur.jobs": { + "handlers": ["housekeeper_logfile", "housekeeper_errorfile", "logfile", "errorfile"], + "level": self.LOG_LEVEL, + "propagate": False + } + }, + "root": { + "handlers": [], + "level": self.LOG_LEVEL + } + } + + logging.config.dictConfig(self.logfile_config) + for logger_name in ["augur", "augur.housekeeper", "augur.jobs"]: + coloredlogs.install(logger=logging.getLogger(logger_name), level=self.LOG_LEVEL, fmt=self.format_string) + + logger.debug("Logfiles initialized") + logger.debug("Logs will be written to: " + self.LOGS_DIRECTORY) + + def initialize_housekeeper_logging_listener(self): + queue = Queue() + self.housekeeper_job_config = { + "version": 1, + "disable_existing_loggers": True, + "formatters": self.formatters, + "handlers": { + "queue": { + "class": "logging.handlers.QueueHandler", + "queue": queue + } + }, + "root": { + "handlers": ["queue"], + "level": self.LOG_LEVEL + } + } + + stop_event = Event() + self.lp = Process(target=logging_listener_process, name='housekeeper_logging_listener', + args=(queue, stop_event, self.logfile_config)) + self.lp.start() + sleep(2) # just to let it fully start up + self.stop_event = stop_event + logger.debug("Houseekeeper logging listener initialized") + + def get_config(self): + return { + "log_level": self.LOG_LEVEL, + "quiet": self.QUIET, + "verbose": self.VERBOSE, + "debug": self.DEBUG, + "format_string": self.format_string + } + + def _configure_gunicorn_logging(self): + gunicorn_log_file = self.LOGS_DIRECTORY + "gunicorn.log" + self.gunicorn_logging_options = { + "errorlog": gunicorn_log_file, + "accesslog": gunicorn_log_file, + "loglevel": self.LOG_LEVEL, + "capture_output": False + } + +def logging_listener_process(queue, stop_event, config): + """ + This could be done in the main process, but is just done in a separate + process for illustrative purposes. + + This initialises logging according to the specified configuration, + starts the listener and waits for the main process to signal completion + via the event. The listener is then stopped, and the process exits. + """ + logging.config.dictConfig(config) + listener = logging.handlers.QueueListener(queue, AugurLoggingHandler()) + listener.start() + try: + stop_event.wait() + except KeyboardInterrupt: + pass + finally: + listener.stop() + +class AugurLoggingHandler: + """ + A simple handler for logging events. 
It runs in the listener process and + dispatches events to loggers based on the name in the received record, + which then get dispatched, by the logging system, to the handlers + configured for those loggers. + """ + + def handle(self, record): + if record.name == "root": + logger = logging.getLogger() + else: + logger = logging.getLogger(record.name) + + record.processName = '%s (for %s)' % (current_process().name, record.processName) + logger.handle(record) diff --git a/augur/metrics/__init__.py b/augur/metrics/__init__.py --- a/augur/metrics/__init__.py +++ b/augur/metrics/__init__.py @@ -1 +1,38 @@ -from .metrics import Metrics \ No newline at end of file +import os +import glob +import sys +import inspect +import types +import importlib +import logging + +logger = logging.getLogger(__name__) + +class Metrics(): + def __init__(self, app): + logger.debug("Loading metrics") + self.database = app.database + self.spdx_db = app.spdx_database + + self.models = [] #TODO: standardize this + for filename in glob.iglob("augur/metrics/**"): + file_id = get_file_id(filename) + if not file_id.startswith('__') and filename.endswith('.py') and file_id != "metrics": + self.models.append(file_id) + + for model in self.models: + importlib.import_module(f"augur.metrics.{model}") + add_metrics(self, f"augur.metrics.{model}") + +def get_file_id(path): + return os.path.splitext(os.path.basename(path))[0] + +def add_metrics(metrics, module_name): + # find all unbound endpoint functions objects + # (ones that have metadata) defined the given module_name + # and bind them to the metrics class + for name, obj in inspect.getmembers(sys.modules[module_name]): + if inspect.isfunction(obj) == True: + if hasattr(obj, 'is_metric') == True: + setattr(metrics, name, types.MethodType(obj, metrics)) + diff --git a/augur/metrics/metrics.py b/augur/metrics/metrics.py deleted file mode 100644 --- a/augur/metrics/metrics.py +++ /dev/null @@ -1,35 +0,0 @@ -import os -import glob -import sys -import inspect -import types -import importlib -from augur import logger - -class Metrics(): - def __init__(self, app): - self.database = app.database - self.spdx_db = app.spdx_db - - self.models = [] #TODO: standardize this - for filename in glob.iglob("augur/metrics/**"): - file_id = get_file_id(filename) - if not file_id.startswith('__') and filename.endswith('.py') and file_id != "metrics": - self.models.append(file_id) - - for model in self.models: - importlib.import_module(f"augur.metrics.{model}") - add_metrics(self, f"augur.metrics.{model}") - -def get_file_id(path): - return os.path.splitext(os.path.basename(path))[0] - -def add_metrics(metrics, module_name): - # find all unbound endpoint functions objects - # (ones that have metadata) defined the given module_name - # and bind them to the metrics class - for name, obj in inspect.getmembers(sys.modules[module_name]): - if inspect.isfunction(obj) == True: - if hasattr(obj, 'is_metric') == True: - setattr(metrics, name, types.MethodType(obj, metrics)) - diff --git a/augur/metrics/repo_meta.py b/augur/metrics/repo_meta.py --- a/augur/metrics/repo_meta.py +++ b/augur/metrics/repo_meta.py @@ -5,9 +5,12 @@ import datetime import sqlalchemy as s import pandas as pd -from augur import logger -from augur.util import register_metric import math +import logging + +from augur.util import register_metric + +logger = logging.getLogger("augur") @register_metric() def code_changes(self, repo_group_id, repo_id=None, period='week', begin_date=None, end_date=None): diff --git 
a/augur/models/__init__.py b/augur/models/__init__.py deleted file mode 100644 --- a/augur/models/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from sqlalchemy.orm import sessionmaker -from .user import User -from .repo import Repo, RepoGroup - - -__all__ = ['User', 'RepoGroup', 'Repo'] \ No newline at end of file diff --git a/augur/models/common.py b/augur/models/common.py deleted file mode 100644 --- a/augur/models/common.py +++ /dev/null @@ -1,2 +0,0 @@ -from sqlalchemy.ext.declarative import declarative_base -Base = declarative_base() \ No newline at end of file diff --git a/augur/models/repo.py b/augur/models/repo.py deleted file mode 100644 --- a/augur/models/repo.py +++ /dev/null @@ -1,48 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime -from sqlalchemy.orm import relationship -from .common import Base -from .user import user_has_repo_group - -repo_group_has_project = Table('repo_group_has_project', - Base.metadata, - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), - Column('repo_id', ForeignKey('repo.url'), primary_key=True), -) - -class Repo(Base): - """ - The Repo object models a VCS repository - """ - __tablename__ = 'repo' - - # Keys - url = Column(String(1024), primary_key=True) - vcs = Column(String(64), default='git') - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - repo_groups_member_of = relationship('RepoGroup', secondary=repo_group_has_project, back_populates='projects') - - def __repr__(self): - return f"<Repo(giturl='{self.password}')>" - - -class RepoGroup(Base): - """ - The RepoGroup class models lists of projects that a user wants to keep track of - """ - __tablename__ = 'repo_group' - - # Keys - id = Column(Integer, primary_key=True) - name = Column(String(128)) - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - projects = relationship('Repo', secondary=repo_group_has_project, back_populates='repo_groups_member_of') - users_of = relationship('User', secondary=user_has_repo_group, back_populates='repo_groups') \ No newline at end of file diff --git a/augur/models/user.py b/augur/models/user.py deleted file mode 100644 --- a/augur/models/user.py +++ /dev/null @@ -1,61 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime, Boolean -from sqlalchemy.orm import relationship -from sqlalchemy.ext.hybrid import hybrid_property -from .common import Base -from werkzeug.security import generate_password_hash, check_password_hash -from flask_login import UserMixin - -user_has_repo_group = Table('user_has_repo_group', - Base.metadata, - Column('user_id', ForeignKey('user.id'), primary_key=True), - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), -) - -class User(Base): - """ - The User object models users in the database. 
- """ - __tablename__ = 'user' - - # Keys - id = Column(Integer, primary_key=True) - username = Column(String(64), unique=True, nullable=False) - email = Column(String(64), unique=True, nullable=False) - - # Fields - password_hash = Column(String(128)) - email_confirmation_token = Column(String(128), nullable=True) - created_at = Column(DateTime, default=datetime.datetime.utcnow) - password_updated_at = Column(DateTime, default=datetime.datetime.utcnow) - last_login_at = Column(DateTime, nullable=True) - authenticated = Column(Boolean, default=False) - active = Column(Boolean, default=True) - administrator = Column(Boolean, default=False) - - # Foreign Keys - repo_groups = relationship('RepoGroup', secondary=user_has_repo_group, back_populates='users_of') - - def get_id(self): - return self.id - - def __repr__(self): - return f"<User(username='{self.username}', email='{self.email}')>" - - @hybrid_property - def password(self): - return self.password_hash - - @password.setter - def password(self, password): - self.password_hash = generate_password_hash(password) - - def check_password(self, password): - return check_password_hash(self.password_hash, password) - - def is_authenticated(self): - return self.authenticated - - def is_active(self): - # False as we do not support annonymity - return self.active diff --git a/augur/routes/__init__.py b/augur/routes/__init__.py --- a/augur/routes/__init__.py +++ b/augur/routes/__init__.py @@ -1,11 +1,12 @@ +import logging import importlib import os import glob import sys import inspect -from augur import logger +logger = logging.getLogger(__name__) def get_route_files(): route_files = [] @@ -13,11 +14,11 @@ def get_route_files(): def get_file_id(path): return os.path.splitext(os.path.basename(path))[0] - for filename in glob.iglob("**/routes/*"): + for filename in glob.iglob("augur/routes/*"): file_id = get_file_id(filename) if not file_id.startswith('__') and filename.endswith('.py'): route_files.append(file_id) - + return route_files route_files = get_route_files() diff --git a/augur/routes/batch.py b/augur/routes/batch.py --- a/augur/routes/batch.py +++ b/augur/routes/batch.py @@ -10,9 +10,10 @@ from sqlalchemy import exc from flask import request, Response from augur.util import metric_metadata -from augur import logger import json +logger = logging.getLogger(__name__) + def create_routes(server): @server.app.route('/{}/batch'.format(server.api_version), methods=['GET', 'POST']) diff --git a/augur/routes/broker.py b/augur/routes/broker.py --- a/augur/routes/broker.py +++ b/augur/routes/broker.py @@ -9,6 +9,9 @@ import requests from flask import request, Response +logger = logging.getLogger(__name__) + +# TODO: not this... 
def worker_start(worker_name=None): process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True) @@ -26,12 +29,12 @@ def send_task(worker_proxy): j = r.json() if 'status' not in j: - logging.info("Worker: {}'s heartbeat did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) + logger.error("Worker: {}'s heartbeat did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) worker_proxy['status'] = 'Disconnected' return if j['status'] != 'alive': - logging.info("Worker: {} is busy, setting its status as so.\n".format(worker_id)) + logger.info("Worker: {} is busy, setting its status as so.\n".format(worker_id)) return # Want to check user-created job requests first @@ -43,16 +46,16 @@ def send_task(worker_proxy): new_task = maintain_queue.pop(0) else: - logging.info("Both queues are empty for worker {}\n".format(worker_id)) + logger.debug("Both queues are empty for worker {}\n".format(worker_id)) worker_proxy['status'] = 'Idle' return - logging.info("Worker {} is idle, preparing to send the {} task to {}\n".format(worker_id, new_task['display_name'], task_endpoint)) + logger.info("Worker {} is idle, preparing to send the {} task to {}\n".format(worker_id, new_task['display_name'], task_endpoint)) try: requests.post(task_endpoint, json=new_task) worker_proxy['status'] = 'Working' except: - logging.info("Sending Worker: {} a task did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) + logger.error("Sending Worker: {} a task did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) worker_proxy['status'] = 'Disconnected' # If the worker died, then restart it worker_start(worker_id.split('.')[len(worker_id.split('.')) - 2]) @@ -71,9 +74,9 @@ def task(): for given_component in list(task['given'].keys()): given.append(given_component) model = task['models'][0] - logging.info("Broker recieved a new user task ... checking for compatible workers for given: " + str(given) + " and model(s): " + str(model) + "\n") + logger.info("Broker recieved a new user task ... 
checking for compatible workers for given: " + str(given) + " and model(s): " + str(model) + "\n") - logging.info("Broker's list of all workers: {}\n".format(server.broker._getvalue().keys())) + logger.debug("Broker's list of all workers: {}\n".format(server.broker._getvalue().keys())) worker_found = False compatible_workers = {} @@ -83,7 +86,7 @@ def task(): if type(server.broker[worker_id]._getvalue()) != dict: continue - logging.info("Considering compatible worker: {}\n".format(worker_id)) + logger.info("Considering compatible worker: {}\n".format(worker_id)) # Group workers by type (all gh workers grouped together etc) worker_type = worker_id.split('.')[len(worker_id.split('.'))-2] @@ -91,28 +94,28 @@ def task(): # Make worker that is prioritized the one with the smallest sum of task queues if (len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue'])) < min([compatible_workers[w]['task_load'] for w in compatible_workers.keys() if worker_type == w]): - logging.info("Worker id: {} has the smallest task load encountered so far: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']))) + logger.info("Worker id: {} has the smallest task load encountered so far: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']))) compatible_workers[worker_type]['task_load'] = len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']) compatible_workers[worker_type]['worker_id'] = worker_id for worker_type in compatible_workers.keys(): worker_id = compatible_workers[worker_type]['worker_id'] worker = server.broker[worker_id] - logging.info("Final compatible worker chosen: {} with smallest task load: {} found to work on task: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']), task)) + logger.info("Final compatible worker chosen: {} with smallest task load: {} found to work on task: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']), task)) if task['job_type'] == "UPDATE": worker['user_queue'].append(task) - logging.info("Added task for model: {}. New length of worker {}'s user queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['user_queue'])))) + logger.info("Added task for model: {}. New length of worker {}'s user queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['user_queue'])))) elif task['job_type'] == "MAINTAIN": worker['maintain_queue'].append(task) - logging.info("Added task for model: {}. New length of worker {}'s maintain queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['maintain_queue'])))) + logger.info("Added task for model: {}. 
New length of worker {}'s maintain queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['maintain_queue'])))) if worker['status'] == 'Idle': send_task(worker) worker_found = True # Otherwise, let the frontend know that the request can't be served if not worker_found: - logging.info("Augur does not have knowledge of any workers that are capable of handing the request: {}\n".format(task)) + logger.warning("Augur does not have knowledge of any workers that are capable of handing the request: {}\n".format(task)) return Response(response=task, status=200, @@ -124,7 +127,7 @@ def worker(): and telling the broker to add this worker to the set it maintains """ worker = request.json - logging.info("Recieved HELLO message from worker: {}\n".format(worker['id'])) + logger.info("Recieved HELLO message from worker: {}\n".format(worker['id'])) if worker['id'] not in server.broker: server.broker[worker['id']] = server.manager.dict() server.broker[worker['id']]['id'] = worker['id'] @@ -139,7 +142,7 @@ def worker(): server.broker[worker['id']]['status'] = 'Idle' server.broker[worker['id']]['location'] = worker['location'] else: - logging.info("Worker: {} has been reconnected.\n".format(worker['id'])) + logger.info("Worker: {} has been reconnected.\n".format(worker['id'])) models = server.broker[worker['id']]['models'] givens = server.broker[worker['id']]['given'] user_queue = server.broker[worker['id']]['user_queue'] @@ -157,7 +160,7 @@ def worker(): def sync_queue(): task = request.json worker = task['worker_id'] - logging.info("Message recieved that worker {} completed task: {}\n".format(worker,task)) + logger.info("Message recieved that worker {} completed task: {}\n".format(worker,task)) try: models = server.broker[worker]['models'] givens = server.broker[worker]['given'] @@ -167,8 +170,8 @@ def sync_queue(): if server.broker[worker]['status'] != 'Disconnected': send_task(server.broker[worker]) except Exception as e: - logging.info("Ran into error: {}\n".format(repr(e))) - logging.info("A past instance of the {} worker finished a previous leftover task.\n".format(worker)) + logger.error("Ran into error: {}\n".format(repr(e))) + logger.error("A past instance of the {} worker finished a previous leftover task.\n".format(worker)) return Response(response=task, status=200, @@ -190,7 +193,7 @@ def get_status(): @server.app.route('/{}/workers/remove'.format(server.api_version), methods=['POST']) def remove_worker(): worker = request.json - logging.info("Recieved a message to disconnect worker: {}\n".format(worker)) + logger.info("Recieved a message to disconnect worker: {}\n".format(worker)) server.broker[worker['id']]['status'] = 'Disconnected' return Response(response=worker, status=200, @@ -200,13 +203,13 @@ def remove_worker(): def task_error(): task = request.json worker_id = task['worker_id'] - logging.info("Recieved a message that {} ran into an error on task: {}\n".format(worker_id, task)) + logger.error("Recieved a message that {} ran into an error on task: {}\n".format(worker_id, task)) if worker_id in server.broker: if server.broker[worker_id]['status'] != 'Disconnected': - logging.info("{} ran into error while completing task: {}\n".format(worker_id, task)) + logger.error("{} ran into error while completing task: {}\n".format(worker_id, task)) send_task(server.broker[worker_id]) else: - logging.info("A previous instance of {} ran into error while completing task: {}\n".format(worker_id, task)) + logger.error("A previous instance of {} ran into error while completing task: 
{}\n".format(worker_id, task)) return Response(response=request.json, status=200, mimetype="application/json") \ No newline at end of file diff --git a/augur/routes/manager.py b/augur/routes/manager.py --- a/augur/routes/manager.py +++ b/augur/routes/manager.py @@ -285,15 +285,15 @@ def get_repo_name(self): repo = self.name return repo[repo.find('/')+1:] -def authenticate_request(app, request): +def authenticate_request(augur_app, request): # do I like doing it like this? not at all # do I have the time to implement a better solution right now? not at all - user = app.read_config('Database', 'user') - password = app.read_config('Database', 'password') - host = app.read_config('Database', 'host') - port = app.read_config('Database', 'port') - dbname = app.read_config('Database', 'name') + user = augur_app.config.get_value('Database', 'user') + password = augur_app.config.get_value('Database', 'password') + host = augur_app.config.get_value('Database', 'host') + port = augur_app.config.get_value('Database', 'port') + dbname = augur_app.config.get_value('Database', 'name') DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( user, password, host, port, dbname diff --git a/augur/routes/metrics/release.py b/augur/routes/metrics/release.py deleted file mode 100644 --- a/augur/routes/metrics/release.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.releases, 'releases') \ No newline at end of file diff --git a/augur/routes/util.py b/augur/routes/util.py --- a/augur/routes/util.py +++ b/augur/routes/util.py @@ -200,7 +200,7 @@ def get_issues(repo_group_id, repo_id=None): @server.app.route('/{}/api-port'.format(server.api_version)) def api_port(): - response = {'port': server.augur_app.read_config('Server', 'port')} + response = {'port': server.augur_app.config.get_value('Server', 'port')} return Response(response=json.dumps(response), status=200, mimetype="application/json") diff --git a/augur/server.py b/augur/server.py --- a/augur/server.py +++ b/augur/server.py @@ -10,50 +10,43 @@ import json import os import base64 +import logging from flask import Flask, request, Response, redirect from flask_cors import CORS import pandas as pd import augur -from augur.util import logger from augur.routes import create_routes AUGUR_API_VERSION = 'api/unstable' -class VueCompatibleFlask(Flask): - jinja_options = Flask.jinja_options.copy() - jinja_options.update(dict( - block_start_string='(%', - block_end_string='%)', - variable_start_string='%%', - variable_end_string='%%', - comment_start_string='(#', - comment_end_string='#)', - )) - +logger = logging.getLogger(__name__) class Server(object): """ Defines Augur's server's behavior """ - def __init__(self, frontend_folder='../frontend/public', manager=None, broker=None, housekeeper=None): + def __init__(self, augur_app=None): """ Initializes the server, creating both the Flask application and Augur application """ # Create Flask application - self.app = VueCompatibleFlask(__name__, static_folder=frontend_folder, template_folder=frontend_folder) + self.app = Flask(__name__) + logger.debug("Created Flask app") self.api_version = AUGUR_API_VERSION app = self.app CORS(app) app.url_map.strict_slashes = False - # Create Augur application - self.augur_app = augur.Application() + self.augur_app = augur_app + self.manager = augur_app.manager + self.broker = augur_app.broker + self.housekeeper = augur_app.housekeeper # Initialize cache - expire = 
int(self.augur_app.read_config('Server', 'cache_expire')) + expire = int(self.augur_app.config.get_value('Server', 'cache_expire')) self.cache = self.augur_app.cache.get_cache('server', expire=expire) self.cache.clear() @@ -61,10 +54,7 @@ def __init__(self, frontend_folder='../frontend/public', manager=None, broker=No self.show_metadata = False - self.manager = manager - self.broker = broker - self.housekeeper = housekeeper - + logger.debug("Creating API routes...") create_routes(self) ##################################### @@ -190,40 +180,3 @@ def add_standard_metric(self, function, endpoint, **kwargs): self.app.route(repo_endpoint)(self.routify(function, 'repo')) self.app.route(repo_group_endpoint)(self.routify(function, 'repo_group')) self.app.route(deprecated_repo_endpoint )(self.routify(function, 'deprecated_repo')) - -def run(): - """ - Runs server with configured hosts/ports - """ - server = Server() - host = server.augur_app.read_config('Server', 'host') - port = server.augur_app.read_config('Server', 'port') - Server().app.run(host=host, port=int(port), debug=True) - -wsgi_app = None -def wsgi(environ, start_response): - """ - Creates WSGI app - """ - global wsgi_app - if (wsgi_app is None): - app_instance = Server() - wsgi_app = app_instance.app - # Stuff to make proxypass work - script_name = environ.get('HTTP_X_SCRIPT_NAME', '') - if script_name: - environ['SCRIPT_NAME'] = script_name - path_info = environ['PATH_INFO'] - if path_info.startswith(script_name): - environ['PATH_INFO'] = path_info[len(script_name):] - - scheme = environ.get('HTTP_X_SCHEME', '') - if scheme: - environ['wsgi.url_scheme'] = scheme - server = environ.get('HTTP_X_FORWARDED_SERVER', '') - if server: - environ['HTTP_HOST'] = server - return wsgi_app(environ, start_response) - -if __name__ == "__main__": - run() diff --git a/augur/util.py b/augur/util.py --- a/augur/util.py +++ b/augur/util.py @@ -8,8 +8,9 @@ import types import sys import beaker +import logging -from augur import logger +logger = logging.getLogger(__name__) __ROOT = os.path.abspath(os.path.dirname(__file__)) def get_data_path(path): diff --git a/conftest.py b/conftest.py new file mode 100644 --- /dev/null +++ b/conftest.py @@ -0,0 +1,31 @@ +import pytest +import re + +from augur.application import Application +from augur.cli.run import initialize_components + +default_repo_id = "25430" +default_repo_group_id = "10" + +def create_full_routes(routes): + full_routes = [] + for route in routes: + route = re.sub("<default_repo_id>", default_repo_id, route) + route = re.sub("<default_repo_group_id>", default_repo_group_id, route) + route = "http://localhost:5000/api/unstable/" + route + full_routes.append(route) + return full_routes + [email protected](scope="session") +def augur_app(): + augur_app = Application(disable_logs=True) + return augur_app + [email protected](scope="session") +def metrics(augur_app): + return augur_app.metrics + [email protected](scope="session") +def client(augur_app): + flask_client = initialize_components(augur_app, disable_housekeeper=True).load() + return flask_client.test_client() diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -25,7 +25,6 @@ exec(open(os.path.join(here, "../../metadata.py")).read()) - sys.path.insert(0, os.path.abspath('../../../augur')) # -- General configuration ------------------------------------------------ @@ -82,8 +81,6 @@ copyright = __copyright__ author = 'Carter Landis' - - # The version info for the project you're 
documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. diff --git a/metadata.py b/metadata.py --- a/metadata.py +++ b/metadata.py @@ -1,13 +1,11 @@ -from os import path - __name__ = "Augur" __slug__ = "augur" __url__ = "https://github.com/chaoss/augur" __short_description__ = "Python 3 package for free/libre and open-source software community metrics & data collection" -__version__ = "0.12.0" -__release__ = "0.12.0" +__version__ = "0.13.0" +__release__ = "v0.13.0" __license__ = "MIT" __copyright__ = "CHAOSS & Augurlabs 2020" diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ "psycopg2-binary", "click", "psutil", - "gunicorn==19.9.0", + "gunicorn", "six>=1.14.0" ], extras_require={ @@ -61,7 +61,7 @@ }, entry_points={ "console_scripts": [ - "augur=augur.runtime:run" + "augur=augur.cli._multicommand:run" ], } ) diff --git a/util/alembic/env.py b/util/alembic/env.py deleted file mode 100644 --- a/util/alembic/env.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import with_statement -from alembic import context -from sqlalchemy import engine_from_config, pool -from logging.config import fileConfig - - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -from augur.models.common import Base -target_metadata = Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, target_metadata=target_metadata, literal_binds=True) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - import augur.application - app = augur.application.Application() - - context.configure( - connection=app.db.connect(), - target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py b/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py deleted file mode 100644 --- a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Create basic tables - -Revision ID: 2eaa930b1f5a -Revises: -Create Date: 2019-02-09 16:10:24.251828 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = '2eaa930b1f5a' -down_revision = None -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - op.create_table('repo', - sa.Column('url', sa.String(length=1024), nullable=False), - sa.Column('vcs', sa.String(length=64), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('url') - ) - op.create_table('repo_group', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('id') - ) - op.create_table('user', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('username', sa.String(length=64), nullable=False), - sa.Column('email', sa.String(length=64), nullable=False), - sa.Column('password_hash', sa.String(length=128), nullable=True), - sa.Column('email_confirmation_token', sa.String(length=128), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('password_updated_at', sa.DateTime(), nullable=True), - sa.Column('last_login_at', sa.DateTime(), nullable=True), - sa.Column('authenticated', sa.Boolean(), nullable=True), - sa.Column('active', sa.Boolean(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('email'), - sa.UniqueConstraint('username') - ) - op.create_table('repo_group_has_project', - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.Column('repo_id', sa.String(length=1024), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['repo_id'], ['repo.url'], ), - sa.PrimaryKeyConstraint('repo_group_id', 'repo_id') - ) - op.create_table('user_has_repo_group', - sa.Column('user_id', sa.Integer(), nullable=False), - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['user_id'], ['user.id'], ), - sa.PrimaryKeyConstraint('user_id', 'repo_group_id') - ) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.drop_table('user_has_repo_group') - op.drop_table('repo_group_has_project') - op.drop_table('user') - op.drop_table('repo_group') - op.drop_table('repo') - # ### end Alembic commands ### diff --git a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py b/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py deleted file mode 100644 --- a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Add admin to User, name to RepoGroup - -Revision ID: a051167419fa -Revises: 2eaa930b1f5a -Create Date: 2019-02-17 13:09:42.138936 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = 'a051167419fa' -down_revision = '2eaa930b1f5a' -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.add_column('repo_group', sa.Column('name', sa.String(length=128), nullable=True)) - op.add_column('user', sa.Column('administrator', sa.Boolean(), nullable=True)) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - op.drop_column('user', 'administrator') - op.drop_column('repo_group', 'name') - # ### end Alembic commands ### diff --git a/workers/contributor_worker/contributor_worker/worker.py b/workers/contributor_worker/contributor_worker.py similarity index 68% rename from workers/contributor_worker/contributor_worker/worker.py rename to workers/contributor_worker/contributor_worker.py --- a/workers/contributor_worker/contributor_worker/worker.py +++ b/workers/contributor_worker/contributor_worker.py @@ -8,189 +8,50 @@ import statistics, logging, os, json, time import numpy as np import datetime -from workers.standard_methods import * #init_oauths, get_table_values, get_max_id, register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process, paginate -import warnings -warnings.filterwarnings('ignore') -class ContributorWorker: +from workers.worker_base import Worker + +class ContributorWorker(Worker): """ Worker that detects anomalies on a select few of our metrics task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self._task = task - self._child = None - self._queue = Queue() - self.db = None - self.tool_source = 'Contributor Worker' - self.tool_version = '0.0.1' # See __init__.py - self.data_source = 'Augur Commit Data' - self.finishing_task = False - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["contributors"] - } - ], - "config": [self.config] - } + def __init__(self, config={}): - self.results_counter = 0 + worker_type = "contributor_worker" - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) + given = [['git_url']] + models = ['contributors'] - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) + data_tables = ['contributors', 'contributors_aliases', 'contributor_affiliations', + 'issue_events', 'pull_request_events', 'issues', 'message', 'issue_assignees', + 'pull_request_assignees', 'pull_request_reviewers', 'pull_request_meta', 'pull_request_repo'] + operations_tables = ['worker_history', 'worker_job'] - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - # produce our own MetaData object - metadata = MetaData() - helper_metadata = MetaData() + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) - # we can reflect it ourselves from a database, using options - # such as 'only' to limit what tables we look at... 
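The hand-rolled table reflection being removed here is the pattern the shared Worker base class now owns: reflect only the tables a worker declares, automap them, and expose the mapped Table objects. A rough standalone sketch of that SQLAlchemy idiom, with a placeholder connection string and table list (not the exact worker_base code):

import sqlalchemy as s
from sqlalchemy import MetaData
from sqlalchemy.ext.automap import automap_base

def reflect_tables(db_str, table_names, schema='augur_data'):
    # Reflect only the requested tables from the given schema and return
    # their Table objects, mirroring the per-worker boilerplate deleted here.
    engine = s.create_engine(db_str, poolclass=s.pool.NullPool,
                             connect_args={'options': '-csearch_path={}'.format(schema)})
    metadata = MetaData()
    metadata.reflect(engine, only=table_names)
    base = automap_base(metadata=metadata)
    base.prepare()
    return {name: getattr(base.classes, name).__table__ for name in table_names}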
- metadata.reflect(self.db, only=['contributors', 'contributors_aliases', 'contributor_affiliations', - 'issue_events', 'pull_request_events', 'issues', 'message', 'issue_assignees', - 'pull_request_assignees', 'pull_request_reviewers', 'pull_request_meta', 'pull_request_repo']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job']) - - # we can then produce a set of mappings from this MetaData. - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - # calling prepare() just sets up mapped classes and relationships. - Base.prepare() - HelperBase.prepare() - - # mapped classes are ready - self.contributors_table = Base.classes.contributors.__table__ - self.contributors_aliases_table = Base.classes.contributors_aliases.__table__ - self.contributor_affiliations_table = Base.classes.contributor_affiliations.__table__ - self.issue_events_table = Base.classes.issue_events.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.issues_table = Base.classes.issues.__table__ - self.message_table = Base.classes.message.__table__ - self.issue_assignees_table = Base.classes.issue_assignees.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5432/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if 
message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'contributors': - self.contributors_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass + # These 3 are included in every tuple the worker inserts (data collection info) + self.tool_source = 'Contributor Worker' + self.tool_version = '1.0.0' + self.data_source = 'Augur Commit Data' def contributors_model(self, entry_info, repo_id): + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's + self.logger.info("Querying starting ids info...\n") + + self.cntrb_id_inc = self.get_max_id('contributors', 'cntrb_id') + # Get and insert all users (emails) found by the facade worker self.insert_facade_contributors(entry_info, repo_id) # Get and insert all users github considers to be contributors for this repo - query_github_contributors(self, entry_info, repo_id) + self.query_github_contributors(entry_info, repo_id) - logging.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) + self.logger.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -242,7 +103,7 @@ def contributors_model(self, entry_info, repo_id): commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, \ params={'repo_id': repo_id}).to_json(orient="records")) - logging.info("We found {} distinct emails to search for in this repo (repo_id = {})".format( + self.logger.info("We found {} distinct emails to search for in this repo (repo_id = {})\n".format( len(commit_cntrbs), repo_id)) # For every unique commit contributor info combination... 
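The query pattern recurring in contributors_model is worth calling out: the worker runs a SQL text statement through pandas and converts the resulting frame to a list of plain dicts before iterating. A compact illustration of that idiom, with the SELECT abbreviated (the real userSQL above pulls every distinct name/email combination from the repo's commits, and the column names here are only indicative):

import json
import pandas as pd
import sqlalchemy as s

def distinct_commit_contributors(db, repo_id):
    # Abbreviated version of the worker's userSQL: distinct author
    # name/email pairs for one repo, returned as a list of dicts.
    sql = s.sql.text("""
        SELECT DISTINCT cmt_author_name AS name, cmt_author_raw_email AS email
        FROM commits
        WHERE repo_id = :repo_id
    """)
    return json.loads(pd.read_sql(sql, db, params={'repo_id': repo_id}).to_json(orient="records"))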
@@ -283,7 +144,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(times_used_tuple)) self.results_counter += 1 - logging.info("Updated cntrb_created_at and cntrb_last_used columns for existing " + self.logger.info("Updated cntrb_created_at and cntrb_last_used columns for existing " "tuple in the contributors table with email: {}\n".format(contributor['commit_email'])) # If cntrb_full_name column is not filled, go ahead and fill it bc we have that info @@ -297,7 +158,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(name_col)) - logging.info("Inserted cntrb_full_name column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_full_name column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) # If cntrb_canonical column is not filled, go ahead and fill it w main email bc @@ -312,7 +173,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(canonical_col)) - logging.info("Inserted cntrb_canonical column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_canonical column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) @@ -347,20 +208,20 @@ def contributors_model(self, entry_info, repo_id): url = 'https://api.github.com/search/users?q={}+in:email'.format( cmt_cntrb['email']) - logging.info("Hitting endpoint: " + url + " ...\n") + self.logger.info("Hitting endpoint: " + url + " ...\n") r = requests.get(url=url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) results = r.json() # If no matches or bad response, continue with other contributors if 'total_count' not in results: - logging.info("Search query returned an empty response, moving on...\n") + self.logger.info("Search query returned an empty response, moving on...\n") continue if results['total_count'] == 0: - logging.info("Search query did not return any results, moving on...\n") + self.logger.info("Search query did not return any results, moving on...\n") continue - logging.info("When searching for a contributor with info {}, we found the following users: {}\n".format( + self.logger.info("When searching for a contributor with info {}, we found the following users: {}\n".format( cmt_cntrb, results)) # Grab first result and make sure it has the highest match score @@ -370,9 +231,9 @@ def contributors_model(self, entry_info, repo_id): match = item cntrb_url = ("https://api.github.com/users/" + match['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) contributor = r.json() # Fill in all github information @@ -407,11 +268,12 @@ def contributors_model(self, entry_info, repo_id): } result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==self.cntrb_id_inc).values(cntrb_gh_info)) - logging.info("Updated existing tuple in the contributors table with github info after " + self.logger.info("Updated existing tuple in the 
contributors table with github info after " "a successful search query on a facade commit's author : {} {}\n".format(contributor, cntrb_gh_info)) # Dupe check + self.logger.info('Checking dupes.\n') dupe_cntrb_sql = s.sql.text(""" SELECT contributors.* FROM contributors inner join ( @@ -424,10 +286,23 @@ def contributors_model(self, entry_info, repo_id): dupe_cntrbs = pd.read_sql(dupe_cntrb_sql, self.db, params={}) - # Turn this column from nan to None - dupe_cntrbs['gh_user_id'] = dupe_cntrbs['gh_user_id'].where(pd.notnull(dupe_cntrbs['gh_user_id']), None) + self.logger.info(f'There are {len(dupe_cntrbs)} duplicates.\n') + + # Turn these columns from nan/nat to None + dupe_cntrbs['gh_user_id'] = dupe_cntrbs['gh_user_id'].where( + pd.notnull(dupe_cntrbs['gh_user_id']), None) + dupe_cntrbs['cntrb_last_used'] = dupe_cntrbs['cntrb_last_used'].astype( + object).where(dupe_cntrbs['cntrb_last_used'].notnull(), None) + dupe_cntrbs['cntrb_last_used'] = dupe_cntrbs['cntrb_last_used'].astype( + object).where(dupe_cntrbs['cntrb_last_used'].notnull(), None) for i, cntrb_existing in dupe_cntrbs.iterrows(): + + self.logger.info(f'Processing dupe: {cntrb_existing}.\n') + if i == 0: + self.logger.info('skipping first\n') + continue + cntrb_new = cntrb_existing.copy() del cntrb_new['cntrb_id'] del cntrb_new['data_collection_date'] @@ -447,22 +322,29 @@ def contributors_model(self, entry_info, repo_id): dupe_ids = pd.read_sql(dupe_ids_sql, self.db, params={'pk': pk, \ 'email': cntrb_new['cntrb_email']})['cntrb_id'].values.tolist() - self.map_new_id(self, dupe_ids, pk) + self.map_new_id(dupe_ids, pk) delete_dupe_ids_sql = s.sql.text(""" DELETE FROM contributors WHERE cntrb_id <> {} - AND cntrb_email = '{}' + AND cntrb_email = '{}'; """.format(pk, cntrb_new['cntrb_email'])) - self.db.execute(delete_dupe_ids_sql) + self.logger.info(f'Trying to delete dupes with sql: {delete_dupe_ids_sql}') + + try: + result = self.db.execute(delete_dupe_ids_sql) + except Exception as e: + self.logger.info(f'Deleting dupes failed with error: {e}') + + self.logger.info('Deleted duplicates.\n') # Register this task as completed - register_task_completion(self, entry_info, repo_id, "contributors") + self.register_task_completion(entry_info, repo_id, "contributors") def insert_facade_contributors(self, entry_info, repo_id): - logging.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) + self.logger.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -498,7 +380,7 @@ def insert_facade_contributors(self, entry_info, repo_id): """) commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, params={'repo_id': repo_id}).to_json(orient="records")) - logging.info("We found {} distinct contributors needing insertion (repo_id = {})".format( + self.logger.info("We found {} distinct contributors needing insertion (repo_id = {})".format( len(commit_cntrbs), repo_id)) for cntrb in commit_cntrbs: @@ -511,10 +393,10 @@ def insert_facade_contributors(self, entry_info, repo_id): 'cntrb_full_name': cntrb['name'] } result = self.db.execute(self.contributors_table.insert().values(cntrb_tuple)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) 
self.results_counter += 1 - logging.info("Inserted contributor: {}\n".format(cntrb['email'])) + self.logger.info("Inserted contributor: {}\n".format(cntrb['email'])) def handle_alias(self, tuple): cntrb_email = tuple['cntrb_email'] # canonical @@ -522,7 +404,7 @@ def handle_alias(self, tuple): cntrb_id = tuple['cntrb_id'] # Check existing contributors table tuple - existing_tuples = retrieve_tuple(self, {'cntrb_email': tuple['commit_email']}, ['contributors']) + existing_tuples = self.retrieve_tuple({'cntrb_email': tuple['commit_email']}, ['contributors']) if len(existing_tuples) == 0: """ Insert alias tuple into the contributor table """ @@ -543,15 +425,15 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc elif len(existing_tuples) > 1: # fix all dupe references to dupe cntrb ids before we delete them - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") - logging.info("For cntrb_email: {}".format(tuple['commit_email'])) + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") + self.logger.info("For cntrb_email: {}".format(tuple['commit_email'])) """ Insert alias tuple into the contributor table """ @@ -576,7 +458,7 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc @@ -610,14 +492,14 @@ def handle_alias(self, tuple): try: # Delete all dupes result = self.db.execute(deleteSQL) - logging.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) + self.logger.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) except Exception as e: - logging.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) + self.logger.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) else: #then there would be exactly 1 existing tuple, so that id is the one we want alias_id = existing_tuples[0]['cntrb_id'] - logging.info('Checking canonicals match.\n') + self.logger.info('Checking canonicals match.\n') alias_sql = s.sql.text(""" SELECT * FROM contributors @@ -636,14 +518,14 @@ def handle_alias(self, tuple): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_canonical==canonical_id_result.iloc[0]['cntrb_canonical'] ).values(canonical_col)) - logging.info("Updated cntrb_canonical column for existing tuple in the contributors " + self.logger.info("Updated cntrb_canonical column for existing tuple in the contributors " "table with email: {}\n".format(tuple['cntrb_email'])) # Now check existing alias table tuple - 
existing_tuples = retrieve_tuple(self, {'alias_email': commit_email}, ['contributors_aliases']) + existing_tuples = self.retrieve_tuple({'alias_email': commit_email}, ['contributors_aliases']) if len(existing_tuples) == 0: - logging.info("Finding cntrb_id for canonical email: {}".format(cntrb_email)) + self.logger.info("Finding cntrb_id for canonical email: {}".format(cntrb_email)) canonical_id_sql = s.sql.text(""" SELECT cntrb_id as canonical_id from contributors @@ -652,7 +534,7 @@ def handle_alias(self, tuple): canonical_id_result = json.loads(pd.read_sql(canonical_id_sql, self.db, params={'email': cntrb_email}).to_json( orient="records")) if len(canonical_id_result) > 1: - logging.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) + self.logger.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) alias_tuple = { 'cntrb_id': canonical_id_result[0]['canonical_id'], 'cntrb_a_id': alias_id, @@ -665,9 +547,9 @@ def handle_alias(self, tuple): } result = self.db.execute(self.contributors_aliases_table.insert().values(alias_tuple)) self.results_counter += 1 - logging.info("Inserted alias with email: {}\n".format(commit_email)) + self.logger.info("Inserted alias with email: {}\n".format(commit_email)) if len(existing_tuples) > 1: - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " "table AND NEED TO ADD DELETION LOGIC: {}\n".format(existing_tuples)) def map_new_id(self, dupe_ids, new_id): @@ -693,48 +575,49 @@ def map_new_id(self, dupe_ids, new_id): alias_result = self.db.execute(self.contributors_aliases_table.update().where( self.contributors_aliases_table.c.cntrb_a_id.in_(dupe_ids)).values(alias_update_col)) - logging.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) except Exception as e: - logging.info(f'Alias re-map already done... error: {e}') + self.logger.info(f'Alias re-map already done... 
error: {e}') issue_events_result = self.db.execute(self.issue_events_table.update().where( self.issue_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_events_result = self.db.execute(self.pull_request_events_table.update().where( self.pull_request_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issues_cntrb_result = self.db.execute(self.issues_table.update().where( self.issues_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issues_reporter_result = self.db.execute(self.issues_table.update().where( self.issues_table.c.reporter_id.in_(dupe_ids)).values(reporter_col)) - logging.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issue_assignee_result = self.db.execute(self.issue_assignees_table.update().where( self.issue_assignees_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_assignee_result = self.db.execute(self.pull_request_assignees_table.update().where( self.pull_request_assignees_table.c.contrib_id.in_(dupe_ids)).values(pr_assignee_col)) - logging.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) message_result = self.db.execute(self.message_table.update().where( self.message_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_reviewers_result = self.db.execute(self.pull_request_reviewers_table.update().where( self.pull_request_reviewers_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated 
cntrb_id column for tuple in the pull_request_reviewers table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_reviewers table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_meta_result = self.db.execute(self.pull_request_meta_table.update().where( self.pull_request_meta_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_repo_result = self.db.execute(self.pull_request_repo_table.update().where( self.pull_request_repo_table.c.pr_cntrb_id.in_(dupe_ids)).values(pr_repo_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_repo table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_repo table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info('Done mapping new id.\n') diff --git a/workers/contributor_worker/contributor_worker/__init__.py b/workers/contributor_worker/contributor_worker/__init__.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.0.1' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/contributor_worker/contributor_worker/runtime.py b/workers/contributor_worker/contributor_worker/runtime.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/runtime.py +++ /dev/null @@ -1,110 +0,0 @@ -from flask import Flask, jsonify, request -from contributor_worker.worker import ContributorWorker -import click, os, json, logging, requests -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}\n".format(str(request.json))) - app.contributor_worker.task = request.json - - #set task - return jsonify({"success": "sucess"}) - - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": app.contributor_worker._queue, - "tasks": [{ - "given": list(app.contributor_worker._queue) - }] - }) - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.contributor_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', 
default='localhost', help='Host') [email protected]('--port', default=51252, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'contributor_worker', None, {}) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.contributor_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "anomaly_days": worker_info['anomaly_days'] if 'anomaly_days' in worker_info else 2, - "training_days": worker_info['training_days'] if 'training_days' in worker_info else 365, - "confidence_interval": worker_info['confidence_interval'] if 'confidence_interval' in worker_info else .95, - "contamination": worker_info['contamination'] if 'contamination' in worker_info else 0.041, - 'metrics': worker_info['metrics'] if 'metrics' in worker_info else {"issues-new": "issues", - "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"} - } - - #create instance of the worker - app.contributor_worker = ContributorWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - print("Starting Flask App on host {} with port {} with pid: ".format(broker_host, worker_port) + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - print("Killing Flask App: {} and telling broker that this worker is disconnected.".format(str(os.getpid()))) - try: - logging.info("Sending disconnected message to broker... 
@ -> {} with info: {}\n".format('http://{}:{}/api/unstable/workers'.format( - config['broker_host'], config['broker_port']), config)) - requests.post('http://{}:{}/api/unstable/workers/remove'.format( - config['broker_host'], config['broker_port']), json=config) #hello message - except Exception as e: - logging.info("Ran into error: {}".format(e)) - logging.info("Broker's port is busy, worker will not be able to accept tasks, " - "please restart Augur if you want this worker to attempt connection again.") - diff --git a/workers/contributor_worker/runtime.py b/workers/contributor_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/contributor_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.contributor_worker.contributor_worker import ContributorWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ContributorWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/contributor_worker/setup.py b/workers/contributor_worker/setup.py --- a/workers/contributor_worker/setup.py +++ b/workers/contributor_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="contributor_worker", - version="0.0.2", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'contributor_worker_start=contributor_worker.runtime:main', + 'contributor_worker_start=workers.contributor_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/facade_worker/facade_worker/facade00mainprogram.py b/workers/facade_worker/facade_worker/facade00mainprogram.py --- a/workers/facade_worker/facade_worker/facade00mainprogram.py +++ b/workers/facade_worker/facade_worker/facade00mainprogram.py @@ -26,20 +26,8 @@ # repos. It also rebuilds analysis data, checks any changed affiliations and # aliases, and caches data for display. 
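The new runtime.py added above for the contributor worker (and repeated for the facade, GitHub, and GitLab workers later in this patch) reduces each launcher to one boot sequence: build a Flask app, attach a worker instance, register the shared AUGWOP endpoints with create_server(), and hand the app to WorkerGunicornApplication. A minimal sketch of that sequence follows; it is not part of the patch, and because the cleanup block in the new modules references broker_host, broker_port, and config without defining them, the sketch assumes the equivalent values live on the worker's own config dict (the key names used below are assumptions, not taken from the patch):

    import os
    import requests
    from flask import Flask

    from workers.util import create_server, WorkerGunicornApplication
    from workers.contributor_worker.contributor_worker import ContributorWorker

    def main():
        app = Flask(__name__)
        app.worker = ContributorWorker()        # worker loads its own config during init

        create_server(app)                      # mounts the shared AUGWOP task/heartbeat/config routes
        WorkerGunicornApplication(app).run()    # serves until the worker process is shut down

        # After the server exits, stop the collection child process and
        # tell the broker this worker is gone.
        if app.worker._child is not None:
            app.worker._child.terminate()
        try:
            remove_url = 'http://{}:{}/api/unstable/workers/remove'.format(
                app.worker.config['host_broker'],   # assumed key names; the patch leaves
                app.worker.config['port_broker'])   # broker_host/broker_port undefined here
            requests.post(remove_url, json={'id': app.worker.config['id']})
        except Exception:
            pass
        os.kill(os.getpid(), 9)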
-import pymysql -import sys -import platform -import imp -import time -import datetime -import html.parser -import subprocess -import os -import getopt -import xlsxwriter -import configparser +import pymysql, sys, platform, imp, time, datetime, html.parser, subprocess, os, getopt, xlsxwriter, configparser, logging from multiprocessing import Process, Queue - from facade_worker.facade01config import Config#increment_db, update_db, migrate_database_config, database_connection, get_setting, update_status, log_activity from facade_worker.facade02utilitymethods import update_repo_log, trim_commit, store_working_author, trim_author from facade_worker.facade03analyzecommit import analyze_commit @@ -48,55 +36,45 @@ from facade_worker.facade06analyze import analysis from facade_worker.facade07rebuildcache import nuke_affiliations, fill_empty_affiliations, invalidate_caches, rebuild_unknown_affiliation_and_web_caches -from workers.standard_methods import read_config +from workers.util import read_config +from workers.worker_base import Worker + +html = html.parser.HTMLParser() -import logging +class FacadeWorker(Worker): + def __init__(self, config={}, task=None): + worker_type = "facade_worker" -# if platform.python_implementation() == 'PyPy': -# import pymysql -# else: -# import MySQLdb -# ## End Imports + # Define what this worker can be given and know how to interpret + given = [['repo_group']] + models = ['commits'] -html = html.parser.HTMLParser() + # Define the tables needed to insert, update, or delete on + data_tables = [] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Facade-specific config + self.cfg = Config(self.logger) + + # Define data collection info + self.tool_source = 'Facade Worker' + self.tool_version = '1.0.0' + self.data_source = 'Git Log' -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class FacadeWorker: - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(os.getpid())) - - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.cfg = Config() - - ### The real program starts here ### + def initialize_database_connections(self): # Set up the database - db_user = self.config['user'] - db_pass = self.config['password'] - db_name = self.config['database'] - db_host = self.config['host'] - db_port = self.config['port'] - db_user_people = self.config['user'] - db_pass_people = self.config['password'] - db_name_people = self.config['database'] - db_host_people = self.config['host'] - db_port_people = self.config['port'] + db_user = self.config['user_database'] + db_pass = self.config['password_database'] + db_name = self.config['name_database'] + db_host = self.config['host_database'] + db_port = self.config['port_database'] # Open a general-purpose connection - db,cursor = self.cfg.database_connection( + self.db, self.cursor = self.cfg.database_connection( db_host, 
db_user, db_pass, @@ -104,157 +82,68 @@ def __init__(self, config, task=None): db_port, False, False) # Open a connection for the people database - db_people,cursor_people = self.cfg.database_connection( - db_host_people, - db_user_people, - db_pass_people, - db_name_people, - db_port_people, True, False) + self.db_people,self.cursor_people = self.cfg.database_connection( + db_host, + db_user, + db_pass, + db_name, + db_port, True, False) # Check if the database is current and update it if necessary try: - current_db = int(self.cfg.get_setting('database_version')) + self.current_db = int(self.cfg.get_setting('database_version')) except: # Catch databases which existed before database versioning - current_db = -1 - - #WHAT IS THE UPSTREAM_DB??? - # if current_db < upstream_db: - - # print(("Current database version: %s\nUpstream database version %s\n" % - # (current_db, upstream_db))) - - # self.cfg.update_db(current_db); + self.current_db = -1 - self.commit_model() - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - rg_id = value['given']['repo_group_id'] - - """ Query all repos """ - # repoUrlSQL = s.sql.text(""" - # SELECT repo_id,repo_group_id,repo_git FROM repo WHERE repo_group_id = '{}' - # """.format(rg_id)) - # rs = pd.read_sql(repoUrlSQL, self.db, params={}) - try: - if value['job_type'] == "UPDATE": - self._queue.put(CollectorTask(message_type='TASK', entry_info=value)) - elif value['job_type'] == "MAINTAIN": - self._maintain_queue.put(CollectorTask(message_type='TASK', entry_info=value)) - - except Exception as e: - logging.info("error: {}".format(e)) - - self._task = CollectorTask(message_type='TASK', entry_info={"task": value, "repo_id": repo_id}) - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...") - if self._child is None: - self._child = Process(target=self.collect, args=()) - self._child.start() - def collect(self): """ Function to process each entry in the worker's task queue Determines what action to take based off the message type """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() while True: - time.sleep(0.5) if not self._queue.empty(): - message = self._queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "UPDATE" + message = self._queue.get() # Get the task off our MP queue else: - if not self._maintain_queue.empty(): - message = self._maintain_queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "MAINTAIN" - else: - break - - if message.type == 'EXIT': + break + self.logger.info("Popped off message: {}\n".format(str(message))) + + if message['job_type'] == 'STOP': break - if message.type != 'TASK': - raise ValueError(f'{message.type} is not a recognized task type') - - if message.type == 'TASK': - try: - git_url = message.entry_info['task']['given']['git_url'] - self.query_issues({'git_url': git_url, 'repo_id': message.entry_info['repo_id']}) - except 
Exception as e: - logging.info("Worker ran into an error for task: {}\n".format(message.entry_info['task'])) - logging.info("Error encountered: " + repr(e) + "\n") - logging.info("Notifying broker and logging task failure in database...\n") - message.entry_info['task']['worker_id'] = self.config['id'] - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=message.entry_info['task']) - # Add to history table - task_history = { - "repo_id": message.entry_info['repo_id'], - "worker": self.config['id'], - "job_model": message.entry_info['task']['models'][0], - "oauth_id": self.config['zombie_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error for: " + str(message.entry_info['task']) + "\n") - - # Update job process table - updated_job = { - "since_id_str": message.entry_info['repo_id'], - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==message.entry_info['task']['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + message.entry_info['task']['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - pass - - def commit_model(self): + # If task is not a valid job type + if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': + raise ValueError('{} is not a recognized task type'.format(message['job_type'])) + pass + try: + self.commits_model(message) + except Exception as e: + self.logger.error(e) + raise(e) + break + + def commits_model(self, message): # Figure out what we need to do - limited_run = read_config("Facade", name="limited_run", default=0) - delete_marked_repos = read_config("Facade", name="delete_marked_repos", default=0) - pull_repos = read_config("Facade", name="pull_repos", default=0) - clone_repos = read_config("Facade", name="clone_repos", default=1) - check_updates = read_config("Facade", name="check_updates", default=0) - force_updates = read_config("Facade", name="force_updates", default=0) - run_analysis = read_config("Facade", name="run_analysis", default=0) - force_analysis = read_config("Facade", name="force_analysis", default=0) - nuke_stored_affiliations = read_config("Facade", name="nuke_stored_affiliations", default=0) - fix_affiliations = read_config("Facade", name="fix_affiliations", default=1) - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - rebuild_caches = read_config("Facade", name="rebuild_caches", default=1) #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], + limited_run = self.augur_config.get_value("Facade", "limited_run") + delete_marked_repos = self.augur_config.get_value("Facade", "delete_marked_repos") + pull_repos = self.augur_config.get_value("Facade", "pull_repos") + clone_repos = self.augur_config.get_value("Facade", "clone_repos") + check_updates = self.augur_config.get_value("Facade", "check_updates") + force_updates = self.augur_config.get_value("Facade", "force_updates") + run_analysis = self.augur_config.get_value("Facade", "run_analysis") + force_analysis = self.augur_config.get_value("Facade", "force_analysis") + nuke_stored_affiliations = 
self.augur_config.get_value("Facade", "nuke_stored_affiliations") + fix_affiliations = self.augur_config.get_value("Facade", "fix_affiliations") + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + rebuild_caches = self.augur_config.get_value("Facade", "rebuild_caches") #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], # '%Y-%m-%d %I:%M:%S.%f') - datetime.datetime.now()).total_seconds()) // 3600 > int(self.cfg.get_setting( # 'update_frequency')) else 0 - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - create_xlsx_summary_files = read_config("Facade", name="create_xlsx_summary_files", default=0) - multithreaded = read_config("Facade", name="multithreaded", default=1) + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + create_xlsx_summary_files = self.augur_config.get_value("Facade", "create_xlsx_summary_files") + multithreaded = self.augur_config.get_value("Facade", "multithreaded") opts,args = getopt.getopt(sys.argv[1:],'hdpcuUaAmnfIrx') for opt in opts: @@ -355,9 +244,9 @@ def commit_model(self): if len(repo_base_directory) == 0: self.cfg.log_activity('Error','No base directory. It is unsafe to continue.') - update_status('Failed: No base directory') + self.cfg.update_status('Failed: No base directory') sys.exit(1) - + # Begin working start_time = time.time() diff --git a/workers/facade_worker/facade_worker/facade01config.py b/workers/facade_worker/facade_worker/facade01config.py --- a/workers/facade_worker/facade_worker/facade01config.py +++ b/workers/facade_worker/facade_worker/facade01config.py @@ -39,15 +39,15 @@ import json import logging -from workers.standard_methods import read_config - +from workers.util import read_config class Config: - def __init__(self): + def __init__(self, logger): self.upstream_db = 7 self.cursor = None self.cursor_people = None + self.logger = logger self.db = None self.db_people = None @@ -60,9 +60,10 @@ def __init__(self): " in your \'Workers\' -> \'facade_worker\' object in your config " "to the directory in which you want to clone repos. Exiting...") sys.exit(1) - self.tool_source = '\'FacadeAugur\'' - self.tool_version = '\'0.0.1\'' - self.data_source = '\'git_repository\'' + + self.tool_source = 'Facade Worker' + self.tool_version = '1.0.0' + self.data_source = 'Git Log' # Figure out how much we're going to log logging.basicConfig(filename='worker_{}.log'.format(worker_options['port']), filemode='w', level=logging.INFO) @@ -199,7 +200,7 @@ def log_activity(self, level, status): # "Debug", then just print it and don't save it in the database. 
log_options = ('Error','Quiet','Info','Verbose','Debug') - logging.info("* %s\n" % status) + self.logger.info("* %s\n" % status) if self.log_level == 'Debug' and level == 'Debug': return @@ -209,7 +210,7 @@ def log_activity(self, level, status): self.cursor.execute(query, (level, status)) self.db.commit() except Exception as e: - logging.info('Error encountered: {}\n'.format(e)) + self.logger.info('Error encountered: {}\n'.format(e)) # Set up the database db_user = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur') diff --git a/workers/facade_worker/facade_worker/facade03analyzecommit.py b/workers/facade_worker/facade_worker/facade03analyzecommit.py --- a/workers/facade_worker/facade_worker/facade03analyzecommit.py +++ b/workers/facade_worker/facade_worker/facade03analyzecommit.py @@ -38,7 +38,7 @@ import configparser import traceback -from workers.standard_methods import read_config +from workers.util import read_config def analyze_commit(cfg, repo_id, repo_loc, commit, multithreaded): diff --git a/workers/facade_worker/facade_worker/facade07rebuildcache.py b/workers/facade_worker/facade_worker/facade07rebuildcache.py --- a/workers/facade_worker/facade_worker/facade07rebuildcache.py +++ b/workers/facade_worker/facade_worker/facade07rebuildcache.py @@ -156,7 +156,6 @@ def discover_null_affiliations(attribution,email): cfg.log_activity('Debug','Found domain match for %s' % email) - # try: for match in matches: update = ("UPDATE commits " "SET cmt_%s_affiliation = %%s " @@ -164,7 +163,6 @@ def discover_null_affiliations(attribution,email): "AND cmt_%s_affiliation IS NULL " "AND cmt_%s_date::date >= %%s::date" % (attribution, attribution, attribution, attribution)) - #"AND cmt_%s_date >= TO_TIMESTAMP(%%s, 'YYYY-MM-DD')" % cfg.log_activity('Info', 'attr: {} \nmatch:{}\nsql: {}'.format(attribution, match, update)) @@ -175,15 +173,6 @@ def discover_null_affiliations(attribution,email): cfg.log_activity('Info', 'Error encountered: {}'.format(e)) cfg.log_activity('Info', 'Affiliation insertion failed for %s ' % email) - # except Exception as e: - # cfg.log_activity('Info', '1st Error encountered: {}'.format(e)) - # cfg.log_activity('Info', 'Attribution matching failed for %s ' % email) - # except Exception as e: - # logging.info('2nd Error encountered: {}'.format(e)) - # cfg.log_activity('Info', 'Attribution matching failed and exception logging failed') - # else: - # cfg.log_activity('Info', 'Attribution matching failed and exception logging failed and the exception to the exception failed.') - def discover_alias(email): # Match aliases with their canonical email diff --git a/workers/facade_worker/facade_worker/runtime.py b/workers/facade_worker/facade_worker/runtime.py --- a/workers/facade_worker/facade_worker/runtime.py +++ b/workers/facade_worker/facade_worker/runtime.py @@ -1,102 +1,23 @@ from flask import Flask, jsonify, request, Response import click, os, json, requests, logging -from facade_worker.facade00mainprogram import FacadeWorker -from workers.standard_methods import read_config +from workers.facade_worker.facade_worker.facade00mainprogram import FacadeWorker +from workers.util import create_server, WorkerGunicornApplication -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint 
that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(request.json)) - app.facade_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.facade_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51258, help='Port') -def main(augur_url, host, port): +def main(): """ Declares singular worker and creates the server and flask app that it will be running on """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'facade_worker', None, None) - worker_port = worker_info['port'] if 'port' in worker_info else port - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - config = { - "id": "com.augurlabs.core.facade_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } + app = Flask(__name__) + app.worker = FacadeWorker() - #create instance of the worker - app.facade_worker = FacadeWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") + create_server(app) + WorkerGunicornApplication(app).run() - app.run(debug=app.debug, host=host, port=worker_port) + if app.worker._child is not None: + app.worker._child.terminate() try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) except: pass - logging.info("Killing Flask App: " + str(os.getpid())) + 
os.kill(os.getpid(), 9) - diff --git a/workers/facade_worker/setup.py b/workers/facade_worker/setup.py --- a/workers/facade_worker/setup.py +++ b/workers/facade_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="facade_worker", - version="0.1", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -30,7 +30,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'facade_worker_start=facade_worker.runtime:main', + 'facade_worker_start=workers.facade_worker.facade_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/github_worker/github_worker/worker.py b/workers/github_worker/github_worker.py similarity index 56% rename from workers/github_worker/github_worker/worker.py rename to workers/github_worker/github_worker.py --- a/workers/github_worker/github_worker/worker.py +++ b/workers/github_worker/github_worker.py @@ -2,217 +2,61 @@ from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData import requests, time, logging, json, os from datetime import datetime -from sqlalchemy.ext.declarative import declarative_base -from workers.standard_methods import * +from workers.worker_base import Worker -class GitHubWorker: +class GitHubWorker(Worker): """ Worker that collects data from the Github API and stores it in our database task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - - self._task = task # task currently being worked on (dict) - self._child = None # process of currently running task (multiprocessing process) - self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes) - self.db = None # sql alchemy db session + def __init__(self, config={}): - # These 3 are included in every tuple the worker inserts (data collection info) - self.tool_source = 'GitHub API Worker' - self.tool_version = '0.0.3' # See __init__.py - self.data_source = 'GitHub API' - - self.results_counter = 0 # count of tuples inserted in the database (to store stats for each task in op tables) - self.finishing_task = True # if we are finishing a previous task, pagination works differenty - - self.specs = { - "id": self.config['id'], # what the broker knows this worker as - "location": self.config['location'], # host + port worker is running on (so broker can send tasks here) - "qualifications": [ - { - "given": [["github_url"]], # type of repo this worker can be given as a task - "models":["issues"] # models this worker can fill for a repo as a task - } - ], - "config": [self.config] - } - - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - # Create an sqlalchemy engine for both database schemas - logging.info("Making database connections... 
{}".format(DB_STR)) - db_schema = 'augur_data' - self.db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(db_schema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + worker_type = 'github_worker' - metadata = MetaData() - helper_metadata = MetaData() + given = [['github_url']] + models = ['issues'] - # Reflect only the tables we will use for each schema's metadata object - metadata.reflect(self.db, only=['contributors', 'issues', 'issue_labels', 'message', + data_tables = ['contributors', 'issues', 'issue_labels', 'message', 'issue_message_ref', 'issue_events','issue_assignees','contributors_aliases', 'pull_request_assignees', 'pull_request_events', 'pull_request_reviewers', 'pull_request_meta', - 'pull_request_repo']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - # So we can access all our tables when inserting, updating, etc - self.contributors_table = Base.classes.contributors.__table__ - self.issues_table = Base.classes.issues.__table__ - self.issue_labels_table = Base.classes.issue_labels.__table__ - self.issue_events_table = Base.classes.issue_events.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.message_table = Base.classes.message.__table__ - self.issues_message_ref_table = Base.classes.issue_message_ref.__table__ - self.issue_assignees_table = Base.classes.issue_assignees.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.contributors_aliases_table = Base.classes.contributors_aliases.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ + 'pull_request_repo'] + operations_tables = ['worker_history', 'worker_job'] - # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's - logging.info("Querying starting ids info...\n") - - self.issue_id_inc = get_max_id(self, 'issues', 'issue_id') - - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - - self.msg_id_inc = get_max_id(self, 'message', 'msg_id') - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) + # These 3 are included in every tuple the worker inserts (data collection info) + self.tool_source = 'GitHub API Worker' + self.tool_version = '1.0.0' + self.data_source = 'GitHub API' - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5433/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ 
Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - # If the task has one of our "valid" job types - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - # Setting that causes paginating through ALL pages, not just unknown ones - # This setting is set by the housekeeper and is attached to the task before it gets sent here - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - - self._task = value - self.run() + self.finishing_task = True # if we are finishing a previous task, pagination works differenty + self.platform_id = 25150 # GitHub - def cancel(self): - """ Delete/cancel current task - """ - self._task = None + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - # Spawn a subprocess to handle message reading and performing the tasks - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['github_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'issues': - self.issues_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass def issues_model(self, entry_info, repo_id): """ Data collection function Query the GitHub API for issues """ + + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's + self.logger.info("Querying starting ids info...\n") + + self.issue_id_inc = self.get_max_id('issues', 'issue_id') + + self.msg_id_inc = self.get_max_id('message', 'msg_id') github_url = entry_info['given']['github_url'] - logging.info("Beginning filling the issues model for repo: " + github_url + "\n") - record_model_process(self, repo_id, 'issues') + self.logger.info("Beginning filling the issues model for repo: " + github_url + "\n") # Contributors are part of this model, and finding all for the repo saves us # from having to add them as we discover committers in the issue process - query_github_contributors(self, entry_info, repo_id) + 
self.query_github_contributors(entry_info, repo_id) # Extract the owner/repo for the endpoint path = urlparse(github_url) @@ -238,14 +82,14 @@ def issues_model(self, entry_info, repo_id): duplicate_col_map = {'gh_issue_id': 'id'} #list to hold issues needing insertion - issues = paginate(self, issues_url, duplicate_col_map, update_col_map, table, table_pkey, + issues = self.paginate(issues_url, duplicate_col_map, update_col_map, table, table_pkey, 'WHERE repo_id = {}'.format(repo_id)) - + self.logger.info(issues) # Discover and remove duplicates before we start inserting - logging.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") + self.logger.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") for issue_dict in issues: - logging.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") + self.logger.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") # Add the FK repo_id to the dict being inserted issue_dict['repo_id'] = repo_id @@ -253,17 +97,17 @@ def issues_model(self, entry_info, repo_id): # Figure out if this issue is a PR # still unsure about this key value pair/what it means pr_id = None - if "pull_request" in issue_dict: - logging.info("Issue is a PR\n") + if 'pull_request' in issue_dict: + self.logger.info("Issue is a PR\n") # Right now we are just storing our issue id as the PR id if it is one pr_id = self.issue_id_inc else: - logging.info("Issue is not a PR\n") + self.logger.info("Issue is not a PR\n") # Begin on the actual issue... issue = { "repo_id": issue_dict['repo_id'], - "reporter_id": find_id_from_login(self, issue_dict['user']['login']), + "reporter_id": self.find_id_from_login(issue_dict['user']['login']), "pull_request": pr_id, "pull_request_id": pr_id, "created_at": issue_dict['created_at'], @@ -292,20 +136,20 @@ def issues_model(self, entry_info, repo_id): if issue_dict['flag'] == 'need_update': result = self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( + self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( issue_dict['id'])) self.issue_id_inc = issue_dict['pkey'] elif issue_dict['flag'] == 'need_insertion': try: result = self.db.execute(self.issues_table.insert().values(issue)) - logging.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) self.results_counter += 1 self.issue_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + + self.logger.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + " and title of: {} and gh_issue_num of: {}\n".format(issue_dict['title'],issue_dict['number'])) except Exception as e: - logging.info("When inserting an issue, ran into the following error: {}\n".format(e)) - logging.info(issue) + self.logger.info("When inserting an issue, ran into the following error: {}\n".format(e)) + self.logger.info(issue) continue # Check if the assignee key's value is already recorded in the assignees key's value @@ -316,13 +160,13 @@ def issues_model(self, entry_info, repo_id): # Handles case if there are no assignees if collected_assignees[0] is not None: - logging.info("Count of assignees to insert for this issue: " + 
str(len(collected_assignees)) + "\n") + self.logger.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") for assignee_dict in collected_assignees: if type(assignee_dict) != dict: continue assignee = { "issue_id": self.issue_id_inc, - "cntrb_id": find_id_from_login(self, assignee_dict['login']), + "cntrb_id": self.find_id_from_login(assignee_dict['login']), "tool_source": self.tool_source, "tool_version": self.tool_version, "data_source": self.data_source, @@ -331,13 +175,13 @@ def issues_model(self, entry_info, repo_id): } # Commit insertion to the assignee table result = self.db.execute(self.issue_assignees_table.insert().values(assignee)) - logging.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + + self.logger.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + " with login/cntrb_id: " + assignee_dict['login'] + " " + str(assignee['cntrb_id']) + "\n") else: - logging.info("Issue does not have any assignees\n") + self.logger.info("Issue does not have any assignees\n") # Insert the issue labels to the issue_labels table for label_dict in issue_dict['labels']: @@ -357,10 +201,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_labels_table.insert().values(label)) - logging.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue label with text: " + label_dict['name'] + "\n") + self.logger.info("Inserted issue label with text: " + label_dict['name'] + "\n") #### Messages/comments and events insertion @@ -375,19 +219,19 @@ def issues_model(self, entry_info, repo_id): duplicate_col_map = {'msg_timestamp': 'created_at'} #list to hold contributors needing insertion or update - issue_comments = paginate(self, comments_url, duplicate_col_map, update_col_map, table, table_pkey, + issue_comments = self.paginate(comments_url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="WHERE msg_id IN (SELECT msg_id FROM issue_message_ref WHERE issue_id = {})".format( self.issue_id_inc)) - logging.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) + self.logger.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) for comment in issue_comments: try: - commenter_cntrb_id = find_id_from_login(self, comment['user']['login']) + commenter_cntrb_id = self.find_id_from_login(comment['user']['login']) except: commenter_cntrb_id = None issue_comment = { - "pltfrm_id": 25150, + "pltfrm_id": self.platform_id, "msg_text": comment['body'], "msg_timestamp": comment['created_at'], "cntrb_id": commenter_cntrb_id, @@ -397,13 +241,13 @@ def issues_model(self, entry_info, repo_id): } try: result = self.db.execute(self.message_table.insert().values(issue_comment)) - logging.info("Primary key inserted into the message table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the message table: {}".format(result.inserted_primary_key)) self.results_counter += 1 self.msg_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue comment with id: 
{}\n".format(self.msg_id_inc)) + self.logger.info("Inserted issue comment with id: {}\n".format(self.msg_id_inc)) except Exception as e: - logging.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) + self.logger.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) ### ISSUE MESSAGE REF TABLE ### @@ -417,8 +261,8 @@ def issues_model(self, entry_info, repo_id): "issue_msg_ref_src_node_id": comment['node_id'] } - result = self.db.execute(self.issues_message_ref_table.insert().values(issue_message_ref)) - logging.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) + result = self.db.execute(self.issue_message_ref_table.insert().values(issue_message_ref)) + self.logger.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) self.results_counter += 1 # Base of the url for event endpoints @@ -434,7 +278,7 @@ def issues_model(self, entry_info, repo_id): pseudo_key_gh = 'url' pseudo_key_augur = 'node_url' table = 'issue_events' - event_table_values = get_table_values(self, [pseudo_key_augur], [table], "WHERE issue_id = {}".format(self.issue_id_inc)) + event_table_values = self.get_table_values([pseudo_key_augur], [table], "WHERE issue_id = {}".format(self.issue_id_inc)) # Paginate backwards through all the events but get first page in order # to determine if there are multiple pages and if the 1st page covers all @@ -442,29 +286,29 @@ def issues_model(self, entry_info, repo_id): multiple_pages = False while True: - logging.info("Hitting endpoint: " + events_url.format(i) + " ...\n") + self.logger.info("Hitting endpoint: " + events_url.format(i) + " ...\n") r = requests.get(url=events_url.format(i), headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) # Find last page so we can decrement from there if 'last' in r.links and not multiple_pages and not self.finishing_task: param = r.links['last']['url'][-6:] i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") multiple_pages = True elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") + self.logger.info("Only 1 page of request\n") elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ... " + self.logger.info("Finishing a previous task, paginating forwards ... " "excess rate limit requests will be made\n") j = r.json() # Checking contents of requests with what we already have in the db - new_events = check_duplicates(j, event_table_values, pseudo_key_gh) + new_events = self.check_duplicates(j, event_table_values, pseudo_key_gh) if len(new_events) == 0 and multiple_pages and 'last' in r.links: if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown events, breaking from pagination.\n") + self.logger.info("No more pages with unknown events, breaking from pagination.\n") break elif len(new_events) != 0: to_add = [obj for obj in new_events if obj not in issue_events] @@ -474,29 +318,29 @@ def issues_model(self, entry_info, repo_id): # Since we already wouldve checked the first page... 
break if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") + self.logger.info("No more pages to check, breaking from pagination.\n") break - logging.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") + self.logger.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") # If the issue is closed, then we search for the closing event and store the user's id cntrb_id = None if 'closed_at' in issue_dict: for event in issue_events: if str(event['event']) != "closed": - logging.info("not closed, continuing") + self.logger.info("not closed, continuing") continue if not event['actor']: continue - cntrb_id = find_id_from_login(self, event['actor']['login']) + cntrb_id = self.find_id_from_login(event['actor']['login']) if cntrb_id is not None: break # Need to hit this single contributor endpoint to get extra created at data... cntrb_url = ("https://api.github.com/users/" + event['actor']['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) contributor = r.json() company = None @@ -543,20 +387,17 @@ def issues_model(self, entry_info, repo_id): # Commit insertion to table result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format( + self.logger.info("Primary key inserted into the contributors table: {}".format( result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - # Increment our global track of the cntrb id for the possibility of it being used as a FK - self.cntrb_id_inc = int(result.inserted_primary_key[0]) + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") for event in issue_events: if event['actor'] is not None: - event['cntrb_id'] = find_id_from_login(self, event['actor']['login']) + event['cntrb_id'] = self.find_id_from_login(event['actor']['login']) if event['cntrb_id'] is None: - logging.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") + self.logger.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") continue # event['cntrb_id'] = None else: @@ -578,10 +419,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_events_table.insert().values(issue_event)) - logging.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) + self.logger.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) if cntrb_id is not None: update_closing_cntrb = { @@ -589,11 +430,11 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( + self.logger.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( issue_dict['id'])) self.issue_id_inc += 1 #Register this task as completed - 
register_task_completion(self, entry_info, repo_id, "issues") + self.register_task_completion(entry_info, repo_id, "issues") diff --git a/workers/github_worker/github_worker/__init__.py b/workers/github_worker/github_worker/__init__.py deleted file mode 100644 --- a/workers/github_worker/github_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/github_worker/github_worker/runtime.py b/workers/github_worker/github_worker/runtime.py deleted file mode 100644 --- a/workers/github_worker/github_worker/runtime.py +++ /dev/null @@ -1,107 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from github_worker.worker import GitHubWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.github_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.github_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51236, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'github_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: # for multiple instances of workers - try: # trying each port for an already-alive worker until a free port is found - print("New github worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": 
"com.augurlabs.core.github_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - app.github_worker = GitHubWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.github_worker._child is not None: - app.github_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/github_worker/runtime.py b/workers/github_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/github_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.github_worker.github_worker import GitHubWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/github_worker/setup.py b/workers/github_worker/setup.py --- a/workers/github_worker/setup.py +++ b/workers/github_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="github_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'github_worker_start=github_worker.runtime:main', + 'github_worker_start=workers.github_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/template_worker/template_worker/__init__.py b/workers/gitlab_issues_worker/__init__.py similarity index 50% rename from workers/template_worker/template_worker/__init__.py rename to workers/gitlab_issues_worker/__init__.py --- a/workers/template_worker/template_worker/__init__.py +++ b/workers/gitlab_issues_worker/__init__.py @@ -1,4 +1,4 @@ -"""gh_repo_info_worker - Augur Worker that collects GitHub Repo Info data""" +"""gitlab_issues_worker - Augur Worker that collects Gitlab Issue Info""" __version__ = '0.0.0' __author__ = 'Augur Team <[email protected]>' diff --git a/workers/gitlab_issues_worker/gitlab_issues_worker.py b/workers/gitlab_issues_worker/gitlab_issues_worker.py new file mode 100644 --- 
/dev/null +++ b/workers/gitlab_issues_worker/gitlab_issues_worker.py @@ -0,0 +1,193 @@ +import logging, os, sys, time, requests, json +from datetime import datetime +from multiprocessing import Process, Queue +import pandas as pd +import sqlalchemy as s +from workers.worker_base import Worker + + +class GitLabIssuesWorker(Worker): + def __init__(self, config={}): + + # Define what this worker can be given and how to interpret it + + # given is usually either [['github_url']] or [['git_url']] (depending on whether your + # worker is exclusive to repos that are on the GitHub platform) + worker_type = "gitlab_issues_worker" + given = [['git_url']] + + # The name the housekeeper/broker use to distinguish the data model this worker can fill + # You will also need to name the method that does the collection for this model + # in the format *model name*_model() such as fake_data_model() for example + models = ['gitlab_issues'] + + # Define the tables needed to insert, update, or delete on + # The Worker class will set each table you define here as an attribute + # so you can reference all of them like self.message_table or self.repo_table + data_tables = ['contributors', 'issues', 'issue_labels', 'message', 'repo', + 'issue_message_ref', 'issue_events','issue_assignees','contributors_aliases', + 'pull_request_assignees', 'pull_request_events', 'pull_request_reviewers', 'pull_request_meta', + 'pull_request_repo'] + # For most workers you will only need the worker_history and worker_job tables + # from the operations schema, these tables are to log worker task histories + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Update the request headers with the GitLab API key + + gitlab_api_key = self.augur_config.get_value("Database", "gitlab_api_key") + self.config.update({ + "gitlab_api_key": gitlab_api_key + }) + self.headers = {"PRIVATE-TOKEN" : self.config['gitlab_api_key']} + + + # Define data collection info + self.tool_source = 'Gitlab API Worker' + self.tool_version = '0.0.0' + self.data_source = 'GitLab API' + + + def gitlab_issues_model(self, task, repo_id): + """ This is just an example of a data collection method. All data collection + methods for all workers currently accept this format of parameters. If you + want to change these parameters, you can re-define the collect() method to + overwrite the Worker class' version of it (which is the method that calls + this method). + + :param task: the task generated by the housekeeper and sent to the broker which + was then sent to this worker. Takes the example dict format of: + { + 'job_type': 'MAINTAIN', + 'models': ['fake_data'], + 'display_name': 'fake_data model for url: https://github.com/vmware/vivace', + 'given': { + 'git_url': 'https://github.com/vmware/vivace' + } + } + :param repo_id: the collect() method queries the repo_id given the git/github url + and passes it along to make things easier.
An int such as: 27869 + """ + + # Collection and insertion of data happens here + + # Collecting issue info from Gitlab API + self.issue_id_inc = self.get_max_id('issues', 'issue_id') + self.msg_id_inc = self.get_max_id('message', 'msg_id') + self.logger.info('Beginning the process of GitLab Issue Collection... (pid: {})'.format(str(os.getpid()))) + gitlab_base = 'https://gitlab.com/api/v4' + intermediate_url = '{}/projects/{}/issues?per_page=100&state=opened&'.format(gitlab_base, 18754962) + gitlab_issues_url = intermediate_url + "page={}" + + + # Get issues that we already have stored + # Set pseudo key (something other than PK) to + # check duplicates with + table = 'issues' + table_pkey = 'issue_id' + update_col_map = {'issue_state': 'state'} + duplicate_col_map = {'gh_issue_id': 'id'} + + # list to hold issues needing insertion or update + issues = self.paginate(gitlab_issues_url, duplicate_col_map, update_col_map, table, table_pkey, + 'WHERE repo_id = {}'.format(repo_id), platform="gitlab") + + self.logger.info(issues) + self.logger.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") + for issue_dict in issues: + self.logger.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") + pr_id = None + if "pull_request" in issue_dict: + self.logger.info("This is an MR\n") + # Right now we are just storing our issue id as the MR id if it is one + pr_id = self.issue_id_inc + else: + self.logger.info("Issue is not an MR\n") + + # Insert data into models + issue = { + "repo_id": issue_dict['project_id'], + "reporter_id": self.find_id_from_login(issue_dict['author']['username'], platform='gitlab'), + "pull_request": pr_id, + "pull_request_id": pr_id, + "created_at": issue_dict['created_at'], + "issue_title": issue_dict['title'], + "issue_body": issue_dict['description'] if 'description' in issue_dict else None, + "comment_count": issue_dict['user_notes_count'], + "updated_at": issue_dict['updated_at'], + "closed_at": issue_dict['closed_at'], + "repository_url": issue_dict['_links']['project'], + "issue_url": issue_dict['_links']['self'], + "labels_url": issue_dict['labels'], + "comments_url": issue_dict['_links']['notes'], + "events_url": None, + "html_url": issue_dict['_links']['self'], + "issue_state": issue_dict['state'], + "issue_node_id": None, + "gh_issue_id": issue_dict['id'], + "gh_issue_number": issue_dict['iid'], + "gh_user_id": issue_dict['author']['id'], + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + # Commit insertion to the issues table + if issue_dict['flag'] == 'need_update': + self.logger.info("UPDATE FLAG") + result = self.db.execute(self.issues_table.update().where( + self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) + self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( + issue_dict['id'])) + self.issue_id_inc = issue_dict['pkey'] + elif issue_dict['flag'] == 'need_insertion': + self.logger.info("INSERT FLAG") + try: + result = self.db.execute(self.issues_table.insert().values(issue)) + self.logger.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) + self.results_counter += 1 + self.issue_id_inc = int(result.inserted_primary_key[0]) + self.logger.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + + " and title of: {} and gh_issue_num of: {}\n".format(issue_dict['title'], issue_dict['iid'])) + except Exception as e: + self.logger.info("When inserting an issue, ran into
the following error: {}\n".format(e)) + self.logger.info(issue) + # continue + + # issue_assignees + self.logger.info("assignees: {}".format(issue_dict['assignees'])) + collected_assignees = issue_dict['assignees'] + if issue_dict['assignee'] not in collected_assignees: + collected_assignees.append(issue_dict['assignee']) + if collected_assignees[0] is not None: + self.logger.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") + for assignee_dict in collected_assignees: + if type(assignee_dict) != dict: + continue + assignee = { + "issue_id": self.issue_id_inc, + "cntrb_id": self.find_id_from_login(assignee_dict['username'], platform='gitlab'), + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source, + "issue_assignee_src_id": assignee_dict['id'], + "issue_assignee_src_node": None + } + self.logger.info("assignee info: {}".format(assignee)) + # Commit insertion to the assignee table + result = self.db.execute(self.issue_assignees_table.insert().values(assignee)) + self.logger.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) + self.results_counter += 1 + + self.logger.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + + " with login/cntrb_id: " + assignee_dict['username'] + " " + str(assignee['cntrb_id']) + "\n") + else: + self.logger.info("Issue does not have any assignees\n") + + # Register this task as completed. + # This is a method of the worker class that is required to be called upon completion + # of any data collection model; it lets the broker know that this worker is ready + # for another task + self.register_task_completion(task, repo_id, 'gitlab_issues') + diff --git a/workers/gitlab_issues_worker/runtime.py b/workers/gitlab_issues_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/gitlab_issues_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.gitlab_issues_worker.gitlab_issues_worker import GitLabIssuesWorker +from workers.util import WorkerGunicornApplication, create_server + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitLabIssuesWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) \ No newline at end of file diff --git a/workers/metric_status_worker/setup.py b/workers/gitlab_issues_worker/setup.py similarity index 72% rename from workers/metric_status_worker/setup.py rename to workers/gitlab_issues_worker/setup.py --- a/workers/metric_status_worker/setup.py +++ b/workers/gitlab_issues_worker/setup.py @@ -5,32 +5,30 @@ from setuptools import find_packages from setuptools import setup - def read(filename): filename = os.path.join(os.path.dirname(__file__), filename) text_type = type(u"") with io.open(filename, mode="r", encoding='utf-8') as fd: return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) - setup( - name="metric_status_worker", - version="0.1.0", + name="gitlab_issues_worker", + version="0.0.0", url="https://github.com/chaoss/augur", license='MIT', - author="Augurlabs", - author_email="[email protected]", - description="Augur Worker that
collects GitHub data", + author="Augur Team", + author_email="", + description="Gitlab Worker", packages=find_packages(exclude=('tests',)), install_requires=[ - 'flask', - 'requests', - 'psycopg2-binary', + 'flask', + 'requests', + 'psycopg2-binary', 'click' ], entry_points={ 'console_scripts': [ - 'metric_status_worker_start=metric_status_worker.runtime:main', + 'gitlab_issues_worker_start=workers.gitlab_issues_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/insight_worker/insight_worker/__init__.py b/workers/insight_worker/__init__.py similarity index 100% rename from workers/insight_worker/insight_worker/__init__.py rename to workers/insight_worker/__init__.py diff --git a/workers/insight_worker/insight_worker/worker.py b/workers/insight_worker/insight_worker.py similarity index 79% rename from workers/insight_worker/insight_worker/worker.py rename to workers/insight_worker/insight_worker.py --- a/workers/insight_worker/insight_worker/worker.py +++ b/workers/insight_worker/insight_worker.py @@ -10,179 +10,55 @@ import scipy.stats import datetime from sklearn.ensemble import IsolationForest -from workers.standard_methods import * #init_oauths, get_table_values, get_max_id, register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process, paginate +from workers.worker_base import Worker import warnings warnings.filterwarnings('ignore') -class InsightWorker: +class InsightWorker(Worker): """ Worker that detects anomalies on a select few of our metrics task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self._task = task - self._child = None - self._queue = Queue() - self.db = None + def __init__(self, config={}): + + worker_type = "insight_worker" + + given = [['git_url']] + models = ['insights'] + + data_tables = ['chaoss_metric_status', 'repo_insights', 'repo_insights_records'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({ + 'api_host': self.augur_config.get_value('Server', 'host'), + 'api_port': self.augur_config.get_value('Server', 'port') + }) + + # These 3 are included in every tuple the worker inserts (data collection info) self.tool_source = 'Insight Worker' - self.tool_version = '0.0.3' # See __init__.py + self.tool_version = '1.0.0' self.data_source = 'Augur API' + self.refresh = True self.send_insights = True - self.finishing_task = False self.anomaly_days = self.config['anomaly_days'] self.training_days = self.config['training_days'] self.contamination = self.config['contamination'] self.confidence = self.config['confidence_interval'] / 100 self.metrics = self.config['metrics'] - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["insights"] - } - ], - "config": [self.config] - } - - self.results_counter = 0 - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], 
self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - # produce our own MetaData object - metadata = MetaData() - helper_metadata = MetaData() - - # we can reflect it ourselves from a database, using options - # such as 'only' to limit what tables we look at... - metadata.reflect(self.db, only=['chaoss_metric_status', 'repo_insights', 'repo_insights_records']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job']) - - # we can then produce a set of mappings from this MetaData. - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - # calling prepare() just sets up mapped classes and relationships. - Base.prepare() - HelperBase.prepare() - - # mapped classes are ready - self.chaoss_metric_status_table = Base.classes['chaoss_metric_status'].__table__ - self.repo_insights_table = Base.classes['repo_insights'].__table__ - self.repo_insights_records_table = Base.classes['repo_insights_records'].__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5432/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, 
params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'insights': - self.insights_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - def insights_model(self, entry_info, repo_id): logging.info("Discovering insights for task with entry info: {}\n".format(entry_info)) - record_model_process(self, repo_id, 'insights') """ Collect data """ base_url = 'http://{}:{}/api/unstable/repo-groups/9999/repos/{}/'.format( - self.config['broker_host'], self.config['broker_port'], repo_id) + self.config['api_host'], self.config['api_port'], repo_id) # Dataframe to hold all endpoint results # Subtract configurable amount of time @@ -218,7 +94,7 @@ def insights_model(self, entry_info, repo_id): # If none of the endpoints returned data if df.size == 0: logging.info("None of the provided endpoints provided data for this repository. Anomaly detection is 'done'.\n") - register_task_completion(self, entry_info, repo_id, "insights") + self.register_task_completion(entry_info, repo_id, "insights") return """ Deletion of old insights """ @@ -258,7 +134,7 @@ def insights_model(self, entry_info, repo_id): result = self.db.execute(delete_points_SQL, repo_id=repo_id, min_date=min_date) # get table values to check for dupes later on - insight_table_values = get_table_values(self, ['*'], ['repo_insights_records'], where_clause="WHERE repo_id = {}".format(repo_id)) + insight_table_values = self.get_table_values(['*'], ['repo_insights_records'], where_clause="WHERE repo_id = {}".format(repo_id)) to_model_columns = df.columns[0:len(self.metrics)+1] @@ -415,7 +291,7 @@ def classify_anomalies(df,metric): logging.info("error occurred while storing datapoint: {}\n".format(repr(e))) break - register_task_completion(self, entry_info, repo_id, "insights") + self.register_task_completion(entry_info, repo_id, "insights") def confidence_interval_insights(self, entry_info): """ Anomaly detection method based on confidence intervals @@ -423,7 +299,6 @@ def confidence_interval_insights(self, entry_info): # Update table of endpoints before we query them all logging.info("Discovering insights for task with entry info: {}".format(entry_info)) - record_model_process(self, repo_id, 'insights') # Set the endpoints we want to discover insights for endpoints = [{'cm_info': "issues-new"}, {'cm_info': "code-changes"}, {'cm_info': "code-changes-lines"}, @@ -445,10 +320,10 @@ def confidence_interval_insights(self, entry_info): # If we are discovering insights for a group vs repo, the base url will change if 'repo_group_id' in entry_info and 'repo_id' not in entry_info: base_url = 'http://{}:{}/api/unstable/repo-groups/{}/'.format( - self.config['broker_host'],self.config['broker_port'], entry_info['repo_group_id']) + self.config['api_host'],self.config['api_port'], entry_info['repo_group_id']) else: base_url = 'http://{}:{}/api/unstable/repo-groups/9999/repos/{}/'.format( - self.config['broker_host'],self.config['broker_port'], repo_id) + self.config['api_host'],self.config['api_port'], repo_id) # Hit and discover insights for every endpoint we care about for endpoint in endpoints: @@ -610,50 +485,6 @@ def is_unique_key(key): self.register_task_completion(entry_info, "insights") - def register_task_completion(self, entry_info, model): - # Task to send back to broker - 
task_completed = { - 'worker_id': self.config['id'], - 'job_type': entry_info['job_type'], - 'repo_id': repo_id, - 'git_url': entry_info['git_url'] - } - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.config['zombie_id'], - "timestamp": datetime.datetime.now(), - "status": "Success", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job completion for: " + str(task_completed) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where( - self.job_table.c.job_model==model).values(updated_job)) - logging.info("Update job process for model: " + model + "\n") - - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - - # Reset results counter for next task - self.results_counter = 0 - def send_insight(self, insight, units_from_mean): try: repoSQL = s.sql.text(""" @@ -821,9 +652,9 @@ def confidence_interval(self, data, timeperiod='week', confidence=.95): def update_metrics(self): logging.info("Preparing to update metrics ...\n\n" + "Hitting endpoint: http://{}:{}/api/unstable/metrics/status ...\n".format( - self.config['broker_host'],self.config['broker_port'])) + self.config['api_host'],self.config['api_port'])) r = requests.get(url='http://{}:{}/api/unstable/metrics/status'.format( - self.config['broker_host'],self.config['broker_port'])) + self.config['api_host'],self.config['api_port'])) data = r.json() active_metrics = [metric for metric in data if metric['backend_status'] == 'implemented'] diff --git a/workers/insight_worker/insight_worker/runtime.py b/workers/insight_worker/insight_worker/runtime.py deleted file mode 100644 --- a/workers/insight_worker/insight_worker/runtime.py +++ /dev/null @@ -1,110 +0,0 @@ -from flask import Flask, jsonify, request -from insight_worker.worker import InsightWorker -import click, os, json, logging, requests -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}\n".format(str(request.json))) - app.insight_worker.task = request.json - - #set task - return jsonify({"success": "sucess"}) - - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": app.insight_worker._queue, - "tasks": [{ - "given": list(app.insight_worker._queue) - }] - }) - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - 
return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.insight_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51252, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'insight_worker', None, {}) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.insight_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "anomaly_days": worker_info['anomaly_days'] if 'anomaly_days' in worker_info else 2, - "training_days": worker_info['training_days'] if 'training_days' in worker_info else 365, - "confidence_interval": worker_info['confidence_interval'] if 'confidence_interval' in worker_info else .95, - "contamination": worker_info['contamination'] if 'contamination' in worker_info else 0.041, - 'metrics': worker_info['metrics'] if 'metrics' in worker_info else {"issues-new": "issues", - "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"} - } - - #create instance of the worker - app.insight_worker = InsightWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - print("Starting Flask App on host {} with port {} with pid: ".format(broker_host, worker_port) + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - print("Killing Flask App: {} and telling broker that this worker is disconnected.".format(str(os.getpid()))) - try: - logging.info("Sending disconnected message to broker... 
@ -> {} with info: {}\n".format('http://{}:{}/api/unstable/workers'.format( - config['broker_host'], config['broker_port']), config)) - requests.post('http://{}:{}/api/unstable/workers/remove'.format( - config['broker_host'], config['broker_port']), json=config) #hello message - except Exception as e: - logging.info("Ran into error: {}".format(e)) - logging.info("Broker's port is busy, worker will not be able to accept tasks, " - "please restart Augur if you want this worker to attempt connection again.") - diff --git a/workers/insight_worker/runtime.py b/workers/insight_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/insight_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.insight_worker.insight_worker import InsightWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = InsightWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/insight_worker/setup.py b/workers/insight_worker/setup.py --- a/workers/insight_worker/setup.py +++ b/workers/insight_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="insight_worker", - version="0.0.2", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'insight_worker_start=insight_worker.runtime:main', + 'insight_worker_start=workers.insight_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/linux_badge_worker/__init__.py b/workers/linux_badge_worker/__init__.py new file mode 100644 diff --git a/workers/linux_badge_worker/linux_badge_worker.py b/workers/linux_badge_worker/linux_badge_worker.py new file mode 100644 --- /dev/null +++ b/workers/linux_badge_worker/linux_badge_worker.py @@ -0,0 +1,63 @@ +import os +from datetime import datetime +import logging +import requests +import json +from urllib.parse import quote +from multiprocessing import Process, Queue + +import pandas as pd +import sqlalchemy as s +from sqlalchemy.ext.automap import automap_base +from sqlalchemy import MetaData +from workers.worker_base import Worker + +class LinuxBadgeWorker(Worker): + """ Worker that collects repo badging data from CII + config: database credentials, broker information, and ID + """ + def __init__(self, config={}): + + worker_type = "linux_badge_worker" + + given = [['git_url']] + models = ['badges'] + + data_tables = ['repo_badging'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({"endpoint": "https://bestpractices.coreinfrastructure.org/projects.json?pq="}) + self.tool_source = 'Linux Badge Worker' + self.tool_version = '1.0.0' + self.data_source = 'CII Badging API' + + + def badges_model(self, entry_info, repo_id): + """ Data collection and storage method + Query the CII API and store the result in the DB for the badges model + """ + git_url = entry_info['given']['git_url'] + 
self.logger.info("Collecting data for {}".format(git_url)) + extension = quote(git_url[0:-4]) + + url = self.config['endpoint'] + extension + self.logger.info("Hitting CII endpoint: " + url + " ...") + data = requests.get(url=url).json() + + if data != []: + self.logger.info("Inserting badging data for " + git_url) + self.db.execute(self.repo_badging_table.insert()\ + .values(repo_id=repo_id, + data=data, + tool_source=self.tool_source, + tool_version=self.tool_version, + data_source=self.data_source)) + + self.results_counter += 1 + else: + self.logger.info("No CII data found for {}\n".format(git_url)) + + self.register_task_completion(entry_info, repo_id, "badges") diff --git a/workers/linux_badge_worker/linux_badge_worker/__init__.py b/workers/linux_badge_worker/linux_badge_worker/__init__.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""linux_badge_worker - Augur worker that collects CII badging data""" - -__tool_source__ = 'Linux Badge Worker' -__tool_version__ = '0.1.0' -__data_source__ = 'CII Badging API' - -__author__ = 'Augur Team <[email protected]>' -__all__ = [] - diff --git a/workers/linux_badge_worker/linux_badge_worker/runtime.py b/workers/linux_badge_worker/linux_badge_worker/runtime.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/runtime.py +++ /dev/null @@ -1,107 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from linux_badge_worker.worker import BadgeWorker -from workers.standard_methods import read_config - -def create_server(app): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.linux_badge_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.linux_badge_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51235, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'linux_badge_worker', None, None) - - 
worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.linux_badge_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json?pq=", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - app.linux_badge_worker = BadgeWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - - if app.linux_badge_worker._child is not None: - app.linux_badge_worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/linux_badge_worker/worker.py b/workers/linux_badge_worker/linux_badge_worker/worker.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/worker.py +++ /dev/null @@ -1,240 +0,0 @@ -import os -from datetime import datetime -import logging -import requests -import json -from urllib.parse import quote -from multiprocessing import Process, Queue - -from linux_badge_worker import __data_source__, __tool_source__, __tool_version__ -import pandas as pd -import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData -from workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class BadgeWorker: - """ Worker that collects repo badging data from CII - config: database credentials, broker information, and ID - """ - def __init__(self, config, task=None): - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.config = config - - self.db = None - self.repo_badging_table = None - - self._task = task - self._queue = Queue() - self._child = None - - self.history_id = None - self.finishing_task = False - self.working_on = None - self.results_counter = 0 - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": 
[["git_url"]], - "models":["badges"] - } - ], - "config": [self.config] - } - - self._db_str = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], - self.config['password'], - self.config['host'], - self.config['port'], - self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self._db_str, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self._db_str, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - logging.info("Database connection established...") - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['repo_badging']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - self.repo_badging_table = Base.classes.repo_badging.__table__ - logging.info("ORM setup complete...") - - # Organize different api keys/oauths available - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauth_sql = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(0)) - - for oauth in [{'oauth_id': 0, 'access_token': 0}] + json.loads(pd.read_sql(oauth_sql, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) \ - - datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - - # Send broker hello message - connect_to_broker(self) - logging.info("Connected to the broker...\n") - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "key": "", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['github_api_key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - 
self._queue.put(value) - - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - - self._task = value - self.run() - - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def badges_model(self, entry_info, repo_id): - """ Data collection and storage method - Query the CII API and store the result in the DB for the badges model - """ - git_url = entry_info['given']['git_url'] - logging.info("Collecting data for {}".format(git_url)) - extension = quote(git_url[0:-4]) - - url = self.config['endpoint'] + extension - logging.info("Hitting CII endpoint: " + url + " ...") - data = requests.get(url=url).json() - - if data != []: - logging.info("Inserting badging data for " + git_url) - self.db.execute(self.repo_badging_table.insert()\ - .values(repo_id=repo_id, - data=data, - tool_source=__tool_source__, - tool_version=__tool_version__, - data_source=__data_source__)) - - self.results_counter += 1 - else: - logging.info("No CII data found for {}\n".format(git_url)) - - register_task_completion(self, entry_info, repo_id, "badges") - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - """ Query all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'badges': - self.badges_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() diff --git a/workers/linux_badge_worker/runtime.py b/workers/linux_badge_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/linux_badge_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.linux_badge_worker.linux_badge_worker import LinuxBadgeWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = LinuxBadgeWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/setup.py b/workers/linux_badge_worker/setup.py --- 
a/workers/linux_badge_worker/setup.py +++ b/workers/linux_badge_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="linux_badge_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'linux_badge_worker_start=linux_badge_worker.runtime:main', + 'linux_badge_worker_start=workers.linux_badge_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/metric_status_worker/metric_status_worker/__init__.py b/workers/metric_status_worker/metric_status_worker/__init__.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/metric_status_worker/metric_status_worker/runtime.py b/workers/metric_status_worker/metric_status_worker/runtime.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/runtime.py +++ /dev/null @@ -1,108 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, logging, requests, json -from metric_status_worker.worker import MetricStatusWorker -import os -import json -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.metric_status_worker.task = request.json - - #set task - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "success" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.metric_status_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51263, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'metric_status_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if 
r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.metric_status_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - app.metric_status_worker = MetricStatusWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=port) - if app.metric_status_worker._child is not None: - app.metric_status_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/metric_status_worker/metric_status_worker/worker.py b/workers/metric_status_worker/metric_status_worker/worker.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/worker.py +++ /dev/null @@ -1,719 +0,0 @@ -import base64 -import logging -import os -import re -import sys -import json -import time -from abc import ABC -from datetime import datetime -from multiprocessing import Process, Queue -from urllib.parse import urlparse - -import pandas as pd -import requests -import sqlalchemy as s -from github import Github -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - - -class MetricStatusWorker: - def __init__(self, config, task=None): - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.db = None - self.table = None - self.API_KEY = self.config['key'] - self.tool_source = 'Metric Status Worker' - self.tool_version = '0.0.1' - self.data_source = 'GitHub API' - self.results_counter = 0 - self.working_on = None - - - # url = 'https://api.github.com' - # response = requests.get(url, headers=self.headers) - # self.rate_limit = int(response.headers['X-RateLimit-Remaining']) - - specs = { - "id": self.config['id'], - "location": self.config['location'], - 
"qualifications": [ - { - "given": [["git_url"]], - "models":["chaoss_metric_status"] - } - ], - "config": [self.config] - } - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - logging.info("Making database connections...") - - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['chaoss_metric_status']) - # helper_metadata.reflect(self.helper_db) - - Base = automap_base(metadata=metadata) - - Base.prepare() - - self.chaoss_metric_status_table = Base.classes['chaoss_metric_status'].__table__ - - try: - requests.post('http://{}:{}/api/unstable/workers'.format( - self.config['broker_host'],self.config['broker_port']), json=specs) - except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker') - sys.exit('Cannot connect to the broker! Quitting...') - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced """ - return self._task - - @task.setter - def task(self, value): - try: - if value['job_type'] == 'UPDATE': - self._queue.put(CollectorTask('TASK', {})) - elif value['job_type'] == 'MAINTAIN': - self._maintain_queue.put(CollectorTask('TASK', {})) - - if 'focused_task' in value: - if value['focused_task'] == 1: - self.finishing_task = True - except Exception as e: - logging.error("Error: {},".format(str(e))) - - self._task = CollectorTask(message_type='TASK', entry_info={}) - self.run() - - def cancel(self): - """ Delete/cancel current task """ - self._task = None - - def run(self): - logging.info("Running...") - if self._child is None: - self._child = Process(target=self.collect, args=()) - self._child.start() - requests.post("http://{}:{}/api/unstable/add_pids".format( - self.config['broker_host'],self.config['broker_port']), json={'pids': [self._child.pid, os.getpid()]}) - - def collect(self): - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = 'UPDATE' - elif not self._maintain_queue.empty(): - message = self._maintain_queue.get() - logging.info("Popped off message: {}".format(str(message.entry_info))) - self.working_on = "MAINTAIN" - else: - break - - - if message.type == 'EXIT': - break - if message.type != 'TASK': - raise ValueError( - f'{message.type} is not a recognized task type') - - if message.type == 'TASK': - self.update_metrics(message.entry_info) - - def update_metrics(self, entry_info): - """ Data colletction function - Query the github api for metric status - """ - status = MetricsStatus(self.API_KEY) - status.create_metrics_status() - metrics = status.metrics_status - - # convert to dict - dict_metrics = [] - for metric in metrics: - metric_info = { - 'cm_group': metric['group'], - 'cm_source': metric['data_source'], - 'cm_type': metric['metric_type'], - 'cm_backend_status': metric['backend_status'], - 'cm_frontend_status': metric['frontend_status'], - 'cm_api_endpoint_repo': metric['endpoint_repo'], - 'cm_api_endpoint_rg': metric['endpoint_group'], - 'cm_defined': metric['is_defined'], - 'cm_name': metric['display_name'], - 'cm_working_group': metric['group'], - 'cm_info': metric['tag'], - 'cm_working_group_focus_area': metric['focus_area'], - 'tool_source': self.tool_source, - 'tool_version': self.tool_version, - 'data_source': 
self.data_source, - } - dict_metrics.append(metric_info) - - need_insertion = self.filter_duplicates({'cm_api_endpoint_repo': "cm_api_endpoint_repo", 'cm_backend_status':'cm_api_endpoint_rg'}, ['chaoss_metric_status'], - dict_metrics) - logging.info("Count of contributors needing insertion: " + str(len(need_insertion)) + "\n") - for metric in need_insertion: - result = self.db.execute(self.chaoss_metric_status_table.insert().values(metric)) - logging.info("Primary key inserted into the metrics table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - - self.register_task_completion() - - - # def filter_duplicates(self, og_data): - # need_insertion = [] - # colSQL = s.sql.text(""" - # SELECT * FROM chaoss_metric_status - # """) - # values = pd.read_sql(colSQL, self.db) - # for obj in og_data: - # location = values.loc[ (values['cm_name']==obj['cm_name'] ) & ( values['cm_working_group']==obj[ - # 'cm_working_group']) & ()] - # if not location.empty: - # logging.info("value of tuple exists: " + str(obj['cm_name'])) - # else: - # need_insertion.append(obj) - # - # logging.info("While filtering duplicates, we reduced the data size from " + str(len(og_data)) + - # " to " + str(len(need_insertion)) + "\n") - # - # return need_insertion - - def filter_duplicates(self, cols, tables, og_data): - need_insertion = [] - - table_str = tables[0] - del tables[0] - for table in tables: - table_str += ", " + table - for col in cols.keys(): - colSQL = s.sql.text(""" - SELECT {} FROM {} - """.format(col, table_str)) - values = pd.read_sql(colSQL, self.db, params={}) - - for obj in og_data: - if values.isin([obj[cols[col]]]).any().any(): - logging.info("value of tuple exists: " + str(obj[cols[col]]) + "\n") - elif obj not in need_insertion: - need_insertion.append(obj) - logging.info("While filtering duplicates, we reduced the data size from " + str(len(og_data)) + - " to " + str(len(need_insertion)) + "\n") - return need_insertion - - def update_exist_metrics(self, metrics): - need_update = [] - need_insert = [] - - for metric in metrics: - result = self.db.execute(self.chaoss_metric_status_table.update().where((self.chaoss_metric_status_table.c.cm_name == metric['cm_name'])&(self.chaoss_metric_status_table.c.cm_group == metric['cm_group']) & ((self.chaoss_metric_status_table.c.cm_api_endpoint_repo != metric['cm_api_endpoint_repo']) | (self.chaoss_metric_status_table.c.cm_api_endpoint_rg != metric['cm_api_endpoint_rg'])|(self.chaoss_metric_status_table.c.cm_source != metric['cm_source'])) - ).values(metric)) - - if result.rowcount: - logging.info("Update Metric {}-{}".format(metric['cm_group'], metric['cm_name'])) - - def register_task_completion(self): - task_completed = { - 'worker_id': self.config['id'], - 'job_type': self.working_on, - } - - logging.info("Telling broker we completed task: " + str(task_completed) + "\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - self.results_counter = 0 - - - - -class FrontendStatusExtractor(object): - - def __init__(self): - pass - self.api_text = open(os.path.abspath(os.path.dirname(os.path.dirname(os.getcwd()))) + - "/frontend/src/AugurAPI.ts", 'r').read() - self.attributes = re.findall( - r'(?:(GitEndpoint|Endpoint|Timeseries|addRepoMetric|addRepoGroupMetric)\()\'(.*)\', \'(.*)\'', - self.api_text) - self.timeseries = [ - attribute for attribute in self.attributes if 
attribute[0] == "Timeseries"] - self.endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "Endpoint"] - self.git_endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "GitEndpoint"] - self.repo_metrics = [ - attribute for attribute in self.attributes if attribute[0] == 'addRepoMetric'] - self.group_metric = [ - attribute for attribute in self.attributes if attribute[0] == 'addRepoMetric'] - - def determine_frontend_status(self, metric): - metric.frontend_status = 'unimplemented' - attribute = None - - if metric.metric_type == "timeseries": - attribute = next((attribute for attribute in self.timeseries if - "/api/unstable/<owner>/<repo>/timeseries/{}".format(attribute[2]) == metric.endpoint_repo), - None) - - elif metric.metric_type == "metric": - attribute = next((attribute for attribute in self.endpoints if - "/api/unstable/<owner>/<repo>/{}".format(attribute[2]) == metric.endpoint_repo), None) - if not attribute: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/repos/<repo_id>/{}".format( - attribute[2]) == metric.endpoint_repo), None) - if not attribute and metric.endpoint_group: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/{}".format(attribute[2]) == metric.endpoint_group), None) - - elif metric.metric_type == "git": - attribute = next((attribute for attribute in self.git_endpoints if - "/api/unstable/git/{}".format(attribute[2]) == metric.endpoint_repo), None) - - if attribute is not None: - metric.frontend_status = 'implemented' - metric.chart_mapping = attribute[1] - else: - metric.frontend_status = 'unimplemented' - - -class Metric(ABC): - - def __init__(self): - self.ID = None - self.tag = None - self.display_name = None - self.group = None - self.backend_status = 'unimplemented' - self.frontend_status = 'unimplemented' - self.chart_mapping = None - self.data_source = None - self.metric_type = None - self.documentation_url = None - self.is_defined = False - self.focus_area = None - self.endpoint_group = None - self.endpoint_repo = None - - -class GroupedMetric(Metric): - - def __init__(self, display_name, group, tag, focus_area): - Metric.__init__(self) - self.display_name = display_name - self.tag = tag - self.ID = re.sub(r'-$|\*', '', 'none' + '-' + self.tag) - self.group = group - self.focus_area = focus_area - - -class ImplementedMetric(Metric): - - def __init__(self, metadata, frontend_status_extractor): - Metric.__init__(self) - - self.ID = metadata['ID'] - self.tag = metadata['tag'] - self.display_name = metadata['metric_name'] - self.backend_status = 'implemented' - self.data_source = metadata['source'] - self.group = "experimental" - self.endpoint_group = None - self.endpoint_repo = None - - - if 'metric_type' in metadata: - self.metric_type = metadata['metric_type'] - else: - self.metric_type = 'metric' - - if 'endpoint' in metadata: - if 'group_endpoint' in metadata: - self.endpoint_group = metadata['group_endpoint'] - if 'repo_endpoint' in metadata: - self.endpoint_repo = metadata['repo_endpoint'] - else: - self.endpoint_repo = metadata['endpoint'] - frontend_status_extractor.determine_frontend_status(self) - - -class MetricsStatus(object): - wg_evolution = { - "repo": "chaoss/wg-evolution", - "focus_area": "focus_areas", - "name": 'evolution' - } - - wg_diversity_inclusion = { - "repo": "chaoss/wg-diversity-inclusion", - "focus_area": "focus-areas", - "name": "diversity-inclusion" - } - - 
wg_value = { - "repo": "chaoss/wg-value", - "focus_area": 'focus-areas', - "name": "value" - } - - wg_common = { - "repo": "chaoss/wg-common", - "focus_area": "focus-areas", - "name": "common" - } - - wg_risk = { - "repo": "chaoss/wg-risk", - "focus_area": "focus-areas", - "name": "risk" - } - - def __init__(self, githubapi): - self.__githubapi = githubapi - self.github = Github(self.__githubapi) - - # TODO: don't hardcode this - self.groups = { - "evolution": "Evolution", - "diversity-inclusion": "Diversity and Inclusion metrics", - "value": "Value", - "risk": "Risk", - "common": "Common", - "experimental": "Experimental", - "all": "All" - } - - self.implemented_metrics = [] - - self.evo_metrics = [] - self.di_metrics = [] - self.risk_metrics = [] - self.value_metrics = [] - self.common_metrics = [] - self.experimental_metrics = [] - - self.metrics_by_group = [] - - self.metrics_status = [] - - self.data_sources = [] - self.metric_types = [] - self.tags = {} - self.metadata = [] - - def create_metrics_status(self): - - self.build_implemented_metrics() - - self.evo_metrics = self.create_grouped_metrics( - self.wg_evolution, "evolution") - self.risk_metrics = self.create_grouped_metrics(self.wg_risk, "risk") - self.common_metrics = self.create_grouped_metrics( - self.wg_common, 'common') - self.di_metrics = self.create_grouped_metrics( - self.wg_diversity_inclusion, 'diversity-inclusion') - self.value_metrics = self.create_grouped_metrics( - self.wg_value, 'value') - - self.metrics_by_group = [self.evo_metrics, self.risk_metrics, - self.common_metrics, self.di_metrics, self.value_metrics] - - self.create_experimental_metrics() - self.metrics_by_group.append(self.experimental_metrics) - # - self.copy_implemented_metrics() - - self.find_defined_metrics() - - self.build_metrics_status() - - # self.build_metadata() - - def build_implemented_metrics(self): - frontend_status_extractor = FrontendStatusExtractor() - - r = requests.get( - url='http://{}:{}/api/unstable/batch/metadata'.format( - self.config['broker_host'],self.config['broker_port'])) - data = json.loads(r.text) - - for metric in data: - if "ID" in metric.keys(): - self.implemented_metrics.append( - ImplementedMetric(metric, frontend_status_extractor)) - - def create_grouped_metrics(self, group, group_name): - metrics = self.find_metrics_from_focus_area( - group['repo'], group['focus_area']) - - remote_metrics = [] - for metric in metrics: - remote_metrics.append(GroupedMetric(metric.display_name, group['name'], metric.tag, - metric.focus_area)) - - return remote_metrics - - def find_metrics_from_focus_area(self, repo_name, focus_area_path): - focus_areas = self.github.get_repo( - repo_name).get_dir_contents(focus_area_path) - metrics = [] - for area in focus_areas: - # get focus area name from filename - # focus_area_name = re.sub('.md','',re.sub('-', ' ',area.name)) - focus_area_name = None - focus_area_name_splited = [a.capitalize() for a in re.sub( - '.md', '', re.sub('[_]|[-]', ' ', area.name)).split()] - focus_area_name = ' '.join(focus_area_name_splited) - - # extract structure :focus_area_name/readme.md - if area.type == 'dir': - tmp = self.github.get_repo( - repo_name).get_dir_contents(area.path) - readme = [a for a in tmp if 'readme' in a.name.lower()] - if len(readme) == 0: - continue - else: - area = readme[0] - elif 'readme' in area.name.lower() or 'changelog' in area.name.lower(): - continue - - # decode content; github api return encoded content - decoded_content = base64.b64decode(area.content).decode('utf-8') - 
metric_name_tag = self.parse_table( - decoded_content) or self.parse_list(decoded_content) - - for name, tag in metric_name_tag.items(): - add_metric = Metric() - add_metric.display_name = name - add_metric.tag = tag - add_metric.focus_area = focus_area_name - - metrics.append(add_metric) - - if metric_name_tag is None: - continue - - return metrics - - def parse_table(self, md_content): - # group 0 is header, group 2 is |---|--|, and group 3 is table content - tables = re.findall( - r'^(\|?[^\n]+\|[^\n]+\|?\r?\n)((?:\|?\s*:?[-]+\s*:?)+\|?)(\n(?:\|?[^\n]+\|[^\n]+\|?\r?\n?)*)?$', md_content, - re.MULTILINE) - - if not tables: - return None - - box = [] - metrics_name_tag = {} - for table in tables: - # get metric name by 'metric_name' index in column - metric_index, length_in_row = self.get_metric_index_in_table_row( - table[0]) - table_content = [x.strip() - for x in table[2].replace('\n', '|').split('|')] - # remove two empty str - table_content.pop(0) - table_content.pop() - - raw_metrics = [table_content[a] for a in range( - metric_index, len(table_content), length_in_row)] - - for raw_metric in raw_metrics: - metric_name, metric_link = self.is_has_link( - raw_metric, md_content) - metric_name = re.sub('[\[]|[\]]', '', metric_name) - if not metric_link: - metric_link = re.sub(' ', '-', metric_name).lower() - metrics_name_tag[metric_name] = self.link_to_tag( - metric_name, str(metric_link)) - - return metrics_name_tag - - def get_metric_index_in_table_row(self, row): - header_names = [x.strip().lower() for x in row.split('|')] - # print(header_names) - index = None - if 'metric' in header_names: - index = header_names.index('metric') - elif 'name' in header_names: - index = header_names.index('name') - - return index, len(header_names) - - def parse_list(self, md_content): - matched_lists = re.findall(r'[-]\s+(.+)\n', md_content) - metric_names = {} - # print(matched_lists) - for matched in matched_lists: - # print(matched) - metirc_name = re.sub(r'.+:\s', '', matched) - metirc_name, metric_link = self.is_has_link( - metirc_name, md_content) - metirc_name = re.sub('[\[]|[\]]', '', metirc_name) - metric_names[metirc_name] = self.link_to_tag( - metirc_name, metric_link) - return metric_names - - def is_has_link(self, s, md_content): - # remove leading whitespace if exist - s = s.strip() - pattern_inline = re.compile(r'\[([^\[\]]+)\]\(([^)]+)') - match = pattern_inline.match(s) - - if match: - return match.group(1), match.group(2) - - pattern_ref = re.compile(r'\[([^\[\]]+)\]\[([^]]+)') - match2 = pattern_ref.match(s) - - if match2: - link = match2.group(2) - p = re.compile(r'\n\[' + link + r'\]:\s+(.+)\n') - res = p.search(md_content, re.DOTALL) - if res: - return match2.group(1), res.group(1) - else: - return s, None - - def link_to_tag(self, name, s): - - # generate tag if undefined metric - if not s: - return re.sub(' ', '-', name.lower()) - - pattern = re.compile(r'\/?([a-zA-Z_-]+)(\.md)?$') - m = pattern.search(s) - if m: - return re.sub('_', '-', re.sub('.md', '', m.group(1).lower())) - else: - return re.sub(' ', '-', re.sub('\(s\)', 's', name)) - - def create_experimental_metrics(self): - tags = [] - for group in self.metrics_by_group: - for metric in group: - tags.append(metric.tag) - - self.experimental_metrics = [ - metric for metric in self.implemented_metrics if metric.tag not in tags] - - def copy_implemented_metrics(self): - # takes implemented metrics and copies their data to the appropriate metric object - # I am so very sorry - # TODO: burn this into the ground - for group 
in enumerate(self.metrics_by_group): - if group[1] is not self.experimental_metrics: - for grouped_metric in group[1]: - defined_implemented_metrics = [ - metric for metric in self.implemented_metrics if grouped_metric.tag == metric.tag] - if defined_implemented_metrics != []: - for metric in defined_implemented_metrics: - metric.group = group[1][0].group - metric.focus_area = grouped_metric.focus_area - group[1].append(metric) - self.implemented_metrics.remove(metric) - grouped_metric.ID = 'n/a' - self.metrics_by_group[group[0]] = [ - metric for metric in group[1] if metric.ID != 'n/a'] - - def find_defined_metrics(self): - # return map {tag: html_url} - repo_names = [self.wg_common['repo'], self.wg_evolution['repo'], - self.wg_diversity_inclusion['repo'], self.wg_risk['repo'], self.wg_value['repo']] - - md_files = {} - - for repo_name in repo_names: - repo = self.github.get_repo(repo_name) - contents = repo.get_contents("") - - while len(contents) > 1: - file_content = contents.pop(0) - if file_content.type == "dir": - contents.extend(repo.get_contents(file_content.path)) - elif '.md' in file_content.name: - name = re.sub( - '_', '-', re.sub('.md', '', file_content.name)) - md_files[name.lower()] = file_content.html_url - - for group in self.metrics_by_group: - for metric in group: - if metric.tag in md_files.keys(): - metric.is_defined = True - metric.documentation_url = md_files[metric.tag] - - def build_metrics_status(self): - for group in self.metrics_by_group: - for metric in group: - self.metrics_status.append(metric.__dict__) - - def build_metadata(self): - self.get_metric_sources() - self.get_metric_types() - self.get_metric_tags() - - self.metadata = { - "remotes": { - "diversity_inclusion_urls": self.diversity_inclusion_urls, - "growth_maturity_decline_urls": self.growth_maturity_decline_urls, - "risk_urls": self.risk_urls, - "value_urls": self.value_urls, - "activity_repo_urls": self.activity_urls - }, - "groups": self.groups, - "data_sources": self.data_sources, - "metric_types": self.metric_types, - "tags": self.tags - } - - def get_metric_sources(self): - for data_source in [metric['data_source'] for metric in self.metrics_status]: - data_source = data_source.lower() - if data_source not in self.data_sources and data_source != "none": - self.data_sources.append(data_source) - self.data_sources.append("all") - - def get_metric_types(self): - for metric_type in [metric['metric_type'] for metric in self.metrics_status]: - metric_type = metric_type.lower() - if metric_type not in self.metric_types and metric_type != "none": - self.metric_types.append(metric_type) - self.metric_types.append("all") - - def get_metric_tags(self): - for tag in [(metric['tag'], metric['group']) for metric in self.metrics_status]: - # tag[0] = tag[0].lower() - if tag[0] not in [tag[0] for tag in self.tags] and tag[0] != "none": - self.tags[tag[0]] = tag[1] \ No newline at end of file diff --git a/workers/pull_request_worker/pull_request_worker/__init__.py b/workers/pull_request_worker/__init__.py similarity index 100% rename from workers/pull_request_worker/pull_request_worker/__init__.py rename to workers/pull_request_worker/__init__.py diff --git a/workers/pull_request_worker/pull_request_worker/worker.py b/workers/pull_request_worker/pull_request_worker.py similarity index 61% rename from workers/pull_request_worker/pull_request_worker/worker.py rename to workers/pull_request_worker/pull_request_worker.py --- a/workers/pull_request_worker/pull_request_worker/worker.py +++ 
b/workers/pull_request_worker/pull_request_worker.py @@ -1,225 +1,42 @@ import ast, json, logging, os, sys, time, traceback, requests from datetime import datetime from multiprocessing import Process, Queue -from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base -from workers.standard_methods import * from sqlalchemy.sql.expression import bindparam +from workers.worker_base import Worker -class GHPullRequestWorker: +class GitHubPullRequestWorker(Worker): """ Worker that collects Pull Request related data from the Github API and stores it in our database. :param task: most recent task the broker added to the worker's queue :param config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.working_on = None - self.config = config - LOG_FORMAT = '%(levelname)s:[%(name)s]: %(message)s' - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO, format=LOG_FORMAT) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self.db = None - self.table = None - self.API_KEY = self.config['key'] - self.tool_source = 'GitHub Pull Request Worker' - self.tool_version = '0.0.1' # See __init__.py - self.data_source = 'GitHub API' - self.results_counter = 0 - self.headers = {'Authorization': f'token {self.API_KEY}'} - self.history_id = None - self.finishing_task = True - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [['github_url']], - "models":['pull_requests', 'pull_request_commits', 'pull_request_files'] - } - ], - "config": [self.config] - } + def __init__(self, config={}): - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], - self.config['port'], self.config['database'] - ) + worker_type = "pull_request_worker" - #Database connections - logging.info("Making database connections...\n") - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) + # Define what this worker can be given and know how to interpret + given = [['github_url']] + models = ['pull_requests', 'pull_request_commits', 'pull_request_files'] - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['contributors', 'pull_requests', + # Define the tables needed to insert, update, or delete on + data_tables = ['contributors', 'pull_requests', 'pull_request_assignees', 'pull_request_events', 'pull_request_labels', 'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo', 'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits', - 'pull_request_files']) - - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) + 'pull_request_files'] + operations_tables = ['worker_history', 'worker_job'] - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - 
HelperBase.prepare() - - self.contributors_table = Base.classes.contributors.__table__ - self.pull_requests_table = Base.classes.pull_requests.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.pull_request_labels_table = Base.classes.pull_request_labels.__table__ - self.pull_request_message_ref_table = Base.classes.pull_request_message_ref.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_teams_table = Base.classes.pull_request_teams.__table__ - self.message_table = Base.classes.message.__table__ - self.pull_request_commits_table = Base.classes.pull_request_commits.__table__ - self.pull_request_files_table = Base.classes.pull_request_files.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - logging.info("Querying starting ids info...\n") - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - self.pr_id_inc = get_max_id(self, 'pull_requests', 'pull_request_id') - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - self.msg_id_inc = get_max_id(self, 'message', 'msg_id') - self.pr_msg_ref_id_inc = get_max_id(self, 'pull_request_message_ref', 'pr_msg_ref_id') - self.label_id_inc = get_max_id(self, 'pull_request_labels', 'pr_label_id') - self.event_id_inc = get_max_id(self, 'pull_request_events', 'pr_event_id') - self.reviewer_id_inc = get_max_id(self, 'pull_request_reviewers', 'pr_reviewer_map_id') - self.assignee_id_inc = get_max_id(self, 'pull_request_assignees', 'pr_assignee_map_id') - self.pr_meta_id_inc = get_max_id(self, 'pull_request_meta', 'pr_repo_meta_id') - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - # self.pull_requests_graphql({ - # 'job_type': 'MAINTAIN', - # 'models': ['pull_request_files'], - # 'display_name': 'pull_request_files model for url: https://github.com/zephyrproject-rtos/actions_sandbox.git', - # 'given': { - # 'github_url': 'https://github.com/zephyrproject-rtos/actions_sandbox.git' - # } - # }, 25201) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - github_url = value['given']['github_url'] - - repo_url_SQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(github_url)) - rs = pd.read_sql(repo_url_SQL, self.db, params={}) - - try: - repo_id = int(rs.iloc[0]['repo_id']) - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - if 'focused_task' in value: - if value['focused_task'] == 1: - self.finishing_task = True - - 
except Exception as e: - logging.error(f"error: {e}, or that repo is not in our database: {value}\n") - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query all repos with repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['github_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'pull_requests': - self.pull_requests_model(message, repo_id) - elif message['models'][0] == 'pull_request_commits': - self.pull_request_commits_model(message, repo_id) - elif message['models'][0] == 'pull_request_files': - self.pull_requests_graphql(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + # Define data collection info + self.tool_source = 'GitHub Pull Request Worker' + self.tool_version = '1.0.0' + self.data_source = 'GitHub API' + def graphql_paginate(self, query, data_subjects, before_parameters=None): """ Paginate a GitHub GraphQL query backwards @@ -227,7 +44,7 @@ def graphql_paginate(self, query, data_subjects, before_parameters=None): :rtype: A Pandas DataFrame, contains all data contained in the pages """ - logging.info(f'Start paginate with params: \n{data_subjects} ' + self.logger.info(f'Start paginate with params: \n{data_subjects} ' f'\n{before_parameters}') def all_items(dictionary): @@ -262,7 +79,7 @@ def find_root_of_subject(data, key_subject): for data_subject, nest in data_subjects.items(): - logging.info(f'Beginning paginate process for field {data_subject} ' + self.logger.info(f'Beginning paginate process for field {data_subject} ' f'for query: {query}') page_count = 0 @@ -274,13 +91,13 @@ def find_root_of_subject(data, key_subject): success = False for attempt in range(num_attempts): - logging.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' + self.logger.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' f'page number {page_count}\n') response = requests.post(base_url, json={'query': query.format( **before_parameters)}, headers=self.headers) - update_gh_rate_limit(self, response) + self.update_gh_rate_limit(response) try: data = response.json() @@ -288,9 +105,9 @@ def find_root_of_subject(data, key_subject): data = json.loads(json.dumps(response.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) + self.logger.info("Error!: {}".format(data['errors'])) if data['errors'][0]['type'] == 'RATE_LIMITED': - 
update_gh_rate_limit(self, response) + self.update_gh_rate_limit(response) num_attempts -= 1 continue @@ -302,18 +119,18 @@ def find_root_of_subject(data, key_subject): data = root['edges'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url)) break if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': num_attempts -= 1 - update_gh_rate_limit(self, response, temporarily_disable=True) + self.update_gh_rate_limit(response, temporarily_disable=True) if data['message'] == 'Bad credentials': - update_gh_rate_limit(self, response, bad_credentials=True) + self.update_gh_rate_limit(response, bad_credentials=True) if not success: - logging.info('GraphQL query failed: {}'.format(query)) + self.logger.info('GraphQL query failed: {}'.format(query)) continue before_parameters.update({ @@ -323,7 +140,7 @@ def find_root_of_subject(data, key_subject): tuples += data - logging.info(f'Paged through {page_count} pages and ' + self.logger.info(f'Paged through {page_count} pages and ' f'collected {len(tuples)} data points\n') if not nest: @@ -333,9 +150,9 @@ def find_root_of_subject(data, key_subject): before_parameters=before_parameters) - def pull_requests_graphql(self, task_info, repo_id): + def pull_request_files_model(self, task_info, repo_id): - owner, repo = get_owner_repo(task_info['given']['github_url']) + owner, repo = self.get_owner_repo(task_info['given']['github_url']) # query existing PRs and the respective url we will append the commits url to pr_number_sql = s.sql.text(""" @@ -349,7 +166,7 @@ def pull_requests_graphql(self, task_info, repo_id): for index, pull_request in enumerate(pr_numbers.itertuples()): - logging.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') + self.logger.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') query = """ {{ @@ -394,26 +211,24 @@ def pull_requests_graphql(self, task_info, repo_id): WHERE pull_request_files.pull_request_id = pull_requests.pull_request_id AND repo_id = :repo_id """) - logging.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') + self.logger.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') table_values = pd.read_sql(table_values_sql, self.db, params={'repo_id': repo_id}) # Compare queried values against table values for dupes/updates if len(pr_file_rows) > 0: table_columns = pr_file_rows[0].keys() else: - logging.info(f'No rows need insertion for repo {repo_id}\n') - register_task_completion(self, task_info, repo_id, 'pull_request_files') + self.logger.info(f'No rows need insertion for repo {repo_id}\n') + self.register_task_completion(task_info, repo_id, 'pull_request_files') + return # Compare queried values against table values for dupes/updates pr_file_rows_df = pd.DataFrame(pr_file_rows) pr_file_rows_df = pr_file_rows_df.dropna(subset=['pull_request_id']) - pr_file_rows_df['need_update'] = 0 dupe_columns = ['pull_request_id', 'pr_file_path'] update_columns = ['pr_file_additions', 'pr_file_deletions'] - logging.info(f'{pr_file_rows_df}') - logging.info(f'{table_values}') need_insertion = pr_file_rows_df.merge(table_values, 
suffixes=('','_table'), how='outer', indicator=True, on=dupe_columns).loc[ lambda x : x['_merge']=='left_only'][table_columns] @@ -430,7 +245,7 @@ def pull_requests_graphql(self, task_info, repo_id): pr_file_insert_rows = need_insertion.to_dict('records') pr_file_update_rows = need_updates.to_dict('records') - logging.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' + self.logger.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' f'{len(need_updates)} updates.\n') if len(pr_file_update_rows) > 0: @@ -447,7 +262,7 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) if len(pr_file_insert_rows) > 0: @@ -460,14 +275,22 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) - register_task_completion(self, task_info, repo_id, 'pull_request_files') + self.register_task_completion(task_info, repo_id, 'pull_request_files') def pull_request_commits_model(self, task_info, repo_id): """ Queries the commits related to each pull request already inserted in the db """ + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + + # query existing PRs and the respective url we will append the commits url to pr_url_sql = s.sql.text(""" SELECT DISTINCT pr_url, pull_requests.pull_request_id @@ -484,7 +307,7 @@ def pull_request_commits_model(self, task_info, repo_id): update_col_map = {} # Use helper paginate function to iterate the commits url and check for dupes - pr_commits = paginate(self, commits_url, duplicate_col_map, update_col_map, table, table_pkey, + pr_commits = self.paginate(commits_url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="where pull_request_id = {}".format(pull_request.pull_request_id)) for pr_commit in pr_commits: # post-pagination, iterate results @@ -500,9 +323,9 @@ def pull_request_commits_model(self, task_info, repo_id): 'data_source': 'GitHub API', } result = self.db.execute(self.pull_request_commits_table.insert().values(pr_commit_row)) - logging.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") - register_task_completion(self, task_info, repo_id, 'pull_request_commits') + self.register_task_completion(task_info, repo_id, 'pull_request_commits') def pull_requests_model(self, entry_info, repo_id): """Pull Request data collection function. Query GitHub API for PhubRs. 
@@ -510,11 +333,18 @@ def pull_requests_model(self, entry_info, repo_id): :param entry_info: A dictionary consisiting of 'git_url' and 'repo_id' :type entry_info: dict """ + + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + github_url = entry_info['given']['github_url'] - logging.info('Beginning collection of Pull Requests...\n') - logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') - record_model_process(self, repo_id, 'pull_requests') + self.logger.info('Beginning collection of Pull Requests...\n') + self.logger.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') owner, repo = self.get_owner_repo(github_url) @@ -530,12 +360,12 @@ def pull_requests_model(self, entry_info, repo_id): duplicate_col_map = {'pr_src_id': 'id'} #list to hold pull requests needing insertion - prs = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, + prs = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey, where_clause='WHERE repo_id = {}'.format(repo_id), value_update_col_map={'pr_augur_contributor_id': float('nan')}) # Discover and remove duplicates before we start inserting - logging.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") + self.logger.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") for pr_dict in prs: @@ -553,7 +383,7 @@ def pull_requests_model(self, entry_info, repo_id): 'pr_src_state': pr_dict['state'], 'pr_src_locked': pr_dict['locked'], 'pr_src_title': pr_dict['title'], - 'pr_augur_contributor_id': find_id_from_login(self, pr_dict['user']['login']), + 'pr_augur_contributor_id': self.find_id_from_login(pr_dict['user']['login']), 'pr_body': pr_dict['body'], 'pr_created_at': pr_dict['created_at'], 'pr_updated_at': pr_dict['updated_at'], @@ -581,21 +411,21 @@ def pull_requests_model(self, entry_info, repo_id): } if pr_dict['flag'] == 'need_insertion': - logging.info(f'PR {pr_dict["id"]} needs to be inserted\n') + self.logger.info(f'PR {pr_dict["id"]} needs to be inserted\n') result = self.db.execute(self.pull_requests_table.insert().values(pr)) - logging.info(f"Added Pull Request: {result.inserted_primary_key}") + self.logger.info(f"Added Pull Request: {result.inserted_primary_key}") self.pr_id_inc = int(result.inserted_primary_key[0]) elif pr_dict['flag'] == 'need_update': result = self.db.execute(self.pull_requests_table.update().where( self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr)) - logging.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( + self.logger.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( pr_dict['id'])) self.pr_id_inc = pr_dict['pkey'] else: - logging.info("PR does not need to be inserted. Fetching its id from DB") + self.logger.info("PR does not need to be inserted. 
Fetching its id from DB") pr_id_sql = s.sql.text(""" SELECT pull_request_id FROM pull_requests WHERE pr_src_id={} @@ -609,16 +439,16 @@ def pull_requests_model(self, entry_info, repo_id): self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc) self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc) - logging.info(f"Inserted PR data for {owner}/{repo}") + self.logger.info(f"Inserted PR data for {owner}/{repo}") self.results_counter += 1 - register_task_completion(self, entry_info, repo_id, 'pull_requests') + self.register_task_completion(entry_info, repo_id, 'pull_requests') def query_labels(self, labels, pr_id): - logging.info('Querying PR Labels\n') + self.logger.info('Querying PR Labels\n') if len(labels) == 0: - logging.info('No new labels to add\n') + self.logger.info('No new labels to add\n') return table = 'pull_request_labels' @@ -629,12 +459,12 @@ def query_labels(self, labels, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - pr_labels_table_values = get_table_values(self, cols_query, [table]) + pr_labels_table_values = self.get_table_values(cols_query, [table]) - new_labels = assign_tuple_action(self, labels, pr_labels_table_values, update_col_map, duplicate_col_map, + new_labels = self.assign_tuple_action(labels, pr_labels_table_values, update_col_map, duplicate_col_map, table_pkey) - logging.info(f'Found {len(new_labels)} labels\n') + self.logger.info(f'Found {len(new_labels)} labels\n') for label_dict in new_labels: @@ -653,14 +483,13 @@ def query_labels(self, labels, pr_id): if label_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_labels_table.insert().values(label)) - logging.info(f"Added PR Label: {result.inserted_primary_key}\n") - logging.info(f"Inserted PR Labels data for PR with id {pr_id}\n") + self.logger.info(f"Added PR Label: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted PR Labels data for PR with id {pr_id}\n") self.results_counter += 1 - self.label_id_inc = int(result.inserted_primary_key[0]) def query_pr_events(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Events\n') + self.logger.info('Querying PR Events\n') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/events?per_page=100&page={}') @@ -674,14 +503,14 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): duplicate_col_map = {'issue_event_src_id': 'id'} #list to hold contributors needing insertion or update - pr_events = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey) + pr_events = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") + self.logger.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") for pr_event_dict in pr_events: if pr_event_dict['actor']: - cntrb_id = find_id_from_login(self, pr_event_dict['actor']['login']) + cntrb_id = self.find_id_from_login(pr_event_dict['actor']['login']) else: cntrb_id = 1 @@ -700,18 +529,17 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.pull_request_events_table.insert().values(pr_event)) - logging.info(f"Added PR Event: {result.inserted_primary_key}\n") + self.logger.info(f"Added PR Event: {result.inserted_primary_key}\n") self.results_counter += 1 - self.event_id_inc = int(result.inserted_primary_key[0]) - 
logging.info(f"Inserted PR Events data for PR with id {pr_id}\n") + self.logger.info(f"Inserted PR Events data for PR with id {pr_id}\n") def query_reviewers(self, reviewers, pr_id): - logging.info('Querying Reviewers') + self.logger.info('Querying Reviewers') if reviewers is None or len(reviewers) == 0: - logging.info('No reviewers to add') + self.logger.info('No reviewers to add') return table = 'pull_request_reviewers' @@ -722,15 +550,15 @@ def query_reviewers(self, reviewers, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - reviewers_table_values = get_table_values(self, cols_query, [table]) + reviewers_table_values = self.get_table_values(cols_query, [table]) - new_reviewers = assign_tuple_action(self, reviewers, reviewers_table_values, update_col_map, duplicate_col_map, + new_reviewers = self.assign_tuple_action(reviewers, reviewers_table_values, update_col_map, duplicate_col_map, table_pkey) for reviewers_dict in new_reviewers: if 'login' in reviewers_dict: - cntrb_id = find_id_from_login(self, reviewers_dict['login']) + cntrb_id = self.find_id_from_login(reviewers_dict['login']) else: cntrb_id = 1 @@ -744,18 +572,17 @@ def query_reviewers(self, reviewers, pr_id): if reviewers_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_reviewers_table.insert().values(reviewer)) - logging.info(f"Added PR Reviewer {result.inserted_primary_key}") + self.logger.info(f"Added PR Reviewer {result.inserted_primary_key}") - self.reviewer_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 - logging.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") + self.logger.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") def query_assignee(self, assignees, pr_id): - logging.info('Querying Assignees') + self.logger.info('Querying Assignees') if assignees is None or len(assignees) == 0: - logging.info('No assignees to add') + self.logger.info('No assignees to add') return table = 'pull_request_assignees' @@ -766,15 +593,15 @@ def query_assignee(self, assignees, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - assignee_table_values = get_table_values(self, cols_query, [table]) + assignee_table_values = self.get_table_values(cols_query, [table]) - assignees = assign_tuple_action(self, assignees, assignee_table_values, update_col_map, duplicate_col_map, + assignees = self.assign_tuple_action(assignees, assignee_table_values, update_col_map, duplicate_col_map, table_pkey) for assignee_dict in assignees: if 'login' in assignee_dict: - cntrb_id = find_id_from_login(self, assignee_dict['login']) + cntrb_id = self.find_id_from_login(assignee_dict['login']) else: cntrb_id = 1 @@ -788,15 +615,14 @@ def query_assignee(self, assignees, pr_id): if assignee_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_assignees_table.insert().values(assignee)) - logging.info(f'Added PR Assignee {result.inserted_primary_key}') + self.logger.info(f'Added PR Assignee {result.inserted_primary_key}') - self.assignee_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 - logging.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') def query_pr_meta(self, head, base, pr_id): - logging.info('Querying PR Meta') + 
self.logger.info('Querying PR Meta') table = 'pull_request_meta' duplicate_col_map = {'pr_sha': 'sha'} @@ -808,12 +634,12 @@ def query_pr_meta(self, head, base, pr_id): update_keys += list(value_update_col_map.keys()) cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - meta_table_values = get_table_values(self, cols_query, [table]) + meta_table_values = self.get_table_values(cols_query, [table]) pr_meta_dict = { - 'head': assign_tuple_action(self, [head], meta_table_values, update_col_map, duplicate_col_map, + 'head': self.assign_tuple_action([head], meta_table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map=value_update_col_map)[0], - 'base': assign_tuple_action(self, [base], meta_table_values, update_col_map, duplicate_col_map, + 'base': self.assign_tuple_action([base], meta_table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map=value_update_col_map)[0] } @@ -824,7 +650,7 @@ def query_pr_meta(self, head, base, pr_id): 'pr_src_meta_label': pr_meta_data['label'], 'pr_src_meta_ref': pr_meta_data['ref'], 'pr_sha': pr_meta_data['sha'], - 'cntrb_id': find_id_from_login(self, pr_meta_data['user']['login']) if pr_meta_data['user'] \ + 'cntrb_id': self.find_id_from_login(pr_meta_data['user']['login']) if pr_meta_data['user'] \ and 'login' in pr_meta_data['user'] else None, 'tool_source': self.tool_source, 'tool_version': self.tool_version, @@ -836,13 +662,12 @@ def query_pr_meta(self, head, base, pr_id): self.pull_request_meta_table.c.pr_sha==pr_meta['pr_sha'] and self.pull_request_meta_table.c.pr_head_or_base==pr_side ).values(pr_meta)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( - issue_dict['id'])) - self.issue_id_inc = issue_dict['pkey'] + # self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format(issue_dict['id'])) + self.pr_meta_id_inc = pr_meta_data['pkey'] elif pr_meta_data['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_meta_table.insert().values(pr_meta)) - logging.info(f'Added PR Head {result.inserted_primary_key}') + self.logger.info(f'Added PR Head {result.inserted_primary_key}') self.pr_meta_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 @@ -857,12 +682,12 @@ def query_pr_meta(self, head, base, pr_id): if pr_meta_data['repo']: self.query_pr_repo(pr_meta_data['repo'], pr_side, self.pr_meta_id_inc) else: - logging.info('No new PR Head data to add') + self.logger.info('No new PR Head data to add') - logging.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Comments') + self.logger.info('Querying PR Comments') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/comments?per_page=100&page={}') @@ -876,14 +701,14 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): duplicate_col_map = {'pr_message_ref_src_comment_id': 'id'} #list to hold contributors needing insertion or update - pr_messages = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey) + pr_messages = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") + self.logger.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") for 
pr_msg_dict in pr_messages: if pr_msg_dict['user'] and 'login' in pr_msg_dict['user']: - cntrb_id = find_id_from_login(self, pr_msg_dict['user']['login']) + cntrb_id = self.find_id_from_login(pr_msg_dict['user']['login']) else: cntrb_id = 1 @@ -901,12 +726,11 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.message_table.insert().values(msg)) - logging.info(f'Added PR Comment {result.inserted_primary_key}') - self.msg_id_inc = int(result.inserted_primary_key[0]) + self.logger.info(f'Added PR Comment {result.inserted_primary_key}') pr_msg_ref = { 'pull_request_id': pr_id, - 'msg_id': self.msg_id_inc, + 'msg_id': int(result.inserted_primary_key[0]), 'pr_message_ref_src_comment_id': pr_msg_dict['id'], 'pr_message_ref_src_node_id': pr_msg_dict['node_id'], 'tool_source': self.tool_source, @@ -917,15 +741,14 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): result = self.db.execute( self.pull_request_message_ref_table.insert().values(pr_msg_ref) ) - logging.info(f'Added PR Message Ref {result.inserted_primary_key}') - self.pr_msg_ref_id_inc = int(result.inserted_primary_key[0]) + self.logger.info(f'Added PR Message Ref {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR Message data for PR with id {pr_id}') + self.logger.info(f'Finished adding PR Message data for PR with id {pr_id}') def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): - logging.info(f'Querying PR {pr_repo_type} repo') + self.logger.info(f'Querying PR {pr_repo_type} repo') table = 'pull_request_repo' duplicate_col_map = {'pr_src_repo_id': 'id'} @@ -935,13 +758,13 @@ def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - pr_repo_table_values = get_table_values(self, cols_query, [table]) + pr_repo_table_values = self.get_table_values(cols_query, [table]) - new_pr_repo = assign_tuple_action(self, [pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, + new_pr_repo = self.assign_tuple_action([pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, table_pkey)[0] if new_pr_repo['owner'] and 'login' in new_pr_repo['owner']: - cntrb_id = find_id_from_login(self, new_pr_repo['owner']['login']) + cntrb_id = self.find_id_from_login(new_pr_repo['owner']['login']) else: cntrb_id = 1 @@ -962,20 +785,8 @@ def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): if new_pr_repo['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_repo_table.insert().values(pr_repo)) - logging.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') + self.logger.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') - - def get_owner_repo(self, github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - + self.logger.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') diff --git a/workers/pull_request_worker/pull_request_worker/runtime.py b/workers/pull_request_worker/pull_request_worker/runtime.py deleted file mode 100644 --- a/workers/pull_request_worker/pull_request_worker/runtime.py +++ /dev/null @@ -1,109 +0,0 @@ -import json, logging, os, click -import requests -from flask import Flask, 
Response, jsonify, request -from pull_request_worker.worker import GHPullRequestWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': # POST a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.gh_pr_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.gh_pr_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51239, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'pull_request_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - print("New pull request worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.pull_request_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - - app.gh_pr_worker = GHPullRequestWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + 
str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.gh_pr_worker._child is not None: - app.gh_pr_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/pull_request_worker/runtime.py b/workers/pull_request_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/pull_request_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.pull_request_worker.pull_request_worker import GitHubPullRequestWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubPullRequestWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/pull_request_worker/setup.py b/workers/pull_request_worker/setup.py --- a/workers/pull_request_worker/setup.py +++ b/workers/pull_request_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="pull_request_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'pull_request_worker_start=pull_request_worker.runtime:main', + 'pull_request_worker_start=workers.pull_request_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/release_worker/release_worker/__init__.py b/workers/release_worker/__init__.py similarity index 100% rename from workers/release_worker/release_worker/__init__.py rename to workers/release_worker/__init__.py diff --git a/workers/release_worker/release_worker/worker.py b/workers/release_worker/release_worker.py similarity index 75% rename from workers/release_worker/release_worker/worker.py rename to workers/release_worker/release_worker.py --- a/workers/release_worker/release_worker/worker.py +++ b/workers/release_worker/release_worker.py @@ -6,12 +6,14 @@ import sqlalchemy as s from sqlalchemy import MetaData from sqlalchemy.ext.automap import automap_base -from workers.worker_template import Worker +from workers.worker_base import Worker #TODO - fully edit to match releases class ReleaseWorker(Worker): - def __init__(self, config): - + def __init__(self, config={}): + + worker_type = "release_worker" + # Define what this worker can be given and know how to interpret given = [['github_url']] models = ['releases'] @@ -21,18 +23,18 @@ def __init__(self, config): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) # Define data collection info self.tool_source = 'Release Worker' - self.tool_version = '0.0.1' + self.tool_version = '1.0.0' self.data_source = 'GitHub API' - def repo_info_model(self, task, repo_id): + def releases_model(self, task, repo_id): github_url = task['given']['github_url'] 
- logging.info("Beginning filling the releases model for repo: " + github_url + "\n") + self.logger.info("Beginning filling the releases model for repo: " + github_url + "\n") owner, repo = self.get_owner_repo(github_url) @@ -70,7 +72,7 @@ def repo_info_model(self, task, repo_id): num_attempts = 0 success = False while num_attempts < 3: - logging.info("Hitting endpoint: {} ...\n".format(url)) + self.logger.info("Hitting endpoint: {} ...\n".format(url)) r = requests.post(url, json={'query': query}, headers=self.headers) self.update_gh_rate_limit(r) @@ -80,8 +82,8 @@ def repo_info_model(self, task, repo_id): data = json.loads(json.dumps(r.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) - if data['errors']['message'] == 'API rate limit exceeded': + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': self.update_gh_rate_limit(r) continue @@ -90,9 +92,9 @@ def repo_info_model(self, task, repo_id): data = data['data']['repository'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) break if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': self.update_gh_rate_limit(r, temporarily_disable=True) @@ -112,15 +114,15 @@ def repo_info_model(self, task, repo_id): if 'node' in n: release = n['node'] insert_release(self, repo_id, owner, release) - logging.info("There's no release to insert. Current node is not available in releases: {}\n".format(n)) - logging.info("There are no releases to insert for current repository: {}\n".format(data)) - logging.info("Graphql response does not contain releases: {}\n".format(data)) - logging.info("Graphql response does not contain repository: {}\n".format(data)) + self.logger.info("There's no release to insert. 
Current node is not available in releases: {}\n".format(n)) + self.logger.info("There are no releases to insert for current repository: {}\n".format(data)) + self.logger.info("Graphql response does not contain releases: {}\n".format(data)) + self.logger.info("Graphql response does not contain repository: {}\n".format(data)) def insert_release(self, repo_id, owner, release): author = release['author']['name']+'_'+release['author']['company'] # Put all data together in format of the table - logging.info(f'Inserting release for repo with id:{repo_id}, owner:{owner}, release name:{release['name']}\n') + self.logger.info(f'Inserting release for repo with id:{repo_id}, owner:{owner}, release name:{release["name"]}\n') release_inf = { 'release_id': release['id'], 'repo_id': repo_id, @@ -140,10 +142,10 @@ def insert_release(self, repo_id, owner, release): } result = self.db.execute(self.releases_table.insert().values(release_inf)) - logging.info(f"Primary Key inserted into releases table: {result.inserted_primary_key}\n") + self.logger.info(f"Primary Key inserted into releases table: {result.inserted_primary_key}\n") self.results_counter += 1 - logging.info(f"Inserted info for {owner}/{repo}/{release['name']}\n") + self.logger.info(f"Inserted info for {owner}/{repo}/{release['name']}\n") #Register this task as completed self.register_task_completion(task, release_id, "releases") diff --git a/workers/release_worker/release_worker/runtime.py b/workers/release_worker/release_worker/runtime.py deleted file mode 100644 --- a/workers/release_worker/release_worker/runtime.py +++ /dev/null @@ -1,101 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from release_worker.worker import ReleaseWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.gh_release_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.gh_release_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = 
read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'release_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.release_worker.{}".format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - 'gh_api_key': read_config('Database', 'key', 'AUGUR_GITHUB_API_KEY', 'key') - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database') - } - - #create instance of the worker - app.gh_release_worker = ReleaseWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - if app.gh_release_worker._child is not None: - app.gh_release_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - - - diff --git a/workers/release_worker/runtime.py b/workers/release_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/release_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.release_worker.release_worker import ReleaseWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ReleaseWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/release_worker/setup.py b/workers/release_worker/setup.py --- a/workers/release_worker/setup.py +++ b/workers/release_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="release_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'release_worker_start=release_worker.runtime:main', + 'release_worker_start=workers.release_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/repo_info_worker/__init__.py b/workers/repo_info_worker/__init__.py new file mode 100644 diff --git a/workers/repo_info_worker/repo_info_worker/worker.py b/workers/repo_info_worker/repo_info_worker.py similarity index 79% 
rename from workers/repo_info_worker/repo_info_worker/worker.py rename to workers/repo_info_worker/repo_info_worker.py --- a/workers/repo_info_worker/repo_info_worker/worker.py +++ b/workers/repo_info_worker/repo_info_worker.py @@ -1,45 +1,44 @@ import logging, os, sys, time, requests, json from datetime import datetime from multiprocessing import Process, Queue -from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base from workers.worker_base import Worker # NOTE: This worker primarily inserts rows into the REPO_INFO table, which serves the primary purposes of # 1. Displaying discrete metadata like "number of forks" and how they change over time # 2. Validating other workers, like those related to pull requests, issues, and commits. Our totals should be at or very near the totals in the repo_info table. -# This table also updates teh REPO table in 2 cases: +# This table also updates the REPO table in 2 cases: # 1. Recognizing when a repository is a forked repository by updating the "forked_from" field and # 2. Recognizing when a repository is archived, and recording the data we observed the change in status. class RepoInfoWorker(Worker): - def __init__(self, config): + def __init__(self, config={}): + + worker_type = "repo_info_worker" # Define what this worker can be given and know how to interpret given = [['github_url']] models = ['repo_info'] # Define the tables needed to insert, update, or delete on - data_tables = ['repo_info'] + data_tables = ['repo_info', 'repo'] operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) # Define data collection info self.tool_source = 'Repo Info Worker' - self.tool_version = '0.0.1' + self.tool_version = '1.0.0' self.data_source = 'GitHub API' def repo_info_model(self, task, repo_id): github_url = task['given']['github_url'] - logging.info("Beginning filling the repo_info model for repo: " + github_url + "\n") + self.logger.info("Beginning filling the repo_info model for repo: " + github_url + "\n") owner, repo = self.get_owner_repo(github_url) @@ -108,7 +107,7 @@ def repo_info_model(self, task, repo_id): num_attempts = 0 success = False while num_attempts < 3: - logging.info("Hitting endpoint: {} ...\n".format(url)) + self.logger.info("Hitting endpoint: {} ...\n".format(url)) r = requests.post(url, json={'query': query}, headers=self.headers) self.update_gh_rate_limit(r) @@ -118,8 +117,8 @@ def repo_info_model(self, task, repo_id): data = json.loads(json.dumps(r.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) - if data['errors']['message'] == 'API rate limit exceeded': + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': self.update_gh_rate_limit(r) continue @@ -128,9 +127,9 @@ def repo_info_model(self, task, repo_id): data = data['data']['repository'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) break if data['message'] == 
'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': self.update_gh_rate_limit(r, temporarily_disable=True) @@ -140,20 +139,14 @@ def repo_info_model(self, task, repo_id): continue num_attempts += 1 if not success: - self.register_task_failure(task, repo_id, "Failed to hit endpoint: {}".format(url)) + self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url)) return # Get committers count info that requires seperate endpoint committers_count = self.query_committers_count(owner, repo) - # Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table. - forked = self.is_forked(owner, repo) - archived = self.is_archived(owner, repo) - if archived is not False: - archived_date_collected = archived - archived = True # Put all data together in format of the table - logging.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') + self.logger.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') rep_inf = { 'repo_id': repo_id, 'last_updated': data['updatedAt'] if 'updatedAt' in data else None, @@ -187,23 +180,38 @@ def repo_info_model(self, task, repo_id): 'pull_requests_merged': data['pr_merged']['totalCount'] if data['pr_merged'] else None, 'tool_source': self.tool_source, 'tool_version': self.tool_version, - 'data_source': self.data_source, - 'forked_from': forked, - 'repo_archived': archived, - 'repo_archived_date_collected': archived_date_collected + 'data_source': self.data_source } result = self.db.execute(self.repo_info_table.insert().values(rep_inf)) - logging.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") + self.logger.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") self.results_counter += 1 - logging.info(f"Inserted info for {owner}/{repo}\n") + # Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table. 
+        forked = self.is_forked(owner, repo)
+        archived = self.is_archived(owner, repo)
+        archived_date_collected = None
+        if archived is not False:
+            archived_date_collected = archived
+            archived = 1
+        else:
+            archived = 0
+
+        rep_additional_data = {
+            'forked_from': forked,
+            'repo_archived': archived,
+            'repo_archived_date_collected': archived_date_collected
+        }
+        result = self.db.execute(self.repo_table.update().where(self.repo_table.c.repo_id==repo_id).values(rep_additional_data))
+        self.logger.info(f"Updated repo table with fork/archive data for repo id: {repo_id}\n")
+
+        self.logger.info(f"Inserted info for {owner}/{repo}\n")
 
-        #Register this task as completed
-        self.register_task_completion(task, repo_id, "repo_info")
+        # Register this task as completed
+        self.register_task_completion(self.task, repo_id, "repo_info")
 
     def query_committers_count(self, owner, repo):
-        logging.info('Querying committers count\n')
+        self.logger.info('Querying committers count\n')
         url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100'
         committers = 0
 
@@ -218,18 +226,18 @@ def query_committers_count(self, owner, repo):
             else:
                 url = r.links['next']['url']
         except Exception:
-            logging.exception('An error occured while querying contributor count\n')
+            self.logger.exception('An error occurred while querying contributor count\n')
 
         return committers
 
     def is_forked(self, owner, repo): #/repos/:owner/:repo parent
-        logging.info('Querying parent info to verify if the repo is forked\n')
+        self.logger.info('Querying parent info to verify if the repo is forked\n')
         url = f'https://api.github.com/repos/{owner}/{repo}'
 
         r = requests.get(url, headers=self.headers)
         self.update_gh_rate_limit(r)
 
-        data = self.get_repo_data(self, url, r)
+        data = self.get_repo_data(url, r)
 
         if 'fork' in data:
             if 'parent' in data:
@@ -239,13 +247,13 @@ def is_forked(self, owner, repo): #/repos/:owner/:repo parent
         return False
 
     def is_archived(self, owner, repo):
-        logging.info('Querying committers count\n')
+        self.logger.info('Querying repo to check whether it is archived\n')
        url = f'https://api.github.com/repos/{owner}/{repo}'
 
         r = requests.get(url, headers=self.headers)
        self.update_gh_rate_limit(r)
 
-        data = self.get_repo_data(self, url, r)
+        data = self.get_repo_data(url, r)
 
         if 'archived' in data:
             if data['archived']:
@@ -264,21 +272,21 @@ def get_repo_data(self, url, response):
         data = json.loads(json.dumps(response.text))
 
         if 'errors' in data:
-            logging.info("Error!: {}".format(data['errors']))
-            if data['errors']['message'] == 'API rate limit exceeded':
+            self.logger.info("Error!: {}".format(data['errors']))
+            if data['errors'][0]['message'] == 'API rate limit exceeded':
                 self.update_gh_rate_limit(response)
 
         if 'id' in data:
             success = True
         else:
-            logging.info("Request returned a non-data dict: {}\n".format(data))
+            self.logger.info("Request returned a non-data dict: {}\n".format(data))
             if data['message'] == 'Not Found':
-                logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
+                self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
             if data['message'] == 'You have triggered an abuse detection mechanism. 
Please wait a few minutes before you try again.': self.update_gh_rate_limit(r, temporarily_disable=True) if data['message'] == 'Bad credentials': self.update_gh_rate_limit(r, bad_credentials=True) if not success: - self.register_task_failure(task, repo_id, "Failed to hit endpoint: {}".format(url)) + self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url)) return data diff --git a/workers/repo_info_worker/repo_info_worker/__init__.py b/workers/repo_info_worker/repo_info_worker/__init__.py deleted file mode 100644 --- a/workers/repo_info_worker/repo_info_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""gh_repo_info_worker - Augur Worker that collects GitHub Repo Info data""" - -__version__ = '0.0.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/repo_info_worker/repo_info_worker/runtime.py b/workers/repo_info_worker/repo_info_worker/runtime.py deleted file mode 100644 --- a/workers/repo_info_worker/repo_info_worker/runtime.py +++ /dev/null @@ -1,55 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from repo_info_worker.worker import RepoInfoWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'repo_info_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.repo_info_worker.{}".format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - 'gh_api_key': read_config('Database', 'key', 'AUGUR_GITHUB_API_KEY', 'key') - } - - #create instance of the worker - app.worker = RepoInfoWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - if app.worker._child is not None: - app.worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/repo_info_worker/runtime.py b/workers/repo_info_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/repo_info_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.repo_info_worker.repo_info_worker import RepoInfoWorker +from 
workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = RepoInfoWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/repo_info_worker/setup.py b/workers/repo_info_worker/setup.py --- a/workers/repo_info_worker/setup.py +++ b/workers/repo_info_worker/setup.py @@ -13,22 +13,21 @@ def read(filename): setup( name="repo_info_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", author_email="[email protected]", description="Augur Worker that collects general data about a repo on GitHub", - packages=find_packages(exclude=('tests',)), + packages=find_packages(), install_requires=[ 'flask', 'requests', - 'psycopg2-binary', - 'click' + 'psycopg2-binary' ], entry_points={ 'console_scripts': [ - 'repo_info_worker_start=repo_info_worker.runtime:main', + 'repo_info_worker_start=workers.repo_info_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/standard_methods.py b/workers/standard_methods.py deleted file mode 100644 --- a/workers/standard_methods.py +++ /dev/null @@ -1,712 +0,0 @@ -""" Helper methods constant across all workers """ -import requests, datetime, time, traceback, json, os, sys, math -import sqlalchemy as s -import pandas as pd -import os -import sys, logging -from urllib.parse import urlparse - -def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): - """ map objects => { *our db col* : *gh json key*} """ - need_insertion_count = 0 - need_update_count = 0 - for i, obj in enumerate(new_data): - if type(obj) != dict: - logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) - continue - - obj['flag'] = 'none' # default of no action needed - existing_tuple = None - for db_dupe_key in list(duplicate_col_map.keys()): - - if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): - if table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'): - existing_tuple = table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] - continue - - logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) - obj['flag'] = 'need_insertion' - need_insertion_count += 1 - break - - if obj['flag'] == 'need_insertion': - logging.info('Already determined that current tuple needs insertion, skipping checking updates. 
' - 'Moving to next tuple.\n') - continue - - # If we need to check the values of the existing tuple to determine if an update is needed - for augur_col, value_check in value_update_col_map.items(): - not_nan_check = not (pd.isna(value_check) and pd.isna(existing_tuple[augur_col])) if value_check is not None else True - if existing_tuple[augur_col] != value_check and not_nan_check: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - if obj['flag'] == 'need_update': - logging.info('Already determined that current tuple needs update, skipping checking further updates. ' - 'Moving to next tuple.\n') - continue - - # Now check the existing tuple's values against the response values to determine if an update is needed - for col in update_col_map.keys(): - if update_col_map[col] not in obj: - continue - if obj[update_col_map[col]] == existing_tuple[col]: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + - "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count)) - return new_data - -def check_duplicates(new_data, table_values, key): - need_insertion = [] - for obj in new_data: - if type(obj) == dict: - if not table_values.isin([obj[key]]).any().any(): - need_insertion.append(obj) - # else: - # logging.info("Tuple with github's {} key value already".format(key) + - # "exists in our db: {}\n".format(str(obj[key]))) - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + - "was reduced to {} tuples.\n".format(str(len(need_insertion)))) - return need_insertion - -def connect_to_broker(self): - connected = False - for i in range(5): - try: - logging.info("attempt {}\n".format(i)) - if i > 0: - time.sleep(10) - requests.post('http://{}:{}/api/unstable/workers'.format( - self.config['broker_host'],self.config['broker_port']), json=self.specs) - logging.info("Connection to the broker was successful\n") - connected = True - break - except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker. Trying again...\n') - if not connected: - sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n') - -def dump_queue(queue): - """ - Empties all pending items in a queue and returns them in a list. 
- """ - result = [] - queue.put("STOP") - for i in iter(queue.get, 'STOP'): - result.append(i) - # time.sleep(.1) - return result - -def find_id_from_login(self, login): - idSQL = s.sql.text(""" - SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' - """.format(login)) - rs = pd.read_sql(idSQL, self.db, params={}) - data_list = [list(row) for row in rs.itertuples(index=False)] - try: - return data_list[0][0] - except: - logging.info("contributor needs to be added...") - - cntrb_url = ("https://api.github.com/users/" + login) - logging.info("Hitting endpoint: {} ...\n".format(cntrb_url)) - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'] if 'login' in contributor else None, - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, - "cntrb_canonical": None, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - return find_id_from_login(self, login) - -def get_owner_repo(github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - -def get_max_id(self, table, column, default=25150, operations_table=False): - maxIdSQL = s.sql.text(""" - SELECT max({0}.{1}) AS {1} - FROM {0} - """.format(table, column)) - db = self.db if not operations_table else self.helper_db - rs = pd.read_sql(maxIdSQL, db, params={}) - if rs.iloc[0][column] is not None: - max_id = int(rs.iloc[0][column]) + 1 - logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) - else: - max_id = default - logging.info("Could not find max id for {} column in the {} table... 
using default set to: \ - {}\n".format(column, table, max_id)) - return max_id - -def get_table_values(self, cols, tables, where_clause=""): - table_str = tables[0] - del tables[0] - - col_str = cols[0] - del cols[0] - - for table in tables: - table_str += ", " + table - for col in cols: - col_str += ", " + col - - tableValuesSQL = s.sql.text(""" - SELECT {} FROM {} {} - """.format(col_str, table_str, where_clause)) - logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL)) - values = pd.read_sql(tableValuesSQL, self.db, params={}) - return values - -def init_oauths(self): - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauthSQL = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(self.config['key'])) - for oauth in [{'oauth_id': 0, 'access_token': self.config['key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}\n".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - -def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}): - # Paginate backwards through all the tuples but get first page in order - # to determine if there are multiple pages and if the 1st page covers all - update_keys = list(update_col_map.keys()) if update_col_map else [] - update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] - cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - table_values = get_table_values(self, cols_query, [table], where_clause) - - i = 1 - multiple_pages = False - tuples = [] - while True: - num_attempts = 0 - success = False - while num_attempts < 3: - logging.info("Hitting endpoint: " + url.format(i) + " ...\n") - r = requests.get(url=url.format(i), headers=self.headers) - update_gh_rate_limit(self, r) - logging.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) - - try: - j = r.json() - except: - j = json.loads(json.dumps(r.text)) - - if type(j) != dict and type(j) != str: - success = True - break - elif type(j) == dict: - logging.info("Request returned a dict: {}\n".format(j)) - if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) - break - if j['message'] == 'You have 
triggered an abuse detection mechanism. Please wait a few minutes before you try again.': - num_attempts -= 1 - update_gh_rate_limit(self, r, temporarily_disable=True) - if j['message'] == 'Bad credentials': - update_gh_rate_limit(self, r, bad_credentials=True) - elif type(j) == str: - logging.info("J was string: {}\n".format(j)) - if '<!DOCTYPE html>' in j: - logging.info("HTML was returned, trying again...\n") - elif len(j) == 0: - logging.info("Empty string, trying again...\n") - else: - try: - j = json.loads(j) - success = True - break - except: - pass - num_attempts += 1 - if not success: - break - - # Find last page so we can decrement from there - if 'last' in r.links and not multiple_pages and not self.finishing_task: - param = r.links['last']['url'][-6:] - i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") - multiple_pages = True - elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") - elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ..." - " excess rate limit requests will be made\n") - - if len(j) == 0: - logging.info("Response was empty, breaking from pagination.\n") - break - - # Checking contents of requests with what we already have in the db - j = assign_tuple_action(self, j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) - if not j: - logging.info("Assigning tuple action failed, moving to next page.\n") - i = i + 1 if self.finishing_task else i - 1 - continue - try: - to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none'] - except Exception as e: - logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) - i = i + 1 if self.finishing_task else i - 1 - continue - if len(to_add) == 0 and multiple_pages and 'last' in r.links: - logging.info("{}".format(r.links['last'])) - if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown tuples, breaking from pagination.\n") - break - tuples += to_add - - i = i + 1 if self.finishing_task else i - 1 - - # Since we already wouldve checked the first page... 
break - if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") - break - - return tuples - -def query_github_contributors(self, entry_info, repo_id): - - """ Data collection function - Query the GitHub API for contributors - """ - logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n") - - github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] - - # Extract owner/repo from the url for the endpoint - path = urlparse(github_url) - split = path[2].split('/') - - owner = split[1] - name = split[2] - - # Handles git url case by removing the extension - if ".git" in name: - name = name[:-4] - - # Set the base of the url and place to hold contributors to insert - contributors_url = ("https://api.github.com/repos/" + owner + "/" + - name + "/contributors?per_page=100&page={}") - - # Get contributors that we already have stored - # Set our duplicate and update column map keys (something other than PK) to - # check dupicates/needed column updates with - table = 'contributors' - table_pkey = 'cntrb_id' - update_col_map = {'cntrb_email': 'email'} - duplicate_col_map = {'cntrb_login': 'login'} - - #list to hold contributors needing insertion or update - contributors = paginate(self, contributors_url, duplicate_col_map, update_col_map, table, table_pkey) - - logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") - - for repo_contributor in contributors: - try: - # Need to hit this single contributor endpoint to get extra data including... - # `created at` - # i think that's it - cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - canonical_email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'], - "cntrb_created_at": contributor['created_at'], - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - # "cntrb_type": , dont have a use for this as of now ... 
let it default to null - "cntrb_canonical": canonical_email, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - - # Commit insertion to table - if repo_contributor['flag'] == 'need_update': - result = self.db.execute(self.contributors_table.update().where( - self.history_table.c.cntrb_email==email).values(cntrb)) - logging.info("Updated tuple in the contributors table with existing email: {}".format(email)) - self.cntrb_id_inc = repo_contributor['pkey'] - elif repo_contributor['flag'] == 'need_insertion': - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) - self.results_counter += 1 - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - # Increment our global track of the cntrb id for the possibility of it being used as a FK - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - except Exception as e: - logging.info("Caught exception: {}".format(e)) - logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) - continue - -def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): - """ - Read a variable in specified section of the config file, unless provided an environment variable - - :param section: location of given variable - :param name: name of variable - """ - config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path) - _config_file_name = 'augur.config.json' - _config_bad = False - _already_exported = {} - _runtime_location = 'runtime/' - _default_config = {} - _config_file = None - - try: - _config_file = open(config_file_path, 'r+') - except: - print('Couldn\'t open {}'.format(_config_file_name)) - - # Load the config file - try: - config_text = _config_file.read() - _config = json.loads(config_text) - except json.decoder.JSONDecodeError as e: - if not _config_bad: - _using_config_file = False - print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: {}'.format(config_file_path, str(e))) - _config = _default_config - - value = None - if environment_variable is not None: - value = os.getenv(environment_variable) - if value is None: - try: - if name is not None: - value = _config[section][name] - else: - value = _config[section] - except Exception as e: - value = default - if not section in _config: - _config[section] = {} - - return value - - -def record_model_process(self, repo_id, model): - - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Stopped", - "total_results": self.results_counter - } - if self.finishing_task: - result = self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - self.history_id += 1 - else: - result = self.helper_db.execute(self.history_table.insert().values(task_history)) - logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) - self.history_id = int(result.inserted_primary_key[0]) - -def register_task_completion(self, task, repo_id, model): - # Task to send back to broker - task_completed = { - 'worker_id': self.config['id'], - 'job_type': "MAINTAIN", - 'repo_id': repo_id, - 'job_model': model - } - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] if 'git_url' in task['given'] else "INVALID_GIVEN" - if key == 'INVALID_GIVEN': - register_task_failure(self, task, repo_id, "INVALID_GIVEN: not github nor git url") - return - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Success", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job completion for: " + str(task_completed) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where( - self.job_table.c.job_model==model).values(updated_job)) - logging.info("Updated job process for model: " + model + "\n") - - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - - # Reset results counter for next task - self.results_counter = 0 - -def register_task_failure(self, task, repo_id, e): - - logging.info("Worker ran into an error for task: {}\n".format(task)) - logging.info("Printing traceback...\n") - tb = traceback.format_exc() - logging.info(tb) - - logging.info(f'This task inserted {self.results_counter} tuples before failure.\n') - logging.info("Notifying broker and logging task failure in database...\n") - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - url = task['given'][key] - - """ Query 
all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(url)) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - task['worker_id'] = self.config['id'] - try: - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=task) - except requests.exceptions.ConnectionError: - logging.error('Could not send task failure message to the broker\n') - except Exception: - logging.exception('An error occured while informing broker about task failure\n') - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": task['models'][0], - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error in the history table for: " + str(task) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==task['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + task['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - -def retrieve_tuple(self, key_values, tables): - table_str = tables[0] - del tables[0] - - key_values_items = list(key_values.items()) - for col, value in [key_values_items[0]]: - where_str = col + " = '" + value + "'" - del key_values_items[0] - - for col, value in key_values_items: - where_str += ' AND ' + col + " = '" + value + "'" - for table in tables: - table_str += ", " + table - - retrieveTupleSQL = s.sql.text(""" - SELECT * FROM {} WHERE {} - """.format(table_str, where_str)) - values = json.loads(pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")) - return values - -def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): - # Try to get rate limit from request headers, sometimes it does not work (GH's issue) - # In that case we just decrement from last recieved header count - if bad_credentials and len(self.oauths) > 1: - logging.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) - del self.oauths[0] - - if temporarily_disable: - logging.info("Github thinks we are abusing their api. 
Preventing use of this key until it resets...\n") - self.oauths[0]['rate_limit'] = 0 - else: - try: - self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - logging.info("Recieved rate limit from headers\n") - except: - self.oauths[0]['rate_limit'] -= 1 - logging.info("Headers did not work, had to decrement\n") - logging.info("Updated rate limit, you have: " + - str(self.oauths[0]['rate_limit']) + " requests remaining.\n") - if self.oauths[0]['rate_limit'] <= 0: - try: - reset_time = response.headers['X-RateLimit-Reset'] - except Exception as e: - logging.info("Could not get reset time from headers because of error: {}".format(e)) - logging.info('Headers: {}'.format(response.headers)) - reset_time = 3600 - time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() - logging.info("Rate limit exceeded, checking for other available keys to use.\n") - - # We will be finding oauth with the highest rate limit left out of our list of oauths - new_oauth = self.oauths[0] - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] - for oauth in other_oauths: - logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - response = requests.get(url=url, headers=self.headers) - oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() - - # Update oauth to switch to if a higher limit is found - if oauth['rate_limit'] > new_oauth['rate_limit']: - logging.info("Higher rate limit found in oauth: {}\n".format(oauth)) - new_oauth = oauth - elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: - logging.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) - new_oauth = oauth - - if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: - logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) - time.sleep(new_oauth['seconds_to_reset']) - - # Make new oauth the 0th element in self.oauths so we know which one is in use - index = self.oauths.index(new_oauth) - self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] - logging.info("Using oauth: {}\n".format(self.oauths[0])) - - # Change headers to be using the new oauth's key - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} diff --git a/workers/template_worker/__init__.py b/workers/template_worker/__init__.py new file mode 100644 diff --git a/workers/template_worker/runtime.py b/workers/template_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/template_worker/runtime.py @@ -0,0 +1,23 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.template_worker.template_worker import TemplateWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ + Creates the Flask app and data collection worker, then starts the Gunicorn server + """ + app = Flask(__name__) + app.worker = TemplateWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + 
app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/template_worker/setup.py b/workers/template_worker/setup.py --- a/workers/template_worker/setup.py +++ b/workers/template_worker/setup.py @@ -19,16 +19,15 @@ def read(filename): author="Augur Team", author_email="[email protected]", description="Template worker to be used as an example", - packages=find_packages(exclude=('tests',)), + packages=find_packages(), install_requires=[ 'flask', 'requests', - 'psycopg2-binary', - 'click' + 'psycopg2-binary' ], entry_points={ 'console_scripts': [ - 'template_worker_start=template_worker.runtime:main', + 'template_worker_start=workers.template_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/template_worker/template_worker/worker.py b/workers/template_worker/template_worker.py similarity index 76% rename from workers/template_worker/template_worker/worker.py rename to workers/template_worker/template_worker.py --- a/workers/template_worker/template_worker/worker.py +++ b/workers/template_worker/template_worker.py @@ -6,12 +6,16 @@ from workers.worker_base import Worker class TemplateWorker(Worker): - def __init__(self, config): + def __init__(self, config={}): - # Define what this worker can be given and know how to interpret + # Define the worker's type, which will be used for self identification. + # Should be unique among all workers and is the same key used to define + # this worker's settings in the configuration file. + worker_type = "template_worker" + # Define what this worker can be given and know how to interpret # given is usually either [['github_url']] or [['git_url']] (depending if your - # worker is exclusive to repos that are on the GitHub platform) + # worker is exclusive to repos that are on the GitHub platform) given = [[]] # The name the housekeeper/broker use to distinguish the data model this worker can fill @@ -28,7 +32,14 @@ def __init__(self, config): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Do any additional configuration after the general initialization has been run + self.config.update(config) + + # If you need to do some preliminary interactions with the database, these MUST go + # in the model method. The database connection is instantiated only inside of each + # data collection process # Define data collection info self.tool_source = 'Fake Template Worker' @@ -54,8 +65,11 @@ def fake_data_model(self, task, repo_id): } :param repo_id: the collect() method queries the repo_id given the git/github url and passes it along to make things easier. An int such as: 27869 + """ + # Any initial database instructions, like finding the last tuple inserted or generate the next ID value + # Collection and insertion of data happens here # ... 
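As a rough illustration of how the template pieces above fit together, a concrete model method built on this template might look like the sketch below. It is only a sketch: the `fake_data_table` attribute and the example columns are hypothetical, the `'fake_data'` model name is assumed from `fake_data_model`, and it relies on the `self.db`, `self.logger`, `self.results_counter`, and `register_task_completion()` members provided by `workers.worker_base.Worker`, plus `from datetime import datetime` at the top of the module.

    def fake_data_model(self, task, repo_id):
        # Sketch only: fake_data_table and these columns are illustrative, not part of the real schema
        example_row = {
            'repo_id': repo_id,
            'fake_metric': 42,
            'tool_source': self.tool_source,
            'tool_version': self.tool_version,
            'data_source': self.data_source,
            'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        }

        # Insert the row through the SQLAlchemy table object reflected by the base class
        result = self.db.execute(self.fake_data_table.insert().values(example_row))
        self.logger.info(f"Inserted fake data row: {result.inserted_primary_key}")
        self.results_counter += 1

        # Tell the broker and the operations tables that this task finished
        self.register_task_completion(task, repo_id, 'fake_data')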
diff --git a/workers/template_worker/template_worker/runtime.py b/workers/template_worker/template_worker/runtime.py deleted file mode 100644 --- a/workers/template_worker/template_worker/runtime.py +++ /dev/null @@ -1,58 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from template_worker.worker import TemplateWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'repo_info_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.template_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port) - } - - #create instance of the worker - app.template_worker = TemplateWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - - if app.template_worker._child is not None: - app.template_worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - - - diff --git a/workers/util.py b/workers/util.py --- a/workers/util.py +++ b/workers/util.py @@ -1,5 +1,6 @@ import os, json, requests, logging from flask import Flask, Response, jsonify, request +import gunicorn.app.base def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): """ @@ -47,7 +48,7 @@ def read_config(section, name=None, environment_variable=None, default=None, con return value -def create_server(app, worker): +def create_server(app, worker=None): """ Consists of AUGWOP endpoints for the broker to communicate to this worker Can post a new task to be added to the workers queue Can retrieve current status of the worker @@ -83,4 +84,28 @@ def heartbeat(): def augwop_config(): """ Retrieve worker's config """ - return app.worker.config \ No newline at end of file + return app.worker.config + +class WorkerGunicornApplication(gunicorn.app.base.BaseApplication): + + def __init__(self, app): + self.options = { + 'bind': '%s:%s' % (app.worker.config["host"], app.worker.config["port"]), + 'workers': 1, + 'errorlog': app.worker.config['server_logfile'], + 'accesslog': 
app.worker.config['server_logfile'], + 'loglevel': app.worker.config['log_level'], + 'capture_output': app.worker.config['capture_output'] + } + + self.application = app + super().__init__() + + def load_config(self): + config = {key: value for key, value in self.options.items() + if key in self.cfg.settings and value is not None} + for key, value in config.items(): + self.cfg.set(key.lower(), value) + + def load(self): + return self.application diff --git a/workers/value_worker/__init__.py b/workers/value_worker/__init__.py new file mode 100644 diff --git a/workers/value_worker/runtime.py b/workers/value_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/value_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.value_worker.value_worker import ValueWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ValueWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) \ No newline at end of file diff --git a/workers/value_worker/setup.py b/workers/value_worker/setup.py --- a/workers/value_worker/setup.py +++ b/workers/value_worker/setup.py @@ -5,33 +5,23 @@ from setuptools import find_packages from setuptools import setup - -def read(filename): - filename = os.path.join(os.path.dirname(__file__), filename) - text_type = type(u"") - with io.open(filename, mode="r", encoding='utf-8') as fd: - return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) - - setup( name="value_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', - author="Augurlabs", author_email="[email protected]", - description="Augur Worker that gathers value data", - long_description=read("README.md"), - packages=find_packages(exclude=('tests',)), - - install_requires=['flask', 'requests', 'psycopg2-binary', 'click'], - + install_requires=[ + 'flask', + 'requests', + 'psycopg2-binary', + ], entry_points={ 'console_scripts': [ - 'value_worker_start=value_worker.runtime:main', + 'value_worker_start=workers.value_worker.runtime:main', ], }, diff --git a/workers/value_worker/value_worker.py b/workers/value_worker/value_worker.py new file mode 100644 --- /dev/null +++ b/workers/value_worker/value_worker.py @@ -0,0 +1,94 @@ +import os, subprocess +from datetime import datetime +import logging +import requests +import json +from urllib.parse import quote +from multiprocessing import Process, Queue + +import pandas as pd +import sqlalchemy as s +from sqlalchemy.ext.automap import automap_base +from sqlalchemy import MetaData +from workers.worker_base import Worker + +class ValueWorker(Worker): + def __init__(self, config={}): + + worker_type = "value_worker" + + # Define what this worker can be given and know how to interpret + given = [['git_url']] + models = ['value'] + + # Define the tables needed to insert, update, or delete on + data_tables = ['repo_labor'] + operations_tables = ['worker_history', 'worker_job'] + + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, 
operations_tables) + + self.config.update({ + 'repo_directory': self.augur_config.get_value('Workers', 'facade_worker')['repo_directory'] + }) + + self.tool_source = 'Value Worker' + self.tool_version = '1.0.0' + self.data_source = 'SCC' + + def value_model(self, entry_info, repo_id): + """ Data collection and storage method + """ + self.logger.info(entry_info) + self.logger.info(repo_id) + + repo_path_sql = s.sql.text(""" + SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path + FROM repo + WHERE repo_id = :repo_id + """) + + relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1] + absolute_repo_path = self.config['repo_directory'] + relative_repo_path + + try: + self.generate_value_data(repo_id, absolute_repo_path) + except Exception as e: + self.logger.error(e) + + self.register_task_completion(entry_info, repo_id, "value") + + def generate_value_data(self, repo_id, path): + """Runs scc on repo and stores data in database + + :param repo_id: Repository ID + :param path: Absolute path of the Repostiory + """ + self.logger.info('Running `scc`....') + self.logger.info(f'Repo ID: {repo_id}, Path: {path}') + + output = subprocess.check_output([self.config['scc_bin'], '-f', 'json', path]) + records = json.loads(output.decode('utf8')) + + for record in records: + for file in record['Files']: + repo_labor = { + 'repo_id': repo_id, + 'rl_analysis_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'), + 'programming_language': file['Language'], + 'file_path': file['Location'], + 'file_name': file['Filename'], + 'total_lines': file['Lines'], + 'code_lines': file['Code'], + 'comment_lines': file['Comment'], + 'blank_lines': file['Blank'], + 'code_complexity': file['Complexity'], + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source, + 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') + } + + result = self.db.execute(self.repo_labor_table.insert().values(repo_labor)) + self.logger.info(f"Added Repo Labor Data: {result.inserted_primary_key}") diff --git a/workers/value_worker/value_worker/__init__.py b/workers/value_worker/value_worker/__init__.py deleted file mode 100644 --- a/workers/value_worker/value_worker/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""value_worker - Augur Worker that collects value data""" - -__tool_source__ = 'Value Worker' -__tool_version__ = '0.1.0' -__data_source__ = 'SCC' - -__author__ = 'Augur Team <[email protected]>' -__all__ = [] - diff --git a/workers/value_worker/value_worker/runtime.py b/workers/value_worker/value_worker/runtime.py deleted file mode 100644 --- a/workers/value_worker/value_worker/runtime.py +++ /dev/null @@ -1,122 +0,0 @@ -import json -import logging -import os -import subprocess -import sys - -import click -import requests -from flask import Flask, Response, jsonify, request - -from value_worker.worker import ValueWorker - -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - # POST a task to be added to the queue - if request.method == 'POST': - logging.info("Sending to 
work on task: {}".format(str(request.json))) - app.value_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.value_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51239, help='Port') [email protected]('--scc-bin', default=f'{os.environ["HOME"]}/go/bin/scc', help='scc binary') -def main(augur_url, host, port, scc_bin): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'value_worker', None, - { - "port": 37300, - "scc_bin": "/home/sean/go/bin/scc" - }) - - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.value_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - 'scc_bin': worker_info['scc_bin'], - 'repo_directory': read_config('Workers', 'facade_worker', None, None)['repo_directory'], - } - - # Create the worker that will be running on this server with specified config - app.value_worker = ValueWorker(config) - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - - app.run(debug=app.debug, host=host, port=worker_port) - if app.value_worker._child is not None: - app.value_worker._child.terminate() - try: - requests.post(f'http://{server["host"]}:{server["port"]}/api/unstable/workers/remove', json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/value_worker/value_worker/worker.py b/workers/value_worker/value_worker/worker.py deleted file mode 100644 --- a/workers/value_worker/value_worker/worker.py +++ /dev/null @@ -1,267 +0,0 @@ -import os, 
subprocess -from datetime import datetime -import logging -import requests -import json -from urllib.parse import quote -from multiprocessing import Process, Queue - -from value_worker import __data_source__, __tool_source__, __tool_version__ -import pandas as pd -import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData -from workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class ValueWorker: - def __init__(self, config, task=None): - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.config = config - - self.db = None - self.value_table = None - - self._task = task - self._queue = Queue() - self._child = None - - self.history_id = None - self.finishing_task = False - self.working_on = None - self.results_counter = 0 - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["value"] - } - ], - "config": [self.config] - } - - self._db_str = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], - self.config['password'], - self.config['host'], - self.config['port'], - self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self._db_str, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self._db_str, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - logging.info("Database connection established...") - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['repo_labor']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.repo_labor_table = Base.classes.repo_labor.__table__ - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - logging.info("ORM setup complete...") - - # Organize different api keys/oauths available - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauth_sql = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(0)) - - for oauth in [{'oauth_id': 0, 'access_token': 0}] + json.loads(pd.read_sql(oauth_sql, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': 
(datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) \ - - datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - - # Send broker hello message - connect_to_broker(self) - logging.info("Connected to the broker...\n") - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "key": "", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['github_api_key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - - self._task = value - self.run() - - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def value_model(self, entry_info, repo_id): - """ Data collection and storage method - """ - logging.info(entry_info) - logging.info(repo_id) - - repo_path_sql = s.sql.text(""" - SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path - FROM repo - WHERE repo_id = :repo_id - """) - - relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1] - absolute_repo_path = self.config['repo_directory'] + relative_repo_path - - try: - self.generate_value_data(repo_id, absolute_repo_path) - except Exception as e: - logging.error(e) - - register_task_completion(self, entry_info, repo_id, "value") - - def generate_value_data(self, repo_id, path): - """Runs scc on repo and stores data in database - - :param repo_id: Repository ID - :param path: Absolute path of the Repostiory - """ - logging.info('Running `scc`....') - logging.info(f'Repo ID: {repo_id}, Path: {path}') - - output = subprocess.check_output([self.config['scc_bin'], '-f', 'json', path]) - records = json.loads(output.decode('utf8')) - - for record in records: - for file in record['Files']: - repo_labor = { - 'repo_id': repo_id, - 'rl_analysis_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'), - 'programming_language': file['Language'], - 'file_path': file['Location'], - 'file_name': file['Filename'], - 'total_lines': file['Lines'], - 'code_lines': file['Code'], - 'comment_lines': file['Comment'], - 'blank_lines': file['Blank'], - 'code_complexity': file['Complexity'], - 'tool_source': __tool_source__, - 'tool_version': __tool_version__, - 'data_source': __data_source__, - 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') - } - - result = self.db.execute(self.repo_labor_table.insert().values(repo_labor)) - 
logging.info(f"Added Repo Labor Data: {result.inserted_primary_key}") - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - """ Query all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'value': - self.value_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() diff --git a/workers/worker_base.py b/workers/worker_base.py --- a/workers/worker_base.py +++ b/workers/worker_base.py @@ -1,47 +1,92 @@ """ Helper methods constant across all workers """ -import requests, datetime, time, traceback, json, os, sys, math +import requests, datetime, time, traceback, json, os, sys, math, logging +from logging import FileHandler, Formatter, StreamHandler from multiprocessing import Process, Queue import sqlalchemy as s import pandas as pd -import os -import sys, logging -from urllib.parse import urlparse -from workers.util import read_config +from pathlib import Path +from urllib.parse import urlparse, quote from sqlalchemy import MetaData from sqlalchemy.ext.automap import automap_base +from augur.config import AugurConfig +from augur.logging import AugurLogging class Worker(): - def __init__(self, config={}, given=[], models=[], data_tables=[], operations_tables=[]): + ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[], platform="github"): + + self.worker_type = worker_type self._task = None # task currently being worked on (dict) self._child = None # process of currently running task (multiprocessing process) self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes) + self.data_tables = data_tables + self.operations_tables = operations_tables + self._root_augur_dir = Worker.ROOT_AUGUR_DIR + self.platform = platform # count of tuples inserted in the database (to store stats for each task in op tables) self.results_counter = 0 - # if we are finishing a previous task, certain operations work differenty + # if we are finishing a previous task, certain operations work differently self.finishing_task = False - # Update config with options that are general and not specific to any worker - self.config = config + self.augur_config = AugurConfig(self._root_augur_dir) + + self.config = { + 'worker_type': self.worker_type, + 'host': self.augur_config.get_value("Server", "host"), + 'gh_api_key': self.augur_config.get_value('Database', 'key'), + 'offline_mode': False + } + 
self.config.update(self.augur_config.get_section("Logging")) + + try: + worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']] + self.config.update(worker_defaults) + except KeyError as e: + logging.warn('Could not get default configuration for {}'.format(self.config['worker_type'])) + + worker_info = self.augur_config.get_value('Workers', self.config['worker_type']) + self.config.update(worker_info) + + worker_port = self.config['port'] + while True: + try: + r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format( + self.config['host'], worker_port)).json() + if 'status' in r: + if r['status'] == 'alive': + worker_port += 1 + except: + break + self.config.update({ - 'port_broker': read_config('Server', 'port', 'AUGUR_PORT', 5000), - 'host_broker': read_config('Server', 'host', 'AUGUR_HOST', '0.0.0.0'), - 'host_database': read_config('Database', 'host', 'AUGUR_DB_HOST', 'host'), - 'port_database': read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - 'user_database': read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - 'name_database': read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - 'password_database': read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password') + "port": worker_port, + "id": "workers.{}.{}".format(self.worker_type, worker_port), + "capture_output": False, + 'location': 'http://{}:{}'.format(self.config["host"], worker_port), + 'port_broker': self.augur_config.get_value('Server', 'port'), + 'host_broker': self.augur_config.get_value('Server', 'host'), + 'host_database': self.augur_config.get_value('Database', 'host'), + 'port_database': self.augur_config.get_value('Database', 'port'), + 'user_database': self.augur_config.get_value('Database', 'user'), + 'name_database': self.augur_config.get_value('Database', 'name'), + 'password_database': self.augur_config.get_value('Database', 'password') }) + self.config.update(config) + + # Initialize logging in the main process + self.initialize_logging() + + # Clear log contents from previous runs + open(self.config["server_logfile"], "w").close() + open(self.config["collection_logfile"], "w").close() - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format( - self.config['id'].split('.')[len(self.config['id'].split('.')) - 1] - ), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) + # Get configured collection logger + self.logger = logging.getLogger(self.config["id"]) + self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) self.given = given self.models = models @@ -56,28 +101,100 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta ], 'config': self.config } - + + # Send broker hello message + if self.config["offline_mode"] is False: + self.connect_to_broker() + + try: + self.tool_source + self.tool_version + self.data_source + except: + self.tool_source = 'Augur Worker Testing' + self.tool_version = '0.0.0' + self.data_source = 'Augur Worker Testing' + + def __repr__(self): + return f"{self.config['id']}" + + def initialize_logging(self): + self.config["log_level"] = self.config["log_level"].upper() + if self.config["debug"]: + self.config["log_level"] = "DEBUG" + + if self.config["verbose"]: + format_string = AugurLogging.verbose_format_string + else: + format_string = AugurLogging.simple_format_string + + formatter = 
Formatter(fmt=format_string) + error_formatter = Formatter(fmt=AugurLogging.error_format_string) + + worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/" + Path(worker_dir).mkdir(exist_ok=True) + logfile_dir = worker_dir + f"/{self.worker_type}/" + Path(logfile_dir).mkdir(exist_ok=True) + + server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"]) + collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"]) + collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"]) + self.config.update({ + "logfile_dir": logfile_dir, + "server_logfile": server_logfile, + "collection_logfile": collection_logfile, + "collection_errorfile": collection_errorfile + }) + + collection_file_handler = FileHandler(filename=self.config["collection_logfile"], mode="a") + collection_file_handler.setFormatter(formatter) + collection_file_handler.setLevel(self.config["log_level"]) + + collection_errorfile_handler = FileHandler(filename=self.config["collection_errorfile"], mode="a") + collection_errorfile_handler.setFormatter(error_formatter) + collection_errorfile_handler.setLevel(logging.WARNING) + + logger = logging.getLogger(self.config["id"]) + logger.handlers = [] + logger.addHandler(collection_file_handler) + logger.addHandler(collection_errorfile_handler) + logger.setLevel(self.config["log_level"]) + logger.propagate = False + + if self.config["debug"]: + self.config["log_level"] = "DEBUG" + console_handler = StreamHandler() + console_handler.setFormatter(formatter) + console_handler.setLevel(self.config["log_level"]) + logger.addHandler(console_handler) + + if self.config["quiet"]: + logger.disabled = True + + self.logger = logger + + def initialize_database_connections(self): DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database'] ) # Create an sqlalchemy engine for both database schemas - logging.info("Making database connections... 
{}".format(DB_STR)) + self.logger.info("Making database connections") db_schema = 'augur_data' - self.db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, + self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(db_schema)}) helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, + self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(helper_schema)}) metadata = MetaData() helper_metadata = MetaData() # Reflect only the tables we will use for each schema's metadata object - metadata.reflect(self.db, only=data_tables) - helper_metadata.reflect(self.helper_db, only=operations_tables) + metadata.reflect(self.db, only=self.data_tables) + helper_metadata.reflect(self.helper_db, only=self.operations_tables) Base = automap_base(metadata=metadata) HelperBase = automap_base(metadata=helper_metadata) @@ -86,28 +203,27 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta HelperBase.prepare() # So we can access all our tables when inserting, updating, etc - for table in data_tables: + for table in self.data_tables: setattr(self, '{}_table'.format(table), Base.classes[table].__table__) try: - logging.info(HelperBase.classes.keys()) + self.logger.info(HelperBase.classes.keys()) except: pass - for table in operations_tables: + for table in self.operations_tables: try: setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__) except Exception as e: - logging.info("Error setting attribute for table: {} : {}".format(table, e)) + self.logger.error("Error setting attribute for table: {} : {}".format(table, e)) # Increment so we are ready to insert the 'next one' of each of these most recent ids self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 # Organize different api keys/oauths available - if 'gh_api_key' in self.config: - self.init_oauths() - - # Send broker hello message - self.connect_to_broker() + if 'gh_api_key' in self.config or 'gitlab_api_key' in self.config: + self.init_oauths(self.platform) + else: + self.oauths = [{'oauth_id': 0}] @property def task(self): @@ -128,7 +244,7 @@ def task(self, value): # This setting is set by the housekeeper and is attached to the task before it gets sent here if 'focused_task' in value: if value['focused_task'] == 1: - logging.info("Focused task is ON\n") + self.logger.debug("Focused task is ON\n") self.finishing_task = True self._task = value @@ -143,21 +259,23 @@ def run(self): """ Kicks off the processing of the queue if it is not already being processed Gets run whenever a new task is added """ - logging.info("Running...\n") # Spawn a subprocess to handle message reading and performing the tasks self._child = Process(target=self.collect, args=()) self._child.start() - + def collect(self): """ Function to process each entry in the worker's task queue Determines what action to take based off the message type """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() while True: if not self._queue.empty(): message = self._queue.get() # Get the task off our MP queue else: break - logging.info("Popped off message: {}\n".format(str(message))) + self.logger.info("Popped off message: {}\n".format(str(message))) if message['job_type'] == 'STOP': break @@ -172,13 
+290,13 @@ def collect(self): SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' """.format(message['given'][self.given[0][0]])) repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - + self.logger.info("repo_id for which data collection is being initiated: {}".format(str(repo_id))) # Call method corresponding to model sent in task try: model_method = getattr(self, '{}_model'.format(message['models'][0])) self.record_model_process(repo_id, 'repo_info') except Exception as e: - logging.info('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) + + self.logger.error('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) + 'must have name of {}_model'.format(message['models'][0])) self.register_task_failure(message, repo_id, e) break @@ -186,18 +304,53 @@ def collect(self): # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught # and worker can move onto the next task without stopping try: + self.logger.info("Calling model method {}_models".format(message['models'][0])) model_method(message, repo_id) - except Exception as e: + except Exception as e: # this could be a custom exception, might make things easier self.register_task_failure(message, repo_id, e) - pass + break + + self.logger.debug('Closing database connections\n') + self.db.dispose() + self.helper_db.dispose() + self.logger.info("Collection process finished") def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): - """ map objects => { *our db col* : *gh json key*} """ + """ Include an extra key-value pair on each element of new_data that represents + the action that should be taken with this element (i.e. 'need_insertion') + + :param new_data: List of dictionaries, data to be assigned an action to + :param table_values: Pandas DataFrame, existing data in the database to check + what action should be taken on the new_data depending on the presence of + each element in this DataFrame + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. + Example: {'id': 'gh_issue_id'} + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. Example: {'id': 'gh_issue_id'} + :param table_pkey: String, the field name of the primary key of the table in + the database that we are checking the table_values for. + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. 
+ Example: {'cntrb_id': None} + :return: List of dictionaries, contains all the same elements of new_data, except + each element now has an extra key-value pair with the key being 'flag', and + the value being 'need_insertion', 'need_update', or 'none' + """ need_insertion_count = 0 need_update_count = 0 for i, obj in enumerate(new_data): if type(obj) != dict: - logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) + self.logger.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) continue obj['flag'] = 'none' # default of no action needed @@ -206,31 +359,37 @@ def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_ if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): continue - logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) + self.logger.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) obj['flag'] = 'need_insertion' need_insertion_count += 1 break if obj['flag'] == 'need_insertion': - logging.info('Already determined that current tuple needs insertion, skipping checking updates. ' + self.logger.info('Already determined that current tuple needs insertion, skipping checking updates. ' 'Moving to next tuple.\n') continue - existing_tuple = table_values[table_values[db_dupe_key].isin( + try: + existing_tuple = table_values[table_values[db_dupe_key].isin( [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] + except Exception as e: + self.logger.info('Special case assign_tuple_action error') + self.logger.info(f'Error: {e}') + self.logger.info(f'Related vars: {table_values}, ' + + f'{table_values[db_dupe_key].isin([obj[duplicate_col_map[db_dupe_key]]])}') # If we need to check the values of the existing tuple to determine if an update is needed for augur_col, value_check in value_update_col_map.items(): not_nan_check = not (math.isnan(value_check) and math.isnan(existing_tuple[augur_col])) if value_check is not None else True if existing_tuple[augur_col] != value_check and not_nan_check: continue - logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) + self.logger.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) obj['flag'] = 'need_update' obj['pkey'] = existing_tuple[table_pkey] need_update_count += 1 if obj['flag'] == 'need_update': - logging.info('Already determined that current tuple needs update, skipping checking further updates. ' + self.logger.info('Already determined that current tuple needs update, skipping checking further updates. 
' 'Moving to next tuple.\n') continue @@ -240,25 +399,34 @@ def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_ continue if obj[update_col_map[col]] == existing_tuple[col]: continue - logging.info("Found a tuple that needs an update for column: {}\n".format(col)) + self.logger.info("Found a tuple that needs an update for column: {}\n".format(col)) obj['flag'] = 'need_update' obj['pkey'] = existing_tuple[table_pkey] need_update_count += 1 - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + + self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count)) return new_data - def check_duplicates(new_data, table_values, key): + def check_duplicates(self, new_data, table_values, key): + """ Filters what items of the new_data json (list of dictionaries) that are not + present in the table_values df + + :param new_data: List of dictionaries, new data to filter duplicates out of + :param table_values: Pandas DataFrame, existing data to check what data is already + present in the database + :param key: String, key of each dict in new_data whose value we are checking + duplicates with + :return: List of dictionaries, contains elements of new_data that are not already + present in the database + """ need_insertion = [] for obj in new_data: - if type(obj) == dict: - if not table_values.isin([obj[key]]).any().any(): - need_insertion.append(obj) - # else: - # logging.info("Tuple with github's {} key value already".format(key) + - # "exists in our db: {}\n".format(str(obj[key]))) - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + + if type(obj) != dict: + continue + if not table_values.isin([obj[key]]).any().any(): + need_insertion.append(obj) + self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + "was reduced to {} tuples.\n".format(str(len(need_insertion)))) return need_insertion @@ -266,16 +434,16 @@ def connect_to_broker(self): connected = False for i in range(5): try: - logging.info("attempt {}\n".format(i)) + self.logger.debug("Connecting to broker, attempt {}\n".format(i)) if i > 0: time.sleep(10) requests.post('http://{}:{}/api/unstable/workers'.format( self.config['host_broker'],self.config['port_broker']), json=self.specs) - logging.info("Connection to the broker was successful\n") + self.logger.info("Connection to the broker was successful\n") connected = True break except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker. Trying again...\n') + self.logger.error('Cannot connect to the broker. Trying again...\n') if not connected: sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n') @@ -290,23 +458,39 @@ def dump_queue(queue): # time.sleep(.1) return result - def find_id_from_login(self, login): + def find_id_from_login(self, login, platform='github'): + """ + Retrieves our contributor table primary key value for the contributor with + the given GitHub login credentials, if this contributor is not there, then + they get inserted. 
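+        (For example, a hypothetical call find_id_from_login('octocat') would return the
+        cntrb_id for that GitHub login, inserting a new contributors row first if the
+        login is not yet present.)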
+ + :param login: String, the GitHub login username to find the primary key id for + :return: Integer, the id of the row in our database with the matching GitHub login + """ idSQL = s.sql.text(""" - SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' - """.format(login)) + SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' \ + AND LOWER(data_source) = '{} api' + """.format(login, platform)) + + self.logger.info(idSQL) + rs = pd.read_sql(idSQL, self.db, params={}) data_list = [list(row) for row in rs.itertuples(index=False)] try: return data_list[0][0] except: - logging.info("contributor needs to be added...") + self.logger.info('contributor needs to be added...') - cntrb_url = ("https://api.github.com/users/" + login) - logging.info("Hitting endpoint: {} ...\n".format(cntrb_url)) + if platform == 'github': + cntrb_url = ("https://api.github.com/users/" + login) + elif platform == 'gitlab': + cntrb_url = ("https://gitlab.com/api/v4/users?username=" + login ) + self.logger.info("Hitting endpoint: {} ...\n".format(cntrb_url)) r = requests.get(url=cntrb_url, headers=self.headers) - self.update_gh_rate_limit(r) + self.update_rate_limit(r) contributor = r.json() + company = None location = None email = None @@ -317,46 +501,83 @@ def find_id_from_login(self, login): if 'email' in contributor: email = contributor['email'] - cntrb = { - "cntrb_login": contributor['login'] if 'login' in contributor else None, - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, - "cntrb_canonical": None, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } + if platform == 'github': + cntrb = { + "cntrb_login": contributor['login'] if 'login' in contributor else None, + "cntrb_email": email, + "cntrb_company": company, + "cntrb_location": location, + "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, + "cntrb_canonical": None, + "gh_user_id": contributor['id'], + "gh_login": contributor['login'], + "gh_url": contributor['url'], + "gh_html_url": contributor['html_url'], + "gh_node_id": contributor['node_id'], + "gh_avatar_url": contributor['avatar_url'], + "gh_gravatar_id": contributor['gravatar_id'], + "gh_followers_url": contributor['followers_url'], + "gh_following_url": contributor['following_url'], + "gh_gists_url": contributor['gists_url'], + "gh_starred_url": contributor['starred_url'], + "gh_subscriptions_url": contributor['subscriptions_url'], + "gh_organizations_url": contributor['organizations_url'], + "gh_repos_url": contributor['repos_url'], + "gh_events_url": 
contributor['events_url'], + "gh_received_events_url": contributor['received_events_url'], + "gh_type": contributor['type'], + "gh_site_admin": contributor['site_admin'], + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + + elif platform == 'gitlab': + cntrb = { + "cntrb_login": contributor[0]['username'] if 'username' in contributor[0] else None, + "cntrb_email": email, + "cntrb_company": company, + "cntrb_location": location, + "cntrb_created_at": contributor[0]['created_at'] if 'created_at' in contributor[0] else None, + "cntrb_canonical": None, + "gh_user_id": contributor[0]['id'], + "gh_login": contributor[0]['username'], + "gh_url": contributor[0]['web_url'], + "gh_html_url": None, + "gh_node_id": None, + "gh_avatar_url": contributor[0]['avatar_url'], + "gh_gravatar_id": None, + "gh_followers_url": None, + "gh_following_url": None, + "gh_gists_url": None, + "gh_starred_url": None, + "gh_subscriptions_url": None, + "gh_organizations_url": None, + "gh_repos_url": None, + "gh_events_url": None, + "gh_received_events_url": None, + "gh_type": None, + "gh_site_admin": None, + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted contributor: " + contributor['login'] + "\n") + self.logger.info("Inserted contributor: " + contributor[0]['username'] + "\n") - return self.find_id_from_login(login) + return self.find_id_from_login(login, platform) - def get_owner_repo(self, github_url): - split = github_url.split('/') + def get_owner_repo(self, git_url): + """ Gets the owner and repository names of a repository from a git url + + :param git_url: String, the git url of a repository + :return: Tuple, includes the owner and repository names in that order + """ + split = git_url.split('/') owner = split[-2] repo = split[-1] @@ -367,6 +588,19 @@ def get_owner_repo(self, github_url): return owner, repo def get_max_id(self, table, column, default=25150, operations_table=False): + """ Gets the max value (usually used for id/pk's) of any Integer column + of any table + + :param table: String, the table that consists of the column you want to + query a max value for + :param column: String, the column that you want to query the max value for + :param default: Integer, if there are no values in the + specified column, the value of this parameter will be returned + :param operations_table: Boolean, if True, this signifies that the table/column + that is wanted to be queried is in the augur_operations schema rather than + the augur_data schema. 
Default False + :return: Integer, the max value of the specified column/table + """ maxIdSQL = s.sql.text(""" SELECT max({0}.{1}) AS {1} FROM {0} @@ -375,14 +609,24 @@ def get_max_id(self, table, column, default=25150, operations_table=False): rs = pd.read_sql(maxIdSQL, db, params={}) if rs.iloc[0][column] is not None: max_id = int(rs.iloc[0][column]) + 1 - logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) + self.logger.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) else: max_id = default - logging.info("Could not find max id for {} column in the {} table... using default set to: \ - {}\n".format(column, table, max_id)) + self.logger.warning('Could not find max id for {} column in the {} table... ' + + 'using default set to: {}\n'.format(column, table, max_id)) return max_id def get_table_values(self, cols, tables, where_clause=""): + """ Can query all values of any column(s) from any table(s) + with an optional where clause + + :param cols: List of Strings, column(s) that user wants to query + :param tables: List of Strings, table(s) that user wants to query + :param where_clause: String, optional where clause to filter the values + queried + :return: Pandas DataFrame, contains all values queried in the columns, tables, and + optional where clause provided + """ table_str = tables[0] del tables[0] @@ -394,46 +638,103 @@ def get_table_values(self, cols, tables, where_clause=""): for col in cols: col_str += ", " + col - tableValuesSQL = s.sql.text(""" + table_values_sql = s.sql.text(""" SELECT {} FROM {} {} """.format(col_str, table_str, where_clause)) - logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL)) - values = pd.read_sql(tableValuesSQL, self.db, params={}) + self.logger.info('Getting table values with the following PSQL query: \n{}\n'.format( + table_values_sql)) + values = pd.read_sql(table_values_sql, self.db, params={}) return values - def init_oauths(self): + def init_oauths(self, platform="github"): self.oauths = [] self.headers = None - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - # Make a list of api key in the config combined w keys stored in the database - oauthSQL = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(self.config['gh_api_key'])) - for oauth in [{'oauth_id': 0, 'access_token': self.config['gh_api_key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}\n".format(oauth)) + # Select endpoint to hit solely to retrieve rate limit information from headers of the response + # Adjust header keys needed to fetch rate limit information from the API responses + if platform == "github": + url = "https://api.github.com/users/gabe-heim" + oauthSQL = s.sql.text(""" + SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'github' + """.format(self.config['gh_api_key'])) + key_name = "gh_api_key" + rate_limit_header_key = "X-RateLimit-Remaining" + rate_limit_reset_header_key = "X-RateLimit-Reset" + elif platform == "gitlab": + url = "https://gitlab.com/api/v4/version" + oauthSQL = s.sql.text(""" + SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'gitlab' + """.format(self.config['gitlab_api_key'])) + key_name = 
"gitlab_api_key" + rate_limit_header_key = "ratelimit-remaining" + rate_limit_reset_header_key = "ratelimit-reset" + + for oauth in [{'oauth_id': 0, 'access_token': self.config[key_name]}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): + if platform == "github": + self.headers = {'Authorization': 'token %s' % oauth['access_token']} + elif platform == "gitlab": + self.headers = {'Authorization': 'Bearer %s' % oauth['access_token']} + self.logger.info("Getting rate limit info for oauth: {}\n".format(oauth)) response = requests.get(url=url, headers=self.headers) self.oauths.append({ 'oauth_id': oauth['oauth_id'], 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() + 'rate_limit': int(response.headers[rate_limit_header_key]), + 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers[rate_limit_reset_header_key])) - datetime.datetime.now()).total_seconds() }) - logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) + self.logger.debug("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") + self.logger.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") # First key to be used will be the one specified in the config (first element in # self.oauths array will always be the key in use) + if platform == "github": + self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + elif platform == "gitlab": + self.headers = {'Authorization': 'Bearer %s' % self.oauths[0]['access_token']} + self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + self.logger.info("OAuth initialized") + + def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}, platform="github"): + """ Paginate either backwards or forwards (depending on the value of the worker's + finishing_task attribute) through all the GitHub or GitLab api endpoint pages. + + :param url: String, the url of the API endpoint we are paginating through, expects + a curly brace string formatter within the string to format the Integer + representing the page number that is wanted to be returned + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. Example: {'id': 'gh_issue_id'} + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. 
+ Example: {'id': 'gh_issue_id'} + :param table: String, the name of the table that holds the values to check for + duplicates/updates against + :param table_pkey: String, the field name of the primary key of the table in + the database that we are getting the values for to cross-reference to check + for duplicates. + :param where_clause: String, optional where clause to filter the values + that are queried when preparing the values that will be cross-referenced + for duplicates/updates + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. + Example: {'cntrb_id': None} + :return: List of dictionaries, all data points from the pages of the specified API endpoint + each with a 'flag' key-value pair representing the required action to take with that + data point (i.e. 'need_insertion', 'need_update', 'none') + """ - def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}): - # Paginate backwards through all the tuples but get first page in order - # to determine if there are multiple pages and if the 1st page covers all update_keys = list(update_col_map.keys()) if update_col_map else [] update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] @@ -446,10 +747,18 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh num_attempts = 0 success = False while num_attempts < 3: - logging.info("Hitting endpoint: " + url.format(i) + " ...\n") + self.logger.info(f'Hitting endpoint: {url.format(i)}...\n') r = requests.get(url=url.format(i), headers=self.headers) - self.update_gh_rate_limit(r) - logging.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) + + self.update_rate_limit(r, platform=platform) + if 'last' not in r.links: + last_page = None + else: + if platform == "github": + last_page = r.links['last']['url'][-6:].split('=')[1] + elif platform == "gitlab": + last_page = r.links['last']['url'].split('&')[2].split("=")[1] + self.logger.info("Analyzing page {} of {}\n".format(i, int(last_page) + 1 if last_page is not None else '*last page not known*')) try: j = r.json() @@ -460,21 +769,23 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh success = True break elif type(j) == dict: - logging.info("Request returned a dict: {}\n".format(j)) + self.logger.info("Request returned a dict: {}\n".format(j)) if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.warning("Github repo was not found or does not exist for endpoint: {}\n".format(url)) break if j['message'] == 'You have triggered an abuse detection mechanism. 
Please wait a few minutes before you try again.': num_attempts -= 1 - self.update_gh_rate_limit(r, temporarily_disable=True) + self.logger.info("rate limit update code goes here") + self.update_rate_limit(r, temporarily_disable=True,platform=platform) if j['message'] == 'Bad credentials': - self.update_gh_rate_limit(r, bad_credentials=True) + self.logger.info("rate limit update code goes here") + self.update_rate_limit(r, bad_credentials=True, platform=platform) elif type(j) == str: - logging.info("J was string: {}\n".format(j)) + self.logger.info(f'J was string: {j}\n') if '<!DOCTYPE html>' in j: - logging.info("HTML was returned, trying again...\n") + self.logger.info('HTML was returned, trying again...\n') elif len(j) == 0: - logging.info("Empty string, trying again...\n") + self.logger.warning('Empty string, trying again...\n') else: try: j = json.loads(j) @@ -488,44 +799,52 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh # Find last page so we can decrement from there if 'last' in r.links and not multiple_pages and not self.finishing_task: - param = r.links['last']['url'][-6:] - i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + if platform == "github": + param = r.links['last']['url'][-6:] + i = int(param.split('=')[1]) + 1 + elif platform == "gitlab": + i = int(r.links['last']['url'].split('&')[2].split("=")[1]) + 1 + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") multiple_pages = True elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") + self.logger.info("Only 1 page of request\n") elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ..." + self.logger.info("Finishing a previous task, paginating forwards ..." " excess rate limit requests will be made\n") - + if len(j) == 0: - logging.info("Response was empty, breaking from pagination.\n") + self.logger.info("Response was empty, breaking from pagination.\n") break - + # Checking contents of requests with what we already have in the db j = self.assign_tuple_action(j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) if not j: - logging.info("Assigning tuple action failed, moving to next page.\n") + self.logger.error("Assigning tuple action failed, moving to next page.\n") i = i + 1 if self.finishing_task else i - 1 continue try: - to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none'] + to_add = [obj for obj in j if obj not in tuples and (obj['flag'] != 'none')] except Exception as e: - logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) + self.logger.error("Failure accessing data of page: {}. 
Moving to next page.\n".format(e)) i = i + 1 if self.finishing_task else i - 1 continue if len(to_add) == 0 and multiple_pages and 'last' in r.links: - logging.info("{}".format(r.links['last'])) - if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown tuples, breaking from pagination.\n") + self.logger.info("{}".format(r.links['last'])) + if platform == "github": + page_number = int(r.links['last']['url'][-6:].split('=')[1]) + elif platform == "gitlab": + page_number = int(r.links['last']['url'].split('&')[2].split("=")[1]) + if i - 1 != page_number: + self.logger.info("No more pages with unknown tuples, breaking from pagination.\n") break + tuples += to_add i = i + 1 if self.finishing_task else i - 1 # Since we already wouldve checked the first page... break if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") + self.logger.info("No more pages to check, breaking from pagination.\n") break return tuples @@ -535,24 +854,16 @@ def query_github_contributors(self, entry_info, repo_id): """ Data collection function Query the GitHub API for contributors """ - logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n") + self.logger.info(f'Querying contributors with given entry info: {entry_info}\n') github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] # Extract owner/repo from the url for the endpoint - path = urlparse(github_url) - split = path[2].split('/') - - owner = split[1] - name = split[2] - - # Handles git url case by removing the extension - if ".git" in name: - name = name[:-4] + owner, name = self.get_owner_repo(github_url) # Set the base of the url and place to hold contributors to insert - contributors_url = ("https://api.github.com/repos/" + owner + "/" + - name + "/contributors?per_page=100&page={}") + contributors_url = (f'https://api.github.com/repos/{owner}/{name}/' + + 'contributors?per_page=100&page={}') # Get contributors that we already have stored # Set our duplicate and update column map keys (something other than PK) to @@ -565,7 +876,7 @@ def query_github_contributors(self, entry_info, repo_id): #list to hold contributors needing insertion or update contributors = self.paginate(contributors_url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") + self.logger.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") for repo_contributor in contributors: try: @@ -573,7 +884,7 @@ def query_github_contributors(self, entry_info, repo_id): # `created at` # i think that's it cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) self.update_gh_rate_limit(r) contributor = r.json() @@ -624,69 +935,118 @@ def query_github_contributors(self, entry_info, repo_id): if repo_contributor['flag'] == 'need_update': result = self.db.execute(self.contributors_table.update().where( self.worker_history_table.c.cntrb_email==email).values(cntrb)) - logging.info("Updated tuple in the contributors table with existing email: {}".format(email)) + self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email)) 
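+                    # Rows flagged 'need_update' by assign_tuple_action() carry the existing
+                    # row's primary key as 'pkey', so that key is reused as the contributor id
+                    # below rather than inserting a duplicate contributors row.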
self.cntrb_id_inc = repo_contributor['pkey'] elif repo_contributor['flag'] == 'need_insertion': result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: " + contributor['login'] + "\n") + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") # Increment our global track of the cntrb id for the possibility of it being used as a FK self.cntrb_id_inc = int(result.inserted_primary_key[0]) except Exception as e: - logging.info("Caught exception: {}".format(e)) - logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) + self.logger.error("Caught exception: {}".format(e)) + self.logger.error("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) continue - def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): - """ - Read a variable in specified section of the config file, unless provided an environment variable + def query_gitlab_contribtutors(self, entry_info, repo_id): - :param section: location of given variable - :param name: name of variable - """ - config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path) - _config_file_name = 'augur.config.json' - _config_bad = False - _already_exported = {} - _runtime_location = 'runtime/' - _default_config = {} - _config_file = None + gitlab_url = entry_info['given']['gitlab_url'] if 'gitlab_url' in entry_info['given'] else entry_info['given']['git_url'] - try: - _config_file = open(config_file_path, 'r+') - except: - print('Couldn\'t open {}'.format(_config_file_name)) + self.logger.info("Querying contributors with given entry info: " + str(entry_info) + "\n") - # Load the config file - try: - config_text = _config_file.read() - _config = json.loads(config_text) - except json.decoder.JSONDecodeError as e: - if not _config_bad: - _using_config_file = False - print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: {}'.format(config_file_path, str(e))) - _config = _default_config - - value = None - if environment_variable is not None: - value = os.getenv(environment_variable) - if value is None: + path = urlparse(gitlab_url) + split = path[2].split('/') + + owner = split[1] + name = split[2] + + # Handles git url case by removing the extension + if ".git" in name: + name = name[:-4] + + url_encoded_format = quote(owner + '/' + name, safe='') + + table = 'contributors' + table_pkey = 'cntrb_id' + update_col_map = {'cntrb_email': 'email'} + duplicate_col_map = {'cntrb_login': 'email'} + + # list to hold contributors needing insertion or update + contributors = self.paginate("https://gitlab.com/api/v4/projects/" + url_encoded_format + "/repository/contributors?per_page=100&page={}", duplicate_col_map, update_col_map, table, table_pkey, platform='gitlab') + + for repo_contributor in contributors: try: - if name is not None: - value = _config[section][name] - else: - value = _config[section] - except Exception as e: - value = default - if not section in _config: - _config[section] = {} + cntrb_compressed_url = ("https://gitlab.com/api/v4/users?search=" + repo_contributor['email']) + self.logger.info("Hitting endpoint: " + cntrb_compressed_url + " ...\n") + r = requests.get(url=cntrb_compressed_url, headers=self.headers) + contributor_compressed = r.json() + + email = repo_contributor['email'] + if len(contributor_compressed) == 0 or "id" not in contributor_compressed[0]: + continue + + self.logger.info("Fetching for user: " + str(contributor_compressed[0]["id"])) + + cntrb_url = ("https://gitlab.com/api/v4/users/" + str(contributor_compressed[0]["id"])) + self.logger.info("Hitting end point to get complete contributor info now: " + cntrb_url + "...\n") + r = requests.get(url=cntrb_url, headers=self.headers) + contributor = r.json() - return value + cntrb = { + "cntrb_login": contributor.get('username', None), + "cntrb_created_at": contributor.get('created_at', None), + "cntrb_email": email, + "cntrb_company": contributor.get('organization', None), + "cntrb_location": contributor.get('location', None), + # "cntrb_type": , dont have a use for this as of now ... 
let it default to null + "cntrb_canonical": contributor.get('public_email', None), + "gh_user_id": contributor.get('id', None), + "gh_login": contributor.get('username', None), + "gh_url": contributor.get('web_url', None), + "gh_html_url": contributor.get('web_url', None), + "gh_node_id": None, + "gh_avatar_url": contributor.get('avatar_url', None), + "gh_gravatar_id": None, + "gh_followers_url": None, + "gh_following_url": None, + "gh_gists_url": None, + "gh_starred_url": None, + "gh_subscriptions_url": None, + "gh_organizations_url": None, + "gh_repos_url": None, + "gh_events_url": None, + "gh_received_events_url": None, + "gh_type": None, + "gh_site_admin": None, + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + + # Commit insertion to table + if repo_contributor['flag'] == 'need_update': + result = self.db.execute(self.contributors_table.update().where( + self.worker_history_table.c.cntrb_email == email).values(cntrb)) + self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email)) + self.cntrb_id_inc = repo_contributor['pkey'] + elif repo_contributor['flag'] == 'need_insertion': + result = self.db.execute(self.contributors_table.insert().values(cntrb)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.results_counter += 1 + self.logger.info("Inserted contributor: " + contributor['username'] + "\n") + + # Increment our global track of the cntrb id for the possibility of it being used as a FK + self.cntrb_id_inc = int(result.inserted_primary_key[0]) + + except Exception as e: + self.logger.info("Caught exception: {}".format(e)) + self.logger.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) + continue def record_model_process(self, repo_id, model): @@ -705,7 +1065,7 @@ def record_model_process(self, repo_id, model): self.history_id += 1 else: result = self.helper_db.execute(self.worker_history_table.insert().values(task_history)) - logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) + self.logger.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) self.history_id = int(result.inserted_primary_key[0]) def register_task_completion(self, task, repo_id, model): @@ -716,10 +1076,12 @@ def register_task_completion(self, task, repo_id, model): 'repo_id': repo_id, 'job_model': model } - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] if 'git_url' in task['given'] else "INVALID_GIVEN" + key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \ + 'gitlab_url' if 'gitlab_url' in task['given'] else 'INVALID_GIVEN' + task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] \ + if 'git_url' in task['given'] else task['given']['gitlab_url'] if 'gitlab_url' in task['given'] else 'INVALID_GIVEN' if key == 'INVALID_GIVEN': - self.register_task_failure(task, repo_id, "INVALID_GIVEN: not github nor git url") + self.register_task_failure(task, repo_id, "INVALID_GIVEN: Not a github/gitlab/git url.") return # Add to history table @@ -735,7 +1097,7 @@ def register_task_completion(self, task, repo_id, model): 
self.helper_db.execute(self.worker_history_table.update().where( self.worker_history_table.c.history_id==self.history_id).values(task_history)) - logging.info("Recorded job completion for: " + str(task_completed) + "\n") + self.logger.info("Recorded job completion for: " + str(task_completed) + "\n") # Update job process table updated_job = { @@ -746,28 +1108,31 @@ def register_task_completion(self, task, repo_id, model): } self.helper_db.execute(self.worker_job_table.update().where( self.worker_job_table.c.job_model==model).values(updated_job)) - logging.info("Updated job process for model: " + model + "\n") + self.logger.info("Updated job process for model: " + model + "\n") - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") + if self.config["offline_mode"] is False: + + # Notify broker of completion + self.logger.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + + "This task inserted: " + str(self.results_counter) + " tuples.\n") - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['host_broker'],self.config['port_broker']), json=task_completed) + requests.post('http://{}:{}/api/unstable/completed_task'.format( + self.config['host_broker'],self.config['port_broker']), json=task_completed) # Reset results counter for next task self.results_counter = 0 def register_task_failure(self, task, repo_id, e): - logging.info("Worker ran into an error for task: {}\n".format(task)) - logging.info("Printing traceback...\n") + self.logger.error("Worker ran into an error for task: {}\n".format(task)) + self.logger.error("Printing traceback...\n") tb = traceback.format_exc() - logging.info(tb) + self.logger.error(tb) - logging.info(f'This task inserted {self.results_counter} tuples before failure.\n') - logging.info("Notifying broker and logging task failure in database...\n") - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" + self.logger.info(f'This task inserted {self.results_counter} tuples before failure.\n') + self.logger.info("Notifying broker and logging task failure in database...\n") + key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \ + 'gitlab_url' if 'gitlab_url' in task['given'] else 'INVALID_GIVEN' url = task['given'][key] """ Query all repos with repo url of given task """ @@ -781,9 +1146,11 @@ def register_task_failure(self, task, repo_id, e): requests.post("http://{}:{}/api/unstable/task_error".format( self.config['host_broker'],self.config['port_broker']), json=task) except requests.exceptions.ConnectionError: - logging.error('Could not send task failure message to the broker\n') + self.logger.error('Could not send task failure message to the broker\n') + self.logger.error(e) except Exception: - logging.exception('An error occured while informing broker about task failure\n') + self.logger.error('An error occured while informing broker about task failure\n') + self.logger.error(e) # Add to history table task_history = { @@ -797,7 +1164,7 @@ def register_task_failure(self, task, repo_id, e): } self.helper_db.execute(self.worker_history_table.update().where(self.worker_history_table.c.history_id==self.history_id).values(task_history)) - logging.info("Recorded job error in the history table for: " + str(task) + "\n") + self.logger.error("Recorded job error in the history table 
for: " + str(task) + "\n") # Update job process table updated_job = { @@ -807,7 +1174,7 @@ def register_task_failure(self, task, repo_id, e): "analysis_state": 0 } self.helper_db.execute(self.worker_job_table.update().where(self.worker_job_table.c.job_model==task['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + task['models'][0] + "\n") + self.logger.info("Updated job process for model: " + task['models'][0] + "\n") # Reset results counter for next task self.results_counter = 0 @@ -830,35 +1197,97 @@ def retrieve_tuple(self, key_values, tables): SELECT * FROM {} WHERE {} """.format(table_str, where_str)) values = json.loads(pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")) - return values + return values + + def update_gitlab_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): + # Try to get rate limit from request headers, sometimes it does not work (GH's issue) + # In that case we just decrement from last recieved header count + if bad_credentials and len(self.oauths) > 1: + self.logger.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) + del self.oauths[0] + + if temporarily_disable: + self.logger.info("Gitlab rate limit reached. Temp. disabling...\n") + self.oauths[0]['rate_limit'] = 0 + else: + try: + self.oauths[0]['rate_limit'] = int(response.headers['RateLimit-Remaining']) + self.logger.info("Recieved rate limit from headers\n") + except: + self.oauths[0]['rate_limit'] -= 1 + self.logger.info("Headers did not work, had to decrement\n") + self.logger.info("Updated rate limit, you have: " + + str(self.oauths[0]['rate_limit']) + " requests remaining.\n") + if self.oauths[0]['rate_limit'] <= 0: + try: + reset_time = response.headers['RateLimit-Reset'] + except Exception as e: + self.logger.info("Could not get reset time from headers because of error: {}".format(e)) + reset_time = 3600 + time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() + self.logger.info("Rate limit exceeded, checking for other available keys to use.\n") + + # We will be finding oauth with the highest rate limit left out of our list of oauths + new_oauth = self.oauths[0] + # Endpoint to hit solely to retrieve rate limit information from headers of the response + url = "https://gitlab.com/api/v4/version" + + other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] + for oauth in other_oauths: + self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) + self.headers = {"PRIVATE-TOKEN" : oauth['access_token']} + response = requests.get(url=url, headers=self.headers) + oauth['rate_limit'] = int(response.headers['RateLimit-Remaining']) + oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() + + # Update oauth to switch to if a higher limit is found + if oauth['rate_limit'] > new_oauth['rate_limit']: + self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth)) + new_oauth = oauth + elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: + self.logger.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) + new_oauth = oauth + + if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: + self.logger.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) + 
time.sleep(new_oauth['seconds_to_reset']) + + # Make new oauth the 0th element in self.oauths so we know which one is in use + index = self.oauths.index(new_oauth) + self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] + self.logger.info("Using oauth: {}\n".format(self.oauths[0])) + + # Change headers to be using the new oauth's key + self.headers = {"PRIVATE-TOKEN" : self.oauths[0]['access_token']} + def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): # Try to get rate limit from request headers, sometimes it does not work (GH's issue) # In that case we just decrement from last recieved header count if bad_credentials and len(self.oauths) > 1: - logging.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) + self.logger.warning("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) del self.oauths[0] if temporarily_disable: - logging.info("Github thinks we are abusing their api. Preventing use of this key until it resets...\n") + self.logger.debug("Github thinks we are abusing their api. Preventing use of this key until it resets...\n") self.oauths[0]['rate_limit'] = 0 else: try: self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - logging.info("Recieved rate limit from headers\n") + self.logger.info("Recieved rate limit from headers\n") except: self.oauths[0]['rate_limit'] -= 1 - logging.info("Headers did not work, had to decrement\n") - logging.info("Updated rate limit, you have: " + + self.logger.info("Headers did not work, had to decrement\n") + self.logger.info("Updated rate limit, you have: " + str(self.oauths[0]['rate_limit']) + " requests remaining.\n") if self.oauths[0]['rate_limit'] <= 0: try: reset_time = response.headers['X-RateLimit-Reset'] except Exception as e: - logging.info("Could not get reset time from headers because of error: {}".format(error)) + self.logger.error("Could not get reset time from headers because of error: {}".format(e)) reset_time = 3600 time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() - logging.info("Rate limit exceeded, checking for other available keys to use.\n") + self.logger.info("Rate limit exceeded, checking for other available keys to use.\n") # We will be finding oauth with the highest rate limit left out of our list of oauths new_oauth = self.oauths[0] @@ -867,7 +1296,7 @@ def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disa other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] for oauth in other_oauths: - logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) + self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) self.headers = {'Authorization': 'token %s' % oauth['access_token']} response = requests.get(url=url, headers=self.headers) oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) @@ -875,20 +1304,28 @@ def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disa # Update oauth to switch to if a higher limit is found if oauth['rate_limit'] > new_oauth['rate_limit']: - logging.info("Higher rate limit found in oauth: {}\n".format(oauth)) + self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth)) new_oauth = oauth elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: - logging.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) + 
self.logger.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) new_oauth = oauth if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: - logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) + self.logger.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) time.sleep(new_oauth['seconds_to_reset']) # Make new oauth the 0th element in self.oauths so we know which one is in use index = self.oauths.index(new_oauth) self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] - logging.info("Using oauth: {}\n".format(self.oauths[0])) + self.logger.info("Using oauth: {}\n".format(self.oauths[0])) # Change headers to be using the new oauth's key self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + + def update_rate_limit(self, response, bad_credentials=False, temporarily_disable=False, platform="gitlab"): + if platform == 'gitlab': + return self.update_gitlab_rate_limit(response, bad_credentials=bad_credentials, + temporarily_disable=temporarily_disable) + elif platform == 'github': + return self.update_gh_rate_limit(response, bad_credentials=bad_credentials, + temporarily_disable=temporarily_disable) \ No newline at end of file
diff --git a/test/api/test_experimental_routes.py b/test/api/test_experimental_routes.py deleted file mode 100644 --- a/test/api/test_experimental_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_insight_routes.py b/test/api/test_insight_routes.py deleted file mode 100644 --- a/test/api/test_insight_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_message_routes.py b/test/api/test_message_routes.py deleted file mode 100644 --- a/test/api/test_message_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_platform_routes.py b/test/api/test_platform_routes.py deleted file mode 100644 --- a/test/api/test_platform_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_util_routes.py b/test/api/test_util_routes.py deleted file mode 100644 --- a/test/api/test_util_routes.py +++ /dev/null @@ -1,31 +0,0 @@ -import requests -import pytest - [email protected](scope="session") -def metrics(): - pass - -def test_common(endpoint="http://localhost:5000/api/unstable/repos"): - response = requests.get(endpoint) - data = response.json() - assert response.status_code == 200 - assert len(data) >= 1 - -def test_get_all_repos(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repos') - -def test_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_repos_in_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_get_repo_for_dosocs(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/dosocs/repos') - -def test_aggregate_summary_by_repo(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/repos/25430/aggregate-summary') - -def test_aggregate_summary_by_group(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/aggregate-summary') - diff --git a/test/metrics/test_experimental_metrics.py b/test/metrics/test_experimental_metrics.py deleted file mode 100644 --- a/test/metrics/test_experimental_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_insight_metrics.py b/test/metrics/test_insight_metrics.py deleted file mode 100644 --- a/test/metrics/test_insight_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_message_metrics.py b/test/metrics/test_message_metrics.py deleted file mode 100644 --- a/test/metrics/test_message_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_platform_metrics.py 
b/test/metrics/test_platform_metrics.py deleted file mode 100644 --- a/test/metrics/test_platform_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_util_metrics.py b/test/metrics/test_util_metrics.py deleted file mode 100644 --- a/test/metrics/test_util_metrics.py +++ /dev/null @@ -1,14 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - -# def test_get_repos_for_dosocs(metrics): -# assert metrics.get_repos_for_dosocs().isin( -# ['/home/sean/git-repos/25430/github.com/rails/rails-dom-testing']).any().any() - diff --git a/augur/housekeeper/__init__.py b/tests/__init__.py similarity index 100% rename from augur/housekeeper/__init__.py rename to tests/__init__.py diff --git a/tests/test_application.py b/tests/test_application.py new file mode 100644 --- /dev/null +++ b/tests/test_application.py @@ -0,0 +1,20 @@ +import pytest +import augur.application +import sqlalchemy as s +import json + +from augur.application import Application + +def test_init_augur_regular(): + augur_app = Application(disable_logs=True) + assert augur_app is not None + +def test_connect_to_database(monkeypatch): + def mock_fail_connection(self): + raise(s.exc.OperationalError("fake", "error", "message")) + + monkeypatch.setattr(s.engine.Engine, "connect", mock_fail_connection) + monkeypatch.setenv("AUGUR_LOG_QUIET", "1") + + with pytest.raises(s.exc.OperationalError): + augur_app = Application(disable_logs=True) diff --git a/test/metrics/test_commit_metrics.py b/tests/test_metrics/test_commit_metrics.py similarity index 90% rename from test/metrics/test_commit_metrics.py rename to tests/test_metrics/test_commit_metrics.py --- a/test/metrics/test_commit_metrics.py +++ b/tests/test_metrics/test_commit_metrics.py @@ -2,12 +2,6 @@ import pytest [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_annual_commit_count_ranked_by_repo_in_repo_group(metrics): assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10).iloc[0].net > 0 assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10, 25430).iloc[0].net > 0 diff --git a/test/metrics/test_contributor_metrics.py b/tests/test_metrics/test_contributor_metrics.py similarity index 91% rename from test/metrics/test_contributor_metrics.py rename to tests/test_metrics/test_contributor_metrics.py --- a/test/metrics/test_contributor_metrics.py +++ b/tests/test_metrics/test_contributor_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_contributors(metrics): # repo group assert metrics.contributors(20).iloc[0]['total'] > 0 diff --git a/test/metrics/test_issue_metrics.py b/tests/test_metrics/test_issue_metrics.py similarity index 97% rename from test/metrics/test_issue_metrics.py rename to tests/test_metrics/test_issue_metrics.py --- a/test/metrics/test_issue_metrics.py +++ b/tests/test_metrics/test_issue_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def 
test_issues_new(metrics): #repo_id assert metrics.issues_new(10, 25430, period='year').iloc[0]['issues'] > 0 diff --git a/test/metrics/test_pull_request_metrics.py b/tests/test_metrics/test_pull_request_metrics.py similarity index 91% rename from test/metrics/test_pull_request_metrics.py rename to tests/test_metrics/test_pull_request_metrics.py --- a/test/metrics/test_pull_request_metrics.py +++ b/tests/test_metrics/test_pull_request_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_pull_requests_merge_contributor_new(metrics): # repo id assert metrics.pull_requests_merge_contributor_new(10, repo_id=25430, period='year').isin( diff --git a/test/metrics/test_repo_meta_metrics.py b/tests/test_metrics/test_repo_meta_metrics.py similarity index 96% rename from test/metrics/test_repo_meta_metrics.py rename to tests/test_metrics/test_repo_meta_metrics.py --- a/test/metrics/test_repo_meta_metrics.py +++ b/tests/test_metrics/test_repo_meta_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_code_changes(metrics): #repo_id assert metrics.code_changes(10, 25430, period='year').isin([pd.Timestamp('2019-01-01T00:00:00+00:00'), 2]).any().any() diff --git a/test/api/runner.py b/tests/test_routes/runner.py similarity index 84% rename from test/api/runner.py rename to tests/test_routes/runner.py --- a/test/api/runner.py +++ b/tests/test_routes/runner.py @@ -9,9 +9,10 @@ start = subprocess.Popen(["augur", "run", "--disable-housekeeper", "--skip-cleanup"], stdout=FNULL, stderr=subprocess.STDOUT) print("Waiting for the server to start...") time.sleep(5) -process = subprocess.run(["pytest", "-ra", "--tb=short", "-x", "test/metrics"]) + +process = subprocess.run(["pytest", "tests/test_routes/"]) time.sleep(2) + subprocess.Popen(["augur", "util", "kill"], stdout=FNULL, stderr=subprocess.STDOUT) print("Server successfully shutdown.") - sys.exit(process.returncode) diff --git a/test/api/test_commit_routes.py b/tests/test_routes/test_commit_routes.py similarity index 97% rename from test/api/test_commit_routes.py rename to tests/test_routes/test_commit_routes.py --- a/test/api/test_commit_routes.py +++ b/tests/test_routes/test_commit_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_annual_commit_count_ranked_by_new_repo_in_repo_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/annual-commit-count-ranked-by-new-repo-in-repo-group/') data = response.json() diff --git a/test/api/test_contributor_routes.py b/tests/test_routes/test_contributor_routes.py similarity index 95% rename from test/api/test_contributor_routes.py rename to tests/test_routes/test_contributor_routes.py --- a/test/api/test_contributor_routes.py +++ b/tests/test_routes/test_contributor_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_contributors_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/contributors') data = response.json() diff --git a/test/api/test_issue_routes.py b/tests/test_routes/test_issue_routes.py similarity index 99% rename from test/api/test_issue_routes.py rename to 
tests/test_routes/test_issue_routes.py --- a/test/api/test_issue_routes.py +++ b/tests/test_routes/test_issue_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_issues_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/issues-new') data = response.json() diff --git a/test/api/test_pull_request_routes.py b/tests/test_routes/test_pull_request_routes.py similarity index 94% rename from test/api/test_pull_request_routes.py rename to tests/test_routes/test_pull_request_routes.py --- a/test/api/test_pull_request_routes.py +++ b/tests/test_routes/test_pull_request_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_pull_requests_merge_contributor_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/pull-requests-merge-contributor-new') data = response.json() diff --git a/test/api/test_repo_meta_routes.py b/tests/test_routes/test_repo_meta_routes.py similarity index 98% rename from test/api/test_repo_meta_routes.py rename to tests/test_routes/test_repo_meta_routes.py --- a/test/api/test_repo_meta_routes.py +++ b/tests/test_routes/test_repo_meta_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_code_changes_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/code-changes') data = response.json() @@ -51,7 +47,6 @@ def test_sub_projects_by_repo(metrics): def test_cii_best_practices_badge_by_repo(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/repos/25430/cii-best-practices-badge') - print(response) data = response.json() assert response.status_code == 200 assert len(data) >= 1 diff --git a/tests/test_routes/test_util_routes.py b/tests/test_routes/test_util_routes.py new file mode 100644 --- /dev/null +++ b/tests/test_routes/test_util_routes.py @@ -0,0 +1,20 @@ +import requests +import pytest + +from conftest import create_full_routes + +util_routes = [\ +"repos",\ +"repo-groups",\ +"repo-groups",\ +"dosocs/repos",\ +"repo-groups/<default_repo_group_id>/aggregate-summary",\ +"repo-groups/<default_repo_group_id>/repos/<default_repo_id>/aggregate-summary",\ +] + [email protected]("endpoint", create_full_routes(util_routes)) +def test_base_test(client, endpoint): + response = client.get(endpoint) + data = response.get_json() + assert response.status_code == 200 + assert len(data) >= 1 diff --git a/tests/test_workers/test_repo_info_worker.py b/tests/test_workers/test_repo_info_worker.py new file mode 100644 --- /dev/null +++ b/tests/test_workers/test_repo_info_worker.py @@ -0,0 +1,29 @@ +import pytest +from time import sleep + +from workers.repo_info_worker.repo_info_worker import RepoInfoWorker + [email protected] +def test_task(): + return { + "given": { + "github_url": "https://github.com/chaoss/augur.git" + }, + "models": ["repo_info"], + "job_type": "MAINTAIN", + "display_name": "repo_info model for url: https://github.com/chaoss/augur.git", + "focused_task": 1 + } + [email protected] +def repo_info_worker(): + config = { + "offline_mode": True, + "quiet": True + } + + repo_info_worker = RepoInfoWorker(config=config) + return repo_info_worker + +def test_repo_info_worker(repo_info_worker, test_task): + assert repo_info_worker is not None diff --git a/test/__init__.py b/workers/contributor_worker/__init__.py 
similarity index 100% rename from test/__init__.py rename to workers/contributor_worker/__init__.py diff --git a/test/test_model.py b/workers/github_worker/__init__.py similarity index 100% rename from test/test_model.py rename to workers/github_worker/__init__.py diff --git a/workers/metric_status_worker/tests/tests_worker.py b/workers/metric_status_worker/tests/tests_worker.py deleted file mode 100644 --- a/workers/metric_status_worker/tests/tests_worker.py +++ /dev/null @@ -1,16 +0,0 @@ -import requests -import pytest - -from metric_status_worker.worker import MetricsStatus - -def test_get_metric_index_in_table_row(): - row = "metric |sTatuS|TestString" - metric_status = MetricsStatus("api.github.com") - result = metric_status.get_metric_index_in_table_row(row) - print(result) - assert result == (0, 3) - -def test_is_has_link(): - metric_status = MetricsStatus("api.github.com") - re_result = metric_status.is_has_link(" [oss](augur" , None) - assert re_result == ('oss', 'augur') diff --git a/workers/tests/test_standard_methods.py b/workers/tests/test_standard_methods.py deleted file mode 100644 --- a/workers/tests/test_standard_methods.py +++ /dev/null @@ -1,28 +0,0 @@ -# Sample Test passing with nose and pytest -import pandas as pd -import pytest -from workers.standard_methods import check_duplicates, dump_queue, read_config -from queue import Queue - - -def test_check_duplicates(): - obj = {"website":["walmart.com"]} - new_data = [obj] - table_values = pd.read_csv("augur/data/companies.csv") - assert check_duplicates(new_data, table_values, "website") == [obj] - -def test_dump_queues(): - sample_queue = Queue() - list_sample = ["[email protected]", "[email protected]", "[email protected]"] - for list_item in list_sample: - sample_queue.put(list_item) - queue_to_list = dump_queue(sample_queue) - assert queue_to_list == ["[email protected]", "[email protected]", "[email protected]"] - -def test_read_config_no_exception(): - db_name = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur',config_file_path="augur.config.json") - assert db_name == "augur" - -def test_read_config_exception(): - with pytest.raises(AttributeError): - db_name = read_config('Server', 'username')
repo_info worker: dev/test branch

Please help us help you by filling out the following sections as thoroughly as you can.

**Description:**

Looks like the new Fork information collection has some kind of mismatch between the method and parameters passed:

```
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
  File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
    model_method(message, repo_id)
  File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
    forked = self.is_forked(owner, repo)
  File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
    data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
```

If the log does not provide enough info, let me know
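The traceback points at a common Python mistake: `self` is passed explicitly to a bound method, so `get_repo_data(self, url, r)` receives four positional arguments instead of three. Below is a minimal, self-contained sketch of that failure mode and the likely correction; the class and method names are taken from the traceback, while the fake response object and the call in `__main__` are assumptions added purely for illustration, not the worker's actual implementation.

```python
class RepoInfoWorker:
    def get_repo_data(self, url, response):
        # Bound method: Python supplies `self` implicitly, so callers pass
        # exactly two arguments (the URL and the HTTP response).
        return {"url": url, "response": response}

    def is_forked_buggy(self, owner, repo):
        url = f"https://api.github.com/repos/{owner}/{repo}"
        r = object()  # stand-in for the requests.Response the worker holds
        # Passing `self` explicitly turns this into a 4-argument call and
        # raises the same kind of TypeError seen in the log above.
        return self.get_repo_data(self, url, r)

    def is_forked_fixed(self, owner, repo):
        url = f"https://api.github.com/repos/{owner}/{repo}"
        r = object()
        # Likely correction: drop the explicit `self`.
        return self.get_repo_data(url, r)


if __name__ == "__main__":
    worker = RepoInfoWorker()
    print(worker.is_forked_fixed("davepacheco", "node-verror"))
    try:
        worker.is_forked_buggy("davepacheco", "node-verror")
    except TypeError as exc:
        print(exc)  # ... takes 3 positional arguments but 4 were given
```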
created_at: 2020-06-21T14:51:02Z
PASS_TO_PASS: []
FAIL_TO_PASS: []

repo: chaoss/augur
pull_number: 831
instance_id: chaoss__augur-831
issue_numbers: [ "737" ]
base_commit: cfd4bd3801692de34011044d7a27d79f3d06a234
diff --git a/augur/__init__.py b/augur/__init__.py --- a/augur/__init__.py +++ b/augur/__init__.py @@ -1,10 +1,4 @@ #SPDX-License-Identifier: MIT -# Functions -from .util import logger - -# Classes -from .application import Application - -# Plugins -from .augurplugin import AugurPlugin +import os +ROOT_AUGUR_DIRECTORY = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) diff --git a/augur/application.py b/augur/application.py --- a/augur/application.py +++ b/augur/application.py @@ -4,75 +4,52 @@ """ import os -import time +from pathlib import Path import logging -import multiprocessing as mp +from logging import FileHandler, Formatter +import coloredlogs import json -import pkgutil from beaker.cache import CacheManager from beaker.util import parse_cache_config_options -from sqlalchemy import create_engine -from sqlalchemy.orm import sessionmaker, scoped_session -from augur.models.common import Base -from augur import logger -from augur.metrics import MetricDefinitions -import augur.plugins +import sqlalchemy as s +import psycopg2 -from augur.cli.configure import default_config +from augur import ROOT_AUGUR_DIRECTORY +from augur.metrics import Metrics +from augur.config import AugurConfig +from augur.logging import AugurLogging -logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) -class Application(object): +class Application(): """Initalizes all classes from Augur using a config file or environment variables""" - def __init__(self): + def __init__(self, given_config={}, disable_logs=False, offline_mode=False): """ Reads config, creates DB session, and initializes cache """ - self.config_file_name = 'augur.config.json' - self.__shell_config = None - self.__export_file = None - self.__env_file = None - self.config = default_config - self.env_config = {} - self.root_augur_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - default_config_path = self.root_augur_dir + '/' + self.config_file_name - using_config_file = False - - - config_locations = [self.config_file_name, default_config_path, f"/opt/augur/{self.config_file_name}"] - if os.getenv('AUGUR_CONFIG_FILE') is not None: - config_file_path = os.getenv('AUGUR_CONFIG_FILE') - using_config_file = True - else: - for index, location in enumerate(config_locations): - try: - f = open(location, "r+") - config_file_path = os.path.abspath(location) - using_config_file = True - f.close() - break - except FileNotFoundError: - pass - - if using_config_file: - try: - with open(config_file_path, 'r+') as config_file_handle: - self.config = json.loads(config_file_handle.read()) - except json.decoder.JSONDecodeError as e: - logger.warn('%s could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: %s', config_file_path, str(e)) - else: - logger.warn('%s could not be parsed, using defaults.') - - self.load_env_configuration() - - # List of data sources that can do periodic updates + self.logging = AugurLogging(disable_logs=disable_logs) + self.root_augur_dir = ROOT_AUGUR_DIRECTORY + self.config = AugurConfig(self.root_augur_dir, given_config) + + # we need these for later + self.housekeeper = None + self.manager = None + + self.gunicorn_options = { + 'bind': '%s:%s' % (self.config.get_value("Server", "host"), self.config.get_value("Server", "port")), + 'workers': int(self.config.get_value('Server', 'workers')), + 'timeout': int(self.config.get_value('Server', 'timeout')) + } + self.logging.configure_logging(self.config) + self.gunicorn_options.update(self.logging.gunicorn_logging_options) self.cache_config = { 'cache.type': 'file', 'cache.data_dir': 'runtime/cache/', 'cache.lock_dir': 'runtime/cache/' } + if not os.path.exists(self.cache_config['cache.data_dir']): os.makedirs(self.cache_config['cache.data_dir']) if not os.path.exists(self.cache_config['cache.lock_dir']): @@ -80,59 +57,56 @@ def __init__(self): cache_parsed = parse_cache_config_options(self.cache_config) self.cache = CacheManager(**cache_parsed) - self.metrics = MetricDefinitions(self) + if offline_mode is False: + logger.debug("Running in online mode") + self.database, self.operations_database, self.spdx_database = self._connect_to_database() - def read_config(self, section, name=None): - """ - Read a variable in specified section of the config file, unless provided an environment variable + self.metrics = Metrics(self) - :param section: location of given variable - :param name: name of variable - """ - if section is not None: - value = self.config[section] - if name is not None: - try: - value = self.config[section][name] - except KeyError as e: - pass - else: - value = None - - if os.getenv('AUGUR_DEBUG_LOG_ENV', '0') == '1': - logger.debug('{}:{} = {}'.format(section, name, value)) - - return value - - def load_env_configuration(self): - self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') - self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') - self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') - self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') - self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') - self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') - - def set_env_value(self, section, name, environment_variable, sub_config=None): - """ - Sets names and values of specified config section according to their environment variables. 
- """ - # using sub_config lets us grab values from nested config blocks - if sub_config is None: - sub_config = self.config + def _connect_to_database(self): + logger.debug("Testing database connections") + user = self.config.get_value('Database', 'user') + host = self.config.get_value('Database', 'host') + port = self.config.get_value('Database', 'port') + dbname = self.config.get_value('Database', 'name') + + database_connection_string = 'postgresql://{}:{}@{}:{}/{}'.format( + user, self.config.get_value('Database', 'password'), host, port, dbname + ) + + csearch_path_options = 'augur_data' + + engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, + connect_args={'options': f'-csearch_path={csearch_path_options}'}, pool_pre_ping=True) + + csearch_path_options += ',spdx' + spdx_engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, + connect_args={'options': f'-csearch_path={csearch_path_options}'}, pool_pre_ping=True) + + helper_engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, + connect_args={'options': f'-csearch_path=augur_operations'}, pool_pre_ping=True) - if os.getenv(environment_variable) is not None: - if section is not None and name is not None: - sub_config[section][name] = os.getenv(environment_variable) try: - self.env_config[environment_variable] = os.getenv(environment_variable, sub_config[section][name]) - except KeyError as e: - print(environment_variable + " has no default value. Skipping...") - - @property - def shell(self, banner1='-- Augur Shell --', **kwargs): - from IPython.terminal.embed import InteractiveShellEmbed - if not self.__shell_config: - from augur.util import init_shell_config - self.__shell_config = init_shell_config() - return InteractiveShellEmbed(config=self.__shell_config, banner1=banner1, **kwargs) + engine.connect().close() + helper_engine.connect().close() + spdx_engine.connect().close() + return engine, helper_engine, spdx_engine + except s.exc.OperationalError as e: + logger.error("Unable to connect to the database. 
Terminating...") + raise(e) + + def shutdown(self): + if self.logging.stop_event is not None: + logger.debug("Stopping housekeeper logging listener...") + self.logging.stop_event.set() + + if self.housekeeper is not None: + logger.debug("Shutting down housekeeper updates...") + self.housekeeper.shutdown_updates() + self.housekeeper = None + + if self.manager is not None: + logger.debug("Shutting down manager...") + self.manager.shutdown() + self.manager = None diff --git a/augur/augurplugin.py b/augur/augurplugin.py deleted file mode 100644 --- a/augur/augurplugin.py +++ /dev/null @@ -1,25 +0,0 @@ -#SPDX-License-Identifier: MIT -""" -Provides a class that can be used to extend Augur -""" -class AugurPlugin(object): - """ - Defines a base class for Augur plugins to implement - """ - def __init__(self, augur_app): - self._augur = augur_app - - def create_routes(self, server): - """ - Creates a route for the given plugin and assigns it to the server - - :param server: desired server to create plugin route - """ - pass - - @staticmethod - def update(shared): - """ - Should implement a function that gathers data - """ - pass \ No newline at end of file diff --git a/augur/cli/__init__.py b/augur/cli/__init__.py --- a/augur/cli/__init__.py +++ b/augur/cli/__init__.py @@ -0,0 +1,34 @@ +import click +from functools import update_wrapper + +from augur.application import Application +from augur.config import AugurConfig +from augur.logging import AugurLogging, ROOT_AUGUR_DIRECTORY + +def pass_application(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application() + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def pass_config(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application(offline_mode=True).config + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def pass_logs_dir(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + config = AugurConfig(ROOT_AUGUR_DIRECTORY) + ctx.obj = AugurLogging.get_log_directories(config, reset_logfiles=False) + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def initialize_logging(f): + def new_func(*args, **kwargs): + AugurLogging(reset_logfiles=False) + return f(*args, **kwargs) + return update_wrapper(new_func, f) \ No newline at end of file diff --git a/augur/runtime.py b/augur/cli/_multicommand.py similarity index 63% rename from augur/runtime.py rename to augur/cli/_multicommand.py --- a/augur/runtime.py +++ b/augur/cli/_multicommand.py @@ -6,16 +6,14 @@ import os import sys import click +import importlib import augur.application CONTEXT_SETTINGS = dict(auto_envvar_prefix='AUGUR') class AugurMultiCommand(click.MultiCommand): - def __commands_folder(self): - return os.path.abspath( - os.path.join(os.path.dirname(__file__), 'cli') - ) + return os.path.abspath(os.path.dirname(__file__)) def list_commands(self, ctx): rv = [] @@ -26,13 +24,8 @@ def list_commands(self, ctx): return rv def get_command(self, ctx, name): - # try: - if sys.version_info[0] == 2: - name = name.encode('ascii', 'replace') - mod = __import__('augur.cli.' + name, - None, None, ['cli']) - - return mod.cli + module = importlib.import_module('.' 
+ name, 'augur.cli') + return module.cli @click.command(cls=AugurMultiCommand, context_settings=CONTEXT_SETTINGS) @click.pass_context @@ -40,11 +33,4 @@ def run(ctx): """ Augur is an application for open source community health analytics """ - - app = augur.application.Application() - ctx.obj = app - return ctx.obj - - -if __name__ == '__main__': - run() + return ctx diff --git a/augur/cli/configure.py b/augur/cli/configure.py --- a/augur/cli/configure.py +++ b/augur/cli/configure.py @@ -6,186 +6,15 @@ import os import click import json +import logging +from augur.config import default_config, ENVVAR_PREFIX +from augur.cli import initialize_logging +from augur.logging import ROOT_AUGUR_DIRECTORY + +logger = logging.getLogger(__name__) ENVVAR_PREFIX = "AUGUR_" -default_config = { - "Database": { - "name": "augur", - "host": "localhost", - "key": "key", - "password": "augur", - "port": 5432, - "schema": "augur_data", - "user": "augur" - }, - "Facade": { - "check_updates": 1, - "clone_repos": 1, - "create_xlsx_summary_files": 1, - "delete_marked_repos": 0, - "fix_affiliations": 1, - "force_analysis": 1, - "force_invalidate_caches": 1, - "force_updates": 1, - "limited_run": 0, - "multithreaded": 0, - "nuke_stored_affiliations": 0, - "pull_repos": 1, - "rebuild_caches": 1, - "run_analysis": 1 - }, - "Housekeeper": { - "jobs": [ - { - "all_focused": 1, - "delay": 150000, - "given": [ - "github_url" - ], - "model": "issues", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "pull_request_commits", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "repo_info", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "repo_group" - ], - "model": "commits", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "github_url" - ], - "model": "pull_requests", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "contributors", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "insights", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "badges", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "value", - "repo_group_id": 0 - }, - { - "delay": 100000, - "given": [ - "github_url" - ], - "model": "pull_request_files", - "repo_group_id": 0 - } - ] - }, - "Server": { - "cache_expire": "3600", - "host": "0.0.0.0", - "port": "5000", - "workers": 4, - "timeout": 60 - }, - "Frontend": { - "host": "0.0.0.0", - "port": "5000" - }, - "Workers": { - "facade_worker": { - "port": 50100, - "repo_directory": "repos/", - "switch": 1, - "workers": 1 - }, - "github_worker": { - "port": 50200, - "switch": 1, - "workers": 1 - }, - "insight_worker": { - "port": 50300, - "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"}, - "contamination": 0.041, - "switch": 0, - "workers": 1, - "training_days": 365, - "anomaly_days": 2 - }, - "linux_badge_worker": { - "port": 50400, - "switch": 1, - "workers": 1 - }, - "metric_status_worker": { - "port": 50500, - "switch": 0, - "workers": 1 - }, - "pull_request_worker": { - "port": 50600, - "switch": 1, - "workers": 1 - }, - "repo_info_worker": { - "port": 50700, - "switch": 1, - "workers": 1 - }, - "value_worker": { - "port": 50800, - "scc_bin": "scc", - "switch": 0, - "workers": 1 - }, - "contributor_worker": { 
- "port": 50900, - "switch": 1, - "workers": 1 - } - } - } @click.group('configure', short_help='Generate an augur.config.json') def cli(): @@ -200,7 +29,9 @@ def cli(): @click.option('--github_api_key', help="GitHub API key for data collection from the GitHub API", envvar=ENVVAR_PREFIX + 'GITHUB_API_KEY') @click.option('--facade_repo_directory', help="Directory on the database server where Facade should clone repos", envvar=ENVVAR_PREFIX + 'FACADE_REPO_DIRECTORY') @click.option('--rc-config-file', help="File containing existing config whose values will be used as the defaults", type=click.Path(exists=True)) -def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file): [email protected]('--gitlab_api_key', help="GitLab API key for data collection from the GitLab API", envvar=ENVVAR_PREFIX + 'GITLAB_API_KEY') +@initialize_logging +def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file, gitlab_api_key): """ Generate an augur.config.json """ @@ -227,10 +58,10 @@ def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, fa for index, key in enumerate(list(item[1].keys())): config[item[0]][key] = list(item[1].values())[index] - print('Predefined config successfully loaded') + logger.info('Predefined config successfully loaded') except Exception as e: - print(f"Error opening {rc_config_file}: {str(e)}") + logger.error(f"Error opening {rc_config_file}: {str(e)}") if db_name is not None: config['Database']['database'] = db_name # this is for backwards compatibility @@ -246,12 +77,14 @@ def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, fa config['Database']['password'] = db_password if github_api_key is not None: config['Database']['key'] = github_api_key + if gitlab_api_key is not None: + config['Database']['gitlab_api_key'] = gitlab_api_key if facade_repo_directory is not None: config['Workers']['facade_worker']['repo_directory'] = facade_repo_directory try: - with open(os.path.abspath('augur.config.json'), 'w') as f: + with open(os.path.abspath(ROOT_AUGUR_DIRECTORY + '/augur.config.json'), 'w') as f: json.dump(config, f, indent=4) - print('augur.config.json successfully created') + logger.info('augur.config.json successfully created') except Exception as e: - print("Error writing augur.config.json " + str(e)) + logger.error("Error writing augur.config.json " + str(e)) diff --git a/augur/cli/db.py b/augur/cli/db.py --- a/augur/cli/db.py +++ b/augur/cli/db.py @@ -1,5 +1,6 @@ from os import walk, chdir, environ, chmod, path import os +import logging from sys import exit import stat from collections import OrderedDict @@ -12,23 +13,23 @@ import pandas as pd from sqlalchemy import exc +from augur.cli import pass_config, pass_application + +logger = logging.getLogger(__name__) + @click.group('db', short_help='Database utilities') def cli(): pass @cli.command('add-repos') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repos(ctx, filename): +@pass_application +def add_repos(augur_app, filename): """ Add repositories to Augur's database """ - app = ctx.obj - - db = get_db_connection(app) - - df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), db) - repo_group_IDs = df['repo_group_id'].values.tolist() + df = augur_app.database.execute(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups")) + repo_group_IDs = [group[0] for group in df.fetchall()] insertSQL = 
s.sql.text(""" INSERT INTO augur_data.repo(repo_group_id, repo_git, repo_status, @@ -39,117 +40,85 @@ def add_repos(ctx, filename): with open(filename) as upload_repos_file: data = csv.reader(upload_repos_file, delimiter=',') for row in data: - print(f"Trying repo with Git URL `{row[1]}` to repo group {row[0]}...\n") - try: - if int(row[0]) in repo_group_IDs: - pd.read_sql(insertSQL, db, params={'repo_group_id': int(row[0]), 'repo_git': row[1]}) - else: - print(f"Invalid repo group id specified for {row[1]}, skipping.") - except exc.ResourceClosedError as error: - print(f"Successfully inserted {row[1]}.") - # pd.read_sql() will throw an AttributeError when it can't sucessfully "fetch" any rows from the result. - # Since there's no rows to fetch after a successful insert, this is how we know it worked. - # I know it's weird + logger.info(f"Inserting repo with Git URL `{row[1]}` into repo group {row[0]}") + if int(row[0]) in repo_group_IDs: + result = augur_app.database.execute(insertSQL, repo_group_id=int(row[0]), repo_git=row[1]) + else: + logger.warning(f"Invalid repo group id specified for {row[1]}, skipping.") @cli.command('get-repo-groups') [email protected]_context -def get_repo_groups(ctx): +@pass_application +def get_repo_groups(augur_app): """ List all repo groups and their associated IDs """ - app = ctx.obj - - db = get_db_connection(app) - - df = pd.read_sql(s.sql.text("SELECT repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), db) + df = pd.read_sql(s.sql.text("SELECT repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), augur_app.database) print(df) return df @cli.command('add-repo-groups') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repo_groups(ctx, filename): +@pass_application +def add_repo_groups(augur_app, filename): """ Create new repo groups in Augur's database """ - app = ctx.obj - - db = get_db_connection(app) - - df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), db) + df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), augur_app.database) repo_group_IDs = df['repo_group_id'].values.tolist() - insertSQL = s.sql.text(""" + insert_repo_group_sql = s.sql.text(""" INSERT INTO "augur_data"."repo_groups"("repo_group_id", "rg_name", "rg_description", "rg_website", "rg_recache", "rg_last_modified", "rg_type", "tool_source", "tool_version", "data_source", "data_collection_date") VALUES (:repo_group_id, :repo_group_name, '', '', 0, CURRENT_TIMESTAMP, 'Unknown', 'Loaded by user', '1.0', 'Git', CURRENT_TIMESTAMP); """) with open(filename) as create_repo_groups_file: data = csv.reader(create_repo_groups_file, delimiter=',') for row in data: - print(f"Trying repo group with name {row[1]} and ID {row[0]}...") - try: - if int(row[0]) not in repo_group_IDs: - repo_group_IDs.append(int(row[0])) - pd.read_sql(insertSQL, db, params={'repo_group_id': int(row[0]), 'repo_group_name': row[1]}) - else: - print(f"Repo group with ID {row[1]} for repo group {row[1]} already exists, skipping...") - except exc.ResourceClosedError as error: - print(f"Successfully inserted {row[1]}.\n") - # pd.read_sql() will throw an AttributeError when it can't sucessfully "fetch" any rows from the result. - # Since there's no rows to fetch after a successful insert, this is how we know it worked. 
- # I know it's weird, sue me (jk please don't) + logger.info(f"Inserting repo group with name {row[1]} and ID {row[0]}...") + if int(row[0]) not in repo_group_IDs: + repo_group_IDs.append(int(row[0])) + augur_app.database.execute(insert_repo_group_sql, repo_group_id=int(row[0]), repo_group_name=row[1]) + else: + logger.info(f"Repo group with ID {row[1]} for repo group {row[1]} already exists, skipping...") @cli.command('update-repo-directory') @click.argument('repo_directory') [email protected]_context -def update_repo_directory(ctx, repo_directory): +@pass_application +def update_repo_directory(augur_app, repo_directory): """ Update Facade worker repo cloning directory """ - app = ctx.obj - - db = get_db_connection(app) - updateRepoDirectorySQL = s.sql.text(""" UPDATE augur_data.settings SET VALUE = :repo_directory WHERE setting='repo_directory'; """) - try: - pd.read_sql(updateRepoDirectorySQL, db, params={'repo_directory': repo_directory}) - except exc.ResourceClosedError as error: - print(f"Successfully updated the Facade worker repo directory.") - # pd.read_sql() will throw an AttributeError when it can't sucessfully "fetch" any rows from the result. - # Since there's no rows to fetch after a successful insert, this is how we know it worked. - # I know it's weird, sue me (jk please don't) + augur_app.database.execute(updateRepoDirectorySQL, repo_directory=repo_directory) + logger.info(f"Updated Facade repo directory to: {repo_directory}") # get_db_version is a helper function to print_db_version and upgrade_db_version -def get_db_version(app): - db = get_db_connection(app) - +def get_db_version(augur_app): db_version_sql = s.sql.text(""" SELECT * FROM augur_operations.augur_settings WHERE setting = 'augur_data_version' """) - return int(db.execute(db_version_sql).fetchone()[2]) + return int(augur_app.database.execute(db_version_sql).fetchone()[2]) @cli.command('print-db-version') [email protected]_context -def print_db_version(ctx): +@pass_application +def print_db_version(augur_app): """ Get the version of the configured database """ - print(get_db_version(ctx.obj)) + print(get_db_version(augur_app)) @cli.command('upgrade-db-version') [email protected]_context -def upgrade_db_version(ctx): +@pass_application +def upgrade_db_version(augur_app): """ Upgrade the configured database to the latest version """ - app = ctx.obj - check_pgpass_credentials(app.config) - current_db_version = get_db_version(app) + check_pgpass_credentials(augur_app.config.get_raw_config()) + current_db_version = get_db_version(augur_app) update_scripts_filenames = [] for (_, _, filenames) in walk('schema/generate'): @@ -166,25 +135,24 @@ def upgrade_db_version(ctx): most_recent_version = list(target_version_script_map.keys())[-1] if current_db_version == most_recent_version: - print("Your database is already up to date. ") + logger.info("Your database is already up to date. ") elif current_db_version > most_recent_version: - print(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") + logger.error(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. 
Please contact your system administrator to resolve this error.") for target_version, script_location in target_version_script_map.items(): if target_version == current_db_version + 1: - print("Upgrading from", current_db_version, "to", target_version) - run_psql_command_in_database(app, '-f', f"schema/generate/{script_location}") + logger.info(f"Upgrading from {current_db_version} to {target_version}") + run_psql_command_in_database(augur_app, '-f', f"schema/generate/{script_location}") current_db_version += 1 @cli.command('check-for-upgrade') [email protected]_context -def check_for_upgrade(ctx): +@pass_application +def check_for_upgrade(augur_app): """ Upgrade the configured database to the latest version """ - app = ctx.obj - check_pgpass_credentials(app.config) - current_db_version = get_db_version(app) + check_pgpass_credentials(augur_app.config.get_raw_config()) + current_db_version = get_db_version(augur_app) update_scripts_filenames = [] for (_, _, filenames) in walk('schema/generate'): @@ -201,23 +169,21 @@ def check_for_upgrade(ctx): most_recent_version = list(target_version_script_map.keys())[-1] if current_db_version == most_recent_version: - print("Database is already up to date.") + logger.info("Database is already up to date.") elif current_db_version < most_recent_version: - print(f"Current database version: v{current_db_version}\nPlease upgrade to the most recent version (v{most_recent_version}) with augur db upgrade-db-version.") + logger.info(f"Current database version: v{current_db_version}\nPlease upgrade to the most recent version (v{most_recent_version}) with augur db upgrade-db-version.") elif current_db_version > most_recent_version: - print(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") + logger.error(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. 
Please contact your system administrator to resolve this error.") @cli.command('create-schema') [email protected]_context -def create_schema(ctx): +@pass_application +def create_schema(augur_app): """ Create schema in the configured database """ - app = ctx.obj - check_pgpass_credentials(app.config) - run_psql_command_in_database(app, '-f', 'schema/create_schema.sql') - + check_pgpass_credentials(augur_app.config.get_raw_config()) + run_psql_command_in_database(augur_app, '-f', 'schema/create_schema.sql') def generate_key(length): return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length)) @@ -228,75 +194,40 @@ def generate_api_key(ctx): """ Generate and set a new Augur API key """ - app = ctx.obj key = generate_key(32) ctx.invoke(update_api_key, api_key=key) print(key) @cli.command('update-api-key') @click.argument("api_key") [email protected]_context -def update_api_key(ctx, api_key): +@pass_application +def update_api_key(augur_app, api_key): """ Update the API key in the database to the given key """ - app = ctx.obj - - # we need to connect to augur_operations and not augur_data, so don't use - # get_db_connection - user = app.read_config('Database', 'user') - password = app.read_config('Database', 'password') - host = app.read_config('Database', 'host') - port = app.read_config('Database', 'port') - dbname = app.read_config('Database', 'name') - - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - user, password, host, port, dbname - ) - - db = s.create_engine(DB_STR, poolclass=s.pool.NullPool) - update_api_key_sql = s.sql.text(""" UPDATE augur_operations.augur_settings SET VALUE = :api_key WHERE setting='augur_api_key'; """) - db.execute(update_api_key_sql, api_key=api_key) + augur_app.database.execute(update_api_key_sql, api_key=api_key) + logger.info(f"Updated Augur API key to: {api_key}") @cli.command('get-api-key') [email protected]_context -def get_api_key(ctx): - app = ctx.obj - - # we need to connect to augur_operations and not augur_data, so don't use - # get_db_connection - user = app.read_config('Database', 'user') - password = app.read_config('Database', 'password') - host = app.read_config('Database', 'host') - port = app.read_config('Database', 'port') - dbname = app.read_config('Database', 'name') - - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - user, password, host, port, dbname - ) - - db = s.create_engine(DB_STR, poolclass=s.pool.NullPool) - +@pass_application +def get_api_key(augur_app): get_api_key_sql = s.sql.text(""" SELECT value FROM augur_operations.augur_settings WHERE setting='augur_api_key'; """) try: - print(db.execute(get_api_key_sql).fetchone()[0]) + print(augur_app.database.execute(get_api_key_sql).fetchone()[0]) except TypeError: - print("No Augur API key found.") - - + logger.error("No Augur API key found.") @cli.command('check-pgpass', short_help="Check the ~/.pgpass file for Augur's database credentials") [email protected]_context -def check_pgpass(ctx): - app = ctx.obj - check_pgpass_credentials(app.config) +@pass_config +def check_pgpass(config): + check_pgpass_credentials(config.get_raw_config()) @cli.command('init-database') @click.option('--default-db-name', default='postgres') @@ -307,12 +238,10 @@ def check_pgpass(ctx): @click.option('--target-password', default='augur') @click.option('--host', default='localhost') @click.option('--port', default='5432') [email protected]_context -def init_database(ctx, default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port): 
+def init_database(default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port): """ Create database with the given credentials using the given maintenance database """ - app = ctx.obj config = { 'Database': { 'name': default_db_name, @@ -331,15 +260,15 @@ def init_database(ctx, default_db_name, default_user, default_password, target_d def run_db_creation_psql_command(host, port, user, name, command): call(['psql', '-h', host, '-p', port, '-U', user, '-d', name, '-a', '-w', '-c', command]) -def run_psql_command_in_database(app, target_type, target): +def run_psql_command_in_database(augur_app, target_type, target): if target_type not in ['-f', '-c']: - print("Invalid target type. Exiting...") + logger.error("Invalid target type. Exiting...") exit(1) - call(['psql', '-h', app.read_config('Database', 'host'),\ - '-d', app.read_config('Database', 'name'),\ - '-U', app.read_config('Database', 'user'),\ - '-p', str(app.read_config('Database', 'port')),\ + call(['psql', '-h', augur_app.config.get_value('Database', 'host'),\ + '-d', augur_app.config.get_value('Database', 'name'),\ + '-U', augur_app.config.get_value('Database', 'user'),\ + '-p', str(augur_app.config.get_value('Database', 'port')),\ '-a', '-w', target_type, target ]) @@ -347,42 +276,30 @@ def check_pgpass_credentials(config): pgpass_file_path = environ['HOME'] + '/.pgpass' if not path.isfile(pgpass_file_path): - print("~/.pgpass does not exist, creating.") + logger.info("~/.pgpass does not exist, creating.") open(pgpass_file_path, 'w+') chmod(pgpass_file_path, stat.S_IWRITE | stat.S_IREAD) pgpass_file_mask = oct(os.stat(pgpass_file_path).st_mode & 0o777) if pgpass_file_mask != '0o600': - print("Updating ~/.pgpass file permissions.") + logger.info("Updating ~/.pgpass file permissions.") chmod(pgpass_file_path, stat.S_IWRITE | stat.S_IREAD) with open(pgpass_file_path, 'a+') as pgpass_file: end = pgpass_file.tell() + pgpass_file.seek(0) + credentials_string = str(config['Database']['host']) \ + ':' + str(config['Database']['port']) \ + ':' + str(config['Database']['name']) \ + ':' + str(config['Database']['user']) \ + ':' + str(config['Database']['password']) - pgpass_file.seek(0) + if credentials_string.lower() not in [''.join(line.split()).lower() for line in pgpass_file.readlines()]: - print("Database credentials not found in $HOME/.pgpass. 
Adding credentials...") + logger.info("Adding credentials to $HOME/.pgpass") pgpass_file.seek(end) pgpass_file.write(credentials_string + '\n') - -def get_db_connection(app): - - user = app.read_config('Database', 'user') - password = app.read_config('Database', 'password') - host = app.read_config('Database', 'host') - port = app.read_config('Database', 'port') - dbname = app.read_config('Database', 'name') - schema = app.read_config('Database', 'schema') - - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - user, password, host, port, dbname - ) - - return s.create_engine(DB_STR, poolclass=s.pool.NullPool) - + else: + logger.info("Credentials found in $HOME/.pgpass") diff --git a/augur/cli/logging.py b/augur/cli/logging.py new file mode 100644 --- /dev/null +++ b/augur/cli/logging.py @@ -0,0 +1,140 @@ +import click +import os +from os import walk + +from augur.cli import pass_logs_dir + [email protected]("logging", short_help="View Augur's log files") +def cli(): + pass + [email protected]("directory") +@pass_logs_dir +def directory(logs_dir): + """ + Print the location of Augur's logs directory + """ + print(logs_dir) + [email protected]("errors") [email protected]("worker", default="all") +@pass_logs_dir +def errors(logs_dir, worker): + """ + Output error messages of the main Augur and all worker logfiles or a specific worker logfile + """ + root_log_dir = logs_dir + worker_log_dir = logs_dir + "/workers/" + if worker is None: + worker = "all" + + if worker == "all": + files = [] + directories = [] + for (_, _, filenames) in walk(root_log_dir): + for file in filenames: + if file.endswith(".err"): + print_log(file, root_log_dir) + break + + files = [] + directories = [] + for (dirpath, dirnames, filenames) in walk(worker_log_dir): + directories.extend(dirnames) + break + + for directory in directories: + specific_worker_log_dir = worker_log_dir + directory + for (_, _, filenames) in walk(specific_worker_log_dir): + files.extend(filenames) + for file in [file for file in filenames if "collection" in file and file.endswith(".err")]: + print_log(file, specific_worker_log_dir) + break + else: + files = [] + specific_worker_log_dir = worker_log_dir + "/" + worker + "/" + for (_, _, filenames) in walk(specific_worker_log_dir): + files.extend(filenames) + for file in [file for file in filenames if "collection" in file and file.endswith(".err")]: + print_log(file, specific_worker_log_dir) + break + +def print_log(file, log_dir): + f = open(log_dir + "/" + file) + result = f.readlines() + print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + [email protected]("tail") [email protected]("lines", default=20) +@pass_logs_dir +def tail(logs_dir, lines): + """ + Output the last n lines of the main Augur and worker logfiles + """ + root_log_dir = logs_dir + worker_log_dir = logs_dir + "/workers/" + if lines is None: + lines = 20 + + files = [] + directories = [] + for (_, _, filenames) in walk(root_log_dir): + for file in filenames: + result = _tail(open(root_log_dir + "/" + file), lines) + print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + break + + files = [] + directories = [] + for (dirpath, dirnames, filenames) in walk(worker_log_dir): + directories.extend(dirnames) + break + + for directory in directories: + specific_worker_log_dir = worker_log_dir + directory + for (_, _, filenames) in walk(specific_worker_log_dir): + files.extend(filenames) + + for file in [file for file in filenames if "collection" in file]: + result = 
_tail(open(specific_worker_log_dir + "/" + file), lines) + print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + break + +def _tail(f, lines=20, _buffer=4098): + lines_found = [] + + # block counter will be multiplied by buffer + # to get the block size from the end + block_counter = -1 + + # loop until we find X lines + while len(lines_found) < lines: + try: + f.seek(block_counter * _buffer, os.SEEK_END) + except IOError: # either file is too small, or too many lines requested + f.seek(0) + lines_found = f.readlines() + break + + lines_found = f.readlines() + + # we found enough lines, get out + # Removed this line because it was redundant the while will catch + # it, I left it for history + # if len(lines_found) > lines: + # break + + # decrement the block counter to get the + # next X bytes + block_counter -= 1 + + return lines_found[-lines:] \ No newline at end of file diff --git a/augur/cli/run.py b/augur/cli/run.py --- a/augur/cli/run.py +++ b/augur/cli/run.py @@ -4,187 +4,143 @@ """ from copy import deepcopy -import os, time, atexit, subprocess, click +import os, time, atexit, subprocess, click, atexit, logging, sys import multiprocessing as mp import gunicorn.app.base -from gunicorn.six import iteritems from gunicorn.arbiter import Arbiter -from augur.housekeeper.housekeeper import Housekeeper -from augur.util import logger +from augur.housekeeper import Housekeeper from augur.server import Server - from augur.cli.util import kill_processes -import time +from augur.application import Application + +logger = logging.getLogger("augur") @click.command("run") @click.option("--disable-housekeeper", is_flag=True, default=False, help="Turns off the housekeeper") @click.option("--skip-cleanup", is_flag=True, default=False, help="Disables the old process cleanup that runs before Augur starts") [email protected]_context -def cli(ctx, disable_housekeeper, skip_cleanup): +def cli(disable_housekeeper, skip_cleanup): """ Start Augur's backend server """ + augur_app = Application() + logger.info("Augur application initialized") if not skip_cleanup: - logger.info("Cleaning up old Augur processes. 
Just a moment please...") - ctx.invoke(kill_processes) + logger.debug("Cleaning up old Augur processes...") + kill_processes() time.sleep(2) else: - logger.info("Skipping cleanup processes.") - - def get_process_id(name): - """Return process ids found by name or command - """ - child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False) - response = child.communicate()[0] - return [int(pid) for pid in response.split()] + logger.debug("Skipping process cleanup") - app = ctx.obj + master = initialize_components(augur_app, disable_housekeeper) + logger.info('Starting Gunicorn server in the background...') + if not disable_housekeeper: + logger.info('Housekeeper update process logs will now take over.') + else: + logger.info("Gunicorn server logs will be written to gunicorn.log") + logger.info("Augur is still running...don't close this process!") + Arbiter(master).run() - mp.set_start_method('forkserver', force=True) +def initialize_components(augur_app, disable_housekeeper): master = None - manager = None broker = None housekeeper = None - - logger.info("Booting broker and its manager...") - manager = mp.Manager() - broker = manager.dict() - - controller = app.read_config('Workers') - worker_pids = [] worker_processes = [] + mp.set_start_method('forkserver', force=True) if not disable_housekeeper: - if not controller: - return + logger.info("Booting manager") + manager = mp.Manager() + + logger.info("Booting broker") + broker = manager.dict() + + housekeeper = Housekeeper(broker=broker, augur_app=augur_app) + + controller = augur_app.config.get_section('Workers') + for worker in controller.keys(): - if not controller[worker]['switch']: - continue - logger.info("Your config has the option set to automatically boot {} instances of the {}".format(controller[worker]['workers'], worker)) - pids = get_process_id("/bin/sh -c cd workers/{} && {}_start".format(worker, worker)) - worker_pids += pids - if len(pids) > 0: - worker_pids.append(pids[0] + 1) - pids.append(pids[0] + 1) - logger.info("Found and preparing to kill previous {} worker pids: {}".format(worker,pids)) - for pid in pids: - try: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - - @atexit.register - def exit(): - try: - for pid in worker_pids: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - for process in worker_processes: - logger.info("Shutting down worker process with pid: {} ...".format(process.pid)) - process.terminate() + if controller[worker]['switch']: + for i in range(controller[worker]['workers']): + logger.info("Booting {} #{}".format(worker, i + 1)) + worker_process = mp.Process(target=worker_start, name=f"{worker}_{i}", kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) + worker_processes.append(worker_process) + worker_process.start() - if master is not None: - master.halt() - logger.info("Shutting down housekeeper updates...") - if housekeeper is not None: - housekeeper.shutdown_updates() - - # if hasattr(manager, "shutdown"): - # wait for the spawner and the worker threads to go down - # - if manager is not None: - manager.shutdown() - # check if it is still alive and kill it if necessary - # if manager._process.is_alive(): - manager._process.terminate() - - # Prevent multiprocessing's atexit from conflicting with gunicorn - logger.info("Killing main augur process with PID: {}".format(os.getpid())) - os.kill(os.getpid(), 9) - os._exit(0) + 
augur_app.manager = manager + augur_app.broker = broker + augur_app.housekeeper = housekeeper - if not disable_housekeeper: - logger.info("Booting housekeeper...") - jobs = deepcopy(app.read_config('Housekeeper', 'jobs')) - try: - housekeeper = Housekeeper( - jobs, - broker, - broker_host=app.read_config('Server', 'host'), - broker_port=app.read_config('Server', 'port'), - user=app.read_config('Database', 'user'), - password=app.read_config('Database', 'password'), - host=app.read_config('Database', 'host'), - port=app.read_config('Database', 'port'), - dbname=app.read_config('Database', 'name') - ) - except KeyboardInterrupt as e: - exit() - - logger.info("Housekeeper has finished booting.") - - if controller: - for worker in controller.keys(): - if controller[worker]['switch']: - for i in range(controller[worker]['workers']): - logger.info("Booting {} #{}".format(worker, i + 1)) - worker_process = mp.Process(target=worker_start, kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) - worker_process.start() - worker_processes.append(worker_process) - - host = app.read_config('Server', 'host') - port = app.read_config('Server', 'port') - workers = int(app.read_config('Server', 'workers')) - timeout = int(app.read_config('Server', 'timeout')) - options = { - 'bind': '%s:%s' % (host, port), - 'workers': workers, - 'accesslog': '-', - 'access_log_format': '%(h)s - %(t)s - %(r)s', - 'timeout': timeout - } - logger.info('Starting server...') - master = Arbiter(AugurGunicornApp(options, manager=manager, broker=broker, housekeeper=housekeeper)).run() + atexit._clear() + atexit.register(exit, augur_app, worker_processes, master) + return AugurGunicornApp(augur_app.gunicorn_options, augur_app=augur_app) def worker_start(worker_name=None, instance_number=0, worker_port=None): - time.sleep(120 * instance_number) - destination = subprocess.DEVNULL try: - destination = open("workers/{}/worker_{}.log".format(worker_name, worker_port), "a+") - except IOError as e: - logger.error("Error opening log file for auto-started worker {}: {}".format(worker_name, e)) - process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) - logger.info("{} booted.".format(worker_name)) + time.sleep(30 * instance_number) + destination = subprocess.DEVNULL + process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) + logger.info("{} #{} booted.".format(worker_name,instance_number+1)) + except KeyboardInterrupt as e: + pass + +def exit(augur_app, worker_processes, master): + + logger.info("Shutdown started for this Gunicorn worker...") + augur_app.shutdown() + + if worker_processes: + for process in worker_processes: + logger.debug("Shutting down worker process with pid: {}...".format(process.pid)) + process.terminate() + + if master is not None: + logger.debug("Shutting down Gunicorn server") + master.halt() + master = None + + logger.info("Shutdown complete") + sys.exit(0) class AugurGunicornApp(gunicorn.app.base.BaseApplication): """ Loads configurations, initializes Gunicorn, loads server """ - def __init__(self, options=None, manager=None, broker=None, housekeeper=None): - self.options = options or {} - self.manager = manager - self.broker = broker - self.housekeeper = housekeeper + def __init__(self, options={}, augur_app=None): + self.options = options + self.augur_app = augur_app + self.manager 
= self.augur_app.manager + self.broker = self.augur_app.broker + self.housekeeper = self.augur_app.housekeeper + self.server = None + logger.debug(f"Gunicorn will start {self.options['workers']} worker processes") super(AugurGunicornApp, self).__init__() - # self.cfg.pre_request.set(pre_request) def load_config(self): """ Sets the values for configurations """ - config = dict([(key, value) for key, value in iteritems(self.options) - if key in self.cfg.settings and value is not None]) - for key, value in iteritems(config): + config = {key: value for key, value in self.options.items() + if key in self.cfg.settings and value is not None} + for key, value in config.items(): self.cfg.set(key.lower(), value) - def load(self): + def get_augur_app(self): """ Returns the loaded server """ - server = Server(manager=self.manager, broker=self.broker, housekeeper=self.housekeeper) - return server.app + self.load() + return self.server.augur_app + def load(self): + """ + Returns the loaded server + """ + if self.server is None: + try: + self.server = Server(augur_app=self.augur_app) + except Exception as e: + logger.error(f"An error occured when Gunicorn tried to load the server: {e}") + return self.server.app diff --git a/augur/cli/util.py b/augur/cli/util.py --- a/augur/cli/util.py +++ b/augur/cli/util.py @@ -5,44 +5,47 @@ import os import signal +import logging from subprocess import call, run +import time import psutil import click import pandas as pd import sqlalchemy as s +from augur.cli import initialize_logging, pass_config, pass_application -from augur.cli.configure import default_config -from augur.cli.db import get_db_connection +logger = logging.getLogger(__name__) @click.group('util', short_help='Miscellaneous utilities') def cli(): pass @cli.command('export-env') [email protected]_context -def export_env(ctx): +@pass_config +def export_env(config): """ Exports your GitHub key and database credentials """ - app = ctx.obj export_file = open(os.getenv('AUGUR_EXPORT_FILE', 'augur_export_env.sh'), 'w+') export_file.write('#!/bin/bash') export_file.write('\n') env_file = open(os.getenv('AUGUR_ENV_FILE', 'docker_env.txt'), 'w+') - for env_var in app.env_config.items(): - export_file.write('export ' + env_var[0] + '="' + str(env_var[1]) + '"\n') - env_file.write(env_var[0] + '=' + str(env_var[1]) + '\n') + for env_var in config.get_env_config().items(): + if "LOG" not in env_var[0]: + logger.info(f"Exporting {env_var[0]}") + export_file.write('export ' + env_var[0] + '="' + str(env_var[1]) + '"\n') + env_file.write(env_var[0] + '=' + str(env_var[1]) + '\n') export_file.close() env_file.close() @cli.command('kill') [email protected]_context -def kill_processes(ctx): +@initialize_logging +def cli_kill_processes(): """ Terminates all currently running backend Augur processes, including any workers. Will only work in a virtual environment. 
""" @@ -50,20 +53,70 @@ def kill_processes(ctx): if processes != []: for process in processes: if process.pid != os.getpid(): - print(f"Killing {process.pid}: {' '.join(process.info['cmdline'][1:])}") + logger.info(f"Terminating process {process.pid}") try: process.send_signal(signal.SIGTERM) + logger.info(f"sending SIGTERM Signal to {process.pid}") + except psutil.NoSuchProcess as e: + pass + + logger.info(f"Waiting to check if processes terminated.") + + time.sleep(15) + logger.info(f"Checking on process termination.") + + processes = get_augur_processes() + + if processes != []: + for process in processes: + + if process.pid != os.getpid(): + logger.info(f"Killing process {process.pid}") + try: + process.send_signal(signal.SIGKILL) + logger.info(f"sending SIGKILL Signal to {process.pid}") + except psutil.NoSuchProcess as e: + pass + +def kill_processes(): + logger = logging.getLogger("augur") + processes = get_augur_processes() + if processes != []: + for process in processes: + if process.pid != os.getpid(): + logger.info(f"Terminating process {process.pid}") + try: + process.send_signal(signal.SIGTERM) + logger.info(f"sending SIGTERM Signal to {process.pid}") + except psutil.NoSuchProcess as e: + logger.warning(e) + logger.info(f"Waiting to check if processes terminated.") + + time.sleep(15) + logger.info(f"Checking on process termination.") + + processes = get_augur_processes() + + if processes != []: + for process in processes: + if process.pid != os.getpid(): + logger.info(f"Killing process {process.pid}") + logger.info(f"Killing process {process.pid}") + try: + process.send_signal(signal.SIGKILL) + logger.info(f"sending SIGKILL Signal to {process.pid}") except psutil.NoSuchProcess as e: pass @cli.command('list',) +@initialize_logging def list_processes(): """ Outputs the name and process ID (PID) of all currently running backend Augur processes, including any workers. Will only work in a virtual environment. 
""" processes = get_augur_processes() for process in processes: - print(process.pid, " ".join(process.info['cmdline'][1:])) + logger.info(f"Found process {process.pid}") def get_augur_processes(): processes = [] @@ -78,14 +131,11 @@ def get_augur_processes(): return processes @cli.command('repo-reset') [email protected]_context -def repo_reset(ctx): +@pass_application +def repo_reset(augur_app): """ Refresh repo collection to force data collection """ - app = ctx.obj - db = get_db_connection(app) - - db.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") + augur_app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") - print("Repos successfully reset.") + logger.info("Repos successfully reset") diff --git a/augur/config.py b/augur/config.py new file mode 100644 --- /dev/null +++ b/augur/config.py @@ -0,0 +1,349 @@ +import os +import json +import logging + +ENVVAR_PREFIX = "AUGUR_" + +default_config = { + "version": 1, + "Database": { + "name": "augur", + "host": "localhost", + "key": "key", + "password": "augur", + "port": 5432, + "user": "augur", + "gitlab_api_key":"gitlab_api_key" + }, + "Housekeeper": { + "jobs": [ + { + "all_focused": 1, + "delay": 150000, + "given": [ + "github_url" + ], + "model": "issues", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "pull_request_commits", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "repo_info", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "repo_group" + ], + "model": "commits", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "github_url" + ], + "model": "pull_requests", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "contributors", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "insights", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "badges", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "value", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "pull_request_files", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "releases", + "repo_group_id": 0 + } + ] + }, + "Workers": { + "facade_worker": { + "port": 50100, + "repo_directory": "repos/", + "switch": 1, + "workers": 1 + }, + "github_worker": { + "port": 50200, + "switch": 1, + "workers": 1 + }, + "insight_worker": { + "port": 50300, + "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", + "reviews": "pull_requests", "contributors-new": "new_contributors"}, + "confidence_interval": 95, + "contamination": 0.041, + "switch": 0, + "workers": 1, + "training_days": 365, + "anomaly_days": 2 + }, + "linux_badge_worker": { + "port": 50400, + "switch": 1, + "workers": 1 + }, + "metric_status_worker": { + "port": 50500, + "switch": 0, + "workers": 1 + }, + "pull_request_worker": { + "port": 50600, + "switch": 1, + "workers": 1 + }, + "repo_info_worker": { + "port": 50700, + "switch": 1, + "workers": 1 + }, + "value_worker": { + "port": 50800, + "scc_bin": "scc", + "switch": 0, + "workers": 1 + }, + "contributor_worker": { + "port": 50900, + "switch": 1, + "workers": 1 + }, + "gitlab_issues_worker": { 
+ "port": 51000, + "switch": 1, + "workers": 1 + }, + "release_worker": { + "port": 51100, + "switch": 1, + "workers": 1 + }, + "gitlab_merge_request_worker": { + "port": 51200, + "switch": 1, + "workers": 1 + } + }, + "Facade": { + "check_updates": 1, + "clone_repos": 1, + "create_xlsx_summary_files": 1, + "delete_marked_repos": 0, + "fix_affiliations": 1, + "force_analysis": 1, + "force_invalidate_caches": 1, + "force_updates": 1, + "limited_run": 0, + "multithreaded": 0, + "nuke_stored_affiliations": 0, + "pull_repos": 1, + "rebuild_caches": 1, + "run_analysis": 1 + }, + "Server": { + "cache_expire": "3600", + "host": "0.0.0.0", + "port": "5000", + "workers": 4, + "timeout": 60 + }, + "Frontend": { + "host": "0.0.0.0", + "port": "5000" + }, + "Logging": { + "logs_directory": "logs/", + "log_level": "INFO", + "verbose": 0, + "quiet": 0, + "debug": 0 + } + } + +logger = logging.getLogger(__name__) + +class AugurConfig(): + """docstring for AugurConfig""" + def __init__(self, root_augur_dir, given_config={}): + self._default_config_file_name = 'augur.config.json' + self._root_augur_dir = root_augur_dir + self._default_config = default_config + self._env_config = {} + self.load_config() + self.version = self.get_version() + self._config.update(given_config) + + def get_section(self, section_name): + try: + return self._config[section_name] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name} not found in loaded config. Checking default config") + try: + return self._default_config[section_name] + except KeyError as e: + logger.error(f"No defaults found for {section_name}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}") + + def get_version(self): + try: + return self._config["version"] + except KeyError as e: + logger.warning("No config version found. Setting version to 0.") + return 0 + + def get_value(self, section_name, value): + try: + return self._config[section_name][value] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name}:{value} not found in loaded config. Checking default config") + try: + return self._default_config[section_name][value] + except KeyError as e: + logger.error(f"No defaults found for {section_name}:{value}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}:{value}") + + def load_config(self): + self._config = None + self.using_default_config = False + + logger.debug("Attempting to load config file") + try: + config_file_path = self.discover_config_file() + try: + with open(config_file_path, 'r+') as config_file_handle: + self._config = json.loads(config_file_handle.read()) + logger.debug("Config file loaded successfully") + except json.decoder.JSONDecodeError as e: + logger.warning("Unable to parse config. Using default configuration") + self.using_default_config = True + self._config = default_config + except AugurConfigFileNotFoundException as e: + logger.warning("Config file not found. 
Using default configuration") + self.using_default_config = True + self._config = default_config + + self.load_env_configuration() + + def discover_config_file(self): + default_config_path = self._root_augur_dir + '/' + self._default_config_file_name + config_file_path = None + + config_locations = [self._default_config_file_name, default_config_path + , f"/opt/augur/{self._default_config_file_name}"] + if os.getenv('AUGUR_CONFIG_FILE', None) is not None: + config_file_path = os.getenv('AUGUR_CONFIG_FILE') + else: + for location in config_locations: + try: + f = open(location, "r+") + config_file_path = os.path.abspath(location) + f.close() + break + except FileNotFoundError: + pass + if config_file_path: + return config_file_path + else: + raise(AugurConfigFileNotFoundException(message=f"{self._default_config_file_name} not found", errors=None)) + + def load_env_configuration(self): + self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') + self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') + self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') + self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') + self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') + self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') + self.set_env_value(section='Logging', name='log_level', environment_variable='AUGUR_LOG_LEVEL') + self.set_env_value(section='Logging', name='quiet', environment_variable='AUGUR_LOG_QUIET') + self.set_env_value(section='Logging', name='debug', environment_variable='AUGUR_LOG_DEBUG') + self.set_env_value(section='Logging', name='verbose', environment_variable='AUGUR_LOG_VERBOSE') + + def set_env_value(self, section, name, environment_variable, sub_config=None): + """ + Sets names and values of specified config section according to their environment variables. 
+ """ + # using sub_config lets us grab values from nested config blocks + if sub_config is None: + sub_config = self._config + + env_value = os.getenv(environment_variable) + + if env_value is not None: + self._env_config[environment_variable] = env_value + sub_config[section][name] = env_value + # logger.info(f"{section}:[\"{name}\"] set to {env_value} by: {environment_variable}") + else: + self._env_config[environment_variable] = self.get_value(section, name) + + def get_raw_config(self): + return self._config + + def get_default_config(self): + return self._default_config + + def get_env_config(self): + return self._env_config + +class AugurConfigFileNotFoundException(Exception): + def __init__(self, message, errors): + super().__init__(message) diff --git a/augur/housekeeper/housekeeper.py b/augur/housekeeper.py similarity index 81% rename from augur/housekeeper/housekeeper.py rename to augur/housekeeper.py --- a/augur/housekeeper/housekeeper.py +++ b/augur/housekeeper.py @@ -1,69 +1,85 @@ """ Keeps data up to date """ +import coloredlogs +from copy import deepcopy import logging, os, time, requests -from multiprocessing import Process +import logging.config +from multiprocessing import Process, get_start_method from sqlalchemy.ext.automap import automap_base import sqlalchemy as s import pandas as pd from sqlalchemy import MetaData -logging.basicConfig(filename='housekeeper.log') + +from augur.logging import AugurLogging + +import warnings +warnings.filterwarnings('ignore') + +logger = logging.getLogger(__name__) class Housekeeper: - def __init__(self, jobs, broker, broker_host, broker_port, user, password, host, port, dbname): + def __init__(self, broker, augur_app): + logger.info("Booting housekeeper") - self.broker_host = broker_host - self.broker_port = broker_port + self._processes = [] + self.augur_logging = augur_app.logging + self.jobs = deepcopy(augur_app.config.get_value("Housekeeper", "jobs")) + self.broker_host = augur_app.config.get_value("Server", "host") + self.broker_port = augur_app.config.get_value("Server", "port") self.broker = broker - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - user, password, host, port, dbname - ) - - dbschema='augur_data' - self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + self.db = augur_app.database + self.helper_db = augur_app.operations_database helper_metadata = MetaData() helper_metadata.reflect(self.helper_db, only=['worker_job']) HelperBase = automap_base(metadata=helper_metadata) HelperBase.prepare() - self.job_table = HelperBase.classes.worker_job.__table__ repoUrlSQL = s.sql.text(""" SELECT repo_git FROM repo """) - rs = pd.read_sql(repoUrlSQL, self.db, params={}) - all_repos = rs['repo_git'].values.tolist() # List of tasks that need periodic updates - self.__updatable = self.prep_jobs(jobs) + self.schedule_updates() + + def schedule_updates(self): + """ + Starts update processes + """ + self.prep_jobs() + self.augur_logging.initialize_housekeeper_logging_listener() + logger.info("Scheduling update processes") + for job in self.jobs: + process = Process(target=self.updater_process, name=job["model"], args=(self.broker_host, self.broker_port, self.broker, job, (self.augur_logging.housekeeper_job_config, self.augur_logging.get_config()))) + self._processes.append(process) + 
process.start() - self.__processes = [] - self.__updater() @staticmethod - def updater_process(broker_host, broker_port, broker, job): + def updater_process(broker_host, broker_port, broker, job, logging_config): """ Controls a given plugin's update process - :param name: name of object to be updated - :param delay: time needed to update - :param shared: shared object that is to also be updated + """ - + logging.config.dictConfig(logging_config[0]) + logger = logging.getLogger(f"augur.jobs.{job['model']}") + coloredlogs.install(level=logging_config[1]["log_level"], logger=logger, fmt=logging_config[1]["format_string"]) + + if logging_config[1]["quiet"]: + logger.disabled + if 'repo_group_id' in job: repo_group_id = job['repo_group_id'] - logging.info('Housekeeper spawned {} model updater process for repo group id {} with PID {}\n'.format(job['model'], repo_group_id, os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo group id {}'.format(job['model'], repo_group_id)) else: repo_group_id = None - logging.info('Housekeeper spawned {} model updater process for repo ids {} with PID {}\n'.format(job['model'], job['repo_ids'], os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo ids {}'.format(job['model'], job['repo_ids'])) try: compatible_worker_found = False @@ -76,10 +92,10 @@ def updater_process(broker_host, broker_port, broker, job): time.sleep(3) continue - logging.info("Housekeeper recognized that the broker has a worker that " + - "can handle the {} model... beginning to distribute maintained tasks\n".format(job['model'])) + logger.info("Housekeeper recognized that the broker has a worker that " + + "can handle the {} model... beginning to distribute maintained tasks".format(job['model'])) while True: - logging.info('Housekeeper updating {} model with given {}...\n'.format( + logger.info('Housekeeper updating {} model with given {}...'.format( job['model'], job['given'][0])) if job['given'][0] == 'git_url' or job['given'][0] == 'github_url': @@ -100,9 +116,9 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.error("Error encountered: {}".format(e)) - logging.info(task) + logger.debug(task) time.sleep(15) @@ -119,61 +135,33 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.error("Error encountered: {}".format(e)) - logging.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)\n".format(len(job['repos']))) + logger.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)".format(len(job['repos']))) time.sleep(job['delay']) - - except KeyboardInterrupt: - os.kill(os.getpid(), 9) - os._exit(0) - except: - raise - def __updater(self, jobs=None): - """ - Starts update processes - """ - logging.info("Starting update processes...") - if jobs is None: - jobs = self.__updatable - for job in jobs: - up = Process(target=self.updater_process, args=(self.broker_host, self.broker_port, self.broker, job), daemon=True) - up.start() - self.__processes.append(up) - - def update_all(self): - """ - Updates all plugins - """ - for updatable in self.__updatable: - 
updatable['update']() - - def schedule_updates(self): - """ - Schedules updates - """ - # don't use this, - logging.debug('Scheduling updates...') - self.__updater() + except KeyboardInterrupt as e: + pass def join_updates(self): """ Join to the update processes """ - for process in self.__processes: + for process in self._processes: + logger.debug(f"Joining {process.name} update process") process.join() def shutdown_updates(self): """ Ends all running update processes """ - for process in self.__processes: + for process in self._processes: + # logger.debug(f"Terminating {process.name} update process") process.terminate() - def prep_jobs(self, jobs): - - for job in jobs: + def prep_jobs(self): + logger.info("Preparing housekeeper jobs") + for job in self.jobs: if 'repo_group_id' in job or 'repo_ids' in job: # If RG id is 0 then it just means to query all repos where_and = 'AND' if job['model'] == 'issues' and 'repo_group_id' in job else 'WHERE' @@ -269,7 +257,7 @@ def prep_jobs(self, jobs): reorganized_repos = pd.read_sql(repo_url_sql, self.db, params={}) if len(reorganized_repos) == 0: - logging.info("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) + logger.warning("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) job['repos'] = [] continue @@ -290,7 +278,7 @@ def prep_jobs(self, jobs): 'oauth_id': 0 } result = self.helper_db.execute(self.job_table.insert().values(job_tuple)) - logging.info("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) + logger.debug("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) # If a last id is not recorded, start from beginning of repos # (first id is not necessarily 0) @@ -347,5 +335,3 @@ def prep_jobs(self, jobs): job['repos'] = rs # time.sleep(120) - return jobs - diff --git a/augur/housekeeper/__init__.py b/augur/housekeeper/__init__.py deleted file mode 100644 --- a/augur/housekeeper/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# # SPDX-License-Identifier: MIT -# from augur.augurplugin import AugurPlugin -# from augur.application import Application - -# class HousekeeperPlugin(AugurPlugin): -# """ -# This plugin serves as an example as to how to load plugins into Augur -# """ -# def __init__(self, augur_app): -# super().__init__(augur_app) -# self.__housekeeper = self.__call__() - -# def __call__(self): -# from .housekeeper import Housekeeper -# return Housekeeper( -# user=self._augur.read_config('Database', 'user', 'AUGUR_DB_USER', 'root'), -# password=self._augur.read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), -# host=self._augur.read_config('Database', 'host', 'AUGUR_DB_HOST', '0.0.0.0'), -# port=self._augur.read_config('Database', 'port', 'AUGUR_DB_PORT', '3306'), -# dbname=self._augur.read_config('Database', 'name', 'AUGUR_DB_NAME', 'msr14') -# ) - - -# HousekeeperPlugin.augur_plugin_meta = { -# 'name': 'housekeeper', -# 'datasource': True -# } -# Application.register_plugin(HousekeeperPlugin) - -# __all__ = ['HousekeeperPlugin'] \ No newline at end of file diff --git a/augur/logging.py b/augur/logging.py new file mode 100644 --- /dev/null +++ b/augur/logging.py @@ -0,0 +1,305 @@ +import logging +import logging.config +import logging.handlers +from logging import FileHandler, StreamHandler, Formatter +from multiprocessing import Process, Queue, Event, current_process +from time 
import sleep +import os +from pathlib import Path +import atexit +import shutil +import coloredlogs +from copy import deepcopy + +from augur import ROOT_AUGUR_DIRECTORY + +logger = logging.getLogger(__name__) + +class AugurLogging(): + + simple_format_string = "[%(process)d] %(name)s [%(levelname)s] %(message)s" + verbose_format_string = "%(asctime)s,%(msecs)dms [PID: %(process)d] %(name)s [%(levelname)s] %(message)s" + cli_format_string = "CLI: [%(module)s.%(funcName)s] [%(levelname)s] %(message)s" + config_format_string = "[%(levelname)s] %(message)s" + error_format_string = "%(asctime)s [PID: %(process)d] %(name)s [%(funcName)s() in %(filename)s:L%(lineno)d] [%(levelname)s]: %(message)s" + + @staticmethod + def get_log_directories(augur_config, reset_logfiles=True): + LOGS_DIRECTORY = augur_config.get_value("Logging", "logs_directory") + + if LOGS_DIRECTORY[0] != "/": + LOGS_DIRECTORY = ROOT_AUGUR_DIRECTORY + "/" + LOGS_DIRECTORY + + if LOGS_DIRECTORY[-1] != "/": + LOGS_DIRECTORY += "/" + + if reset_logfiles is True: + try: + shutil.rmtree(LOGS_DIRECTORY) + except FileNotFoundError as e: + pass + + Path(LOGS_DIRECTORY).mkdir(exist_ok=True) + + return LOGS_DIRECTORY + + def __init__(self, disable_logs=False, reset_logfiles=True): + self.stop_event = None + self.LOGS_DIRECTORY = None + self.WORKER_LOGS_DIRECTORY = None + self.LOG_LEVEL = None + self.VERBOSE = None + self.QUIET = None + self.DEGBUG = None + + self.logfile_config = None + self.housekeeper_job_config = None + + self._reset_logfiles = reset_logfiles + + self.formatters = { + "simple": { + "class": "logging.Formatter", + "format": AugurLogging.simple_format_string + }, + "verbose": { + "class": "logging.Formatter", + "format": AugurLogging.verbose_format_string + }, + "cli": { + "class": "logging.Formatter", + "format": AugurLogging.cli_format_string + }, + "config": { + "class": "logging.Formatter", + "format": AugurLogging.config_format_string + }, + "error": { + "class": "logging.Formatter", + "format": AugurLogging.error_format_string + } + } + + self._configure_cli_logger() + + level = logging.INFO + config_handler = StreamHandler() + config_handler.setFormatter(Formatter(fmt=AugurLogging.config_format_string)) + config_handler.setLevel(level) + + config_initialization_logger = logging.getLogger("augur.config") + config_initialization_logger.setLevel(level) + config_initialization_logger.handlers = [] + config_initialization_logger.addHandler(config_handler) + config_initialization_logger.propagate = False + + coloredlogs.install(level=level, logger=config_initialization_logger, fmt=AugurLogging.config_format_string) + + if disable_logs: + self._disable_all_logging() + + + def _disable_all_logging(self): + for logger in ["augur", "augur.application", "augur.housekeeper", "augur.config", "augur.cli", "root"]: + lg = logging.getLogger(logger) + lg.disabled = True + + def _configure_cli_logger(self): + cli_handler = StreamHandler() + cli_handler.setLevel(logging.INFO) + + cli_logger = logging.getLogger("augur.cli") + cli_logger.setLevel(logging.INFO) + cli_logger.handlers = [] + cli_logger.addHandler(cli_handler) + cli_logger.propagate = False + + coloredlogs.install(level=logging.INFO, logger=cli_logger, fmt=AugurLogging.cli_format_string) + + def _set_config(self, augur_config): + self.LOGS_DIRECTORY = AugurLogging.get_log_directories(augur_config, self._reset_logfiles) + self.LOG_LEVEL = augur_config.get_value("Logging", "log_level") + self.QUIET = int(augur_config.get_value("Logging", "quiet")) + self.DEBUG = 
int(augur_config.get_value("Logging", "debug")) + self.VERBOSE = int(augur_config.get_value("Logging", "verbose")) + # self.JOB_NAMES = [job["model"] for job in deepcopy(augur_config.get_value("Housekeeper", "jobs"))] + + if self.QUIET: + self._disable_all_logging() + + if self.DEBUG: + self.LOG_LEVEL = "DEBUG" + self.VERBOSE = True + + if self.VERBOSE: + self.FORMATTER = "verbose" + else: + self.FORMATTER = "simple" + self.format_string = self.formatters[self.FORMATTER]["format"] + + def configure_logging(self, augur_config): + self._set_config(augur_config) + self._configure_logfiles() + self._configure_cli_logger() + self._configure_gunicorn_logging() + logger.debug("Loggers are fully configured") + + def _configure_logfiles(self): + self.logfile_config = { + "version": 1, + "disable_existing_loggers": True, + "formatters": self.formatters, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "formatter": self.FORMATTER, + "level": self.LOG_LEVEL + }, + "logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "augur.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "errorfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "augur.err", + "mode": "a", + "level": logging.WARNING, + "formatter": "error" + }, + "server_logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "gunicorn.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "housekeeper_logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "housekeeper.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "housekeeper_errorfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "housekeeper.err", + "mode": "a", + "level": logging.WARNING, + "formatter": "error", + }, + }, + "loggers": { + "augur": { + "handlers": ["console", "logfile", "errorfile"], + "level": self.LOG_LEVEL + }, + "augur.server": { + "handlers": ["server_logfile"], + "level": self.LOG_LEVEL, + "propagate": False + }, + "augur.housekeeper": { + "handlers": ["housekeeper_logfile", "housekeeper_errorfile"], + "level": self.LOG_LEVEL, + }, + "augur.jobs": { + "handlers": ["housekeeper_logfile", "housekeeper_errorfile", "logfile", "errorfile"], + "level": self.LOG_LEVEL, + "propagate": False + } + }, + "root": { + "handlers": [], + "level": self.LOG_LEVEL + } + } + + logging.config.dictConfig(self.logfile_config) + for logger_name in ["augur", "augur.housekeeper", "augur.jobs"]: + coloredlogs.install(logger=logging.getLogger(logger_name), level=self.LOG_LEVEL, fmt=self.format_string) + + logger.debug("Logfiles initialized") + logger.debug("Logs will be written to: " + self.LOGS_DIRECTORY) + + def initialize_housekeeper_logging_listener(self): + queue = Queue() + self.housekeeper_job_config = { + "version": 1, + "disable_existing_loggers": True, + "formatters": self.formatters, + "handlers": { + "queue": { + "class": "logging.handlers.QueueHandler", + "queue": queue + } + }, + "root": { + "handlers": ["queue"], + "level": self.LOG_LEVEL + } + } + + stop_event = Event() + self.lp = Process(target=logging_listener_process, name='housekeeper_logging_listener', + args=(queue, stop_event, self.logfile_config)) + self.lp.start() + sleep(2) # just to let it fully start up + self.stop_event = stop_event + logger.debug("Houseekeeper logging listener initialized") + + def get_config(self): + return { + "log_level": self.LOG_LEVEL, + 
"quiet": self.QUIET, + "verbose": self.VERBOSE, + "debug": self.DEBUG, + "format_string": self.format_string + } + + def _configure_gunicorn_logging(self): + gunicorn_log_file = self.LOGS_DIRECTORY + "gunicorn.log" + self.gunicorn_logging_options = { + "errorlog": gunicorn_log_file, + "accesslog": gunicorn_log_file, + "loglevel": self.LOG_LEVEL, + "capture_output": False + } + +def logging_listener_process(queue, stop_event, config): + """ + This could be done in the main process, but is just done in a separate + process for illustrative purposes. + + This initialises logging according to the specified configuration, + starts the listener and waits for the main process to signal completion + via the event. The listener is then stopped, and the process exits. + """ + logging.config.dictConfig(config) + listener = logging.handlers.QueueListener(queue, AugurLoggingHandler()) + listener.start() + try: + stop_event.wait() + except KeyboardInterrupt: + pass + finally: + listener.stop() + +class AugurLoggingHandler: + """ + A simple handler for logging events. It runs in the listener process and + dispatches events to loggers based on the name in the received record, + which then get dispatched, by the logging system, to the handlers + configured for those loggers. + """ + + def handle(self, record): + if record.name == "root": + logger = logging.getLogger() + else: + logger = logging.getLogger(record.name) + + record.processName = '%s (for %s)' % (current_process().name, record.processName) + logger.handle(record) diff --git a/augur/metrics/__init__.py b/augur/metrics/__init__.py --- a/augur/metrics/__init__.py +++ b/augur/metrics/__init__.py @@ -1 +1,38 @@ -from .metrics import MetricDefinitions \ No newline at end of file +import os +import glob +import sys +import inspect +import types +import importlib +import logging + +logger = logging.getLogger(__name__) + +class Metrics(): + def __init__(self, app): + logger.debug("Loading metrics") + self.database = app.database + self.spdx_db = app.spdx_database + + self.models = [] #TODO: standardize this + for filename in glob.iglob("augur/metrics/**"): + file_id = get_file_id(filename) + if not file_id.startswith('__') and filename.endswith('.py') and file_id != "metrics": + self.models.append(file_id) + + for model in self.models: + importlib.import_module(f"augur.metrics.{model}") + add_metrics(self, f"augur.metrics.{model}") + +def get_file_id(path): + return os.path.splitext(os.path.basename(path))[0] + +def add_metrics(metrics, module_name): + # find all unbound endpoint functions objects + # (ones that have metadata) defined the given module_name + # and bind them to the metrics class + for name, obj in inspect.getmembers(sys.modules[module_name]): + if inspect.isfunction(obj) == True: + if hasattr(obj, 'is_metric') == True: + setattr(metrics, name, types.MethodType(obj, metrics)) + diff --git a/augur/metrics/commit/commit.py b/augur/metrics/commit.py similarity index 97% rename from augur/metrics/commit/commit.py rename to augur/metrics/commit.py --- a/augur/metrics/commit/commit.py +++ b/augur/metrics/commit.py @@ -5,9 +5,9 @@ import datetime import sqlalchemy as s import pandas as pd -from augur.util import annotate, add_metrics +from augur.util import register_metric -@annotate(tag='committers') +@register_metric() def committers(self, repo_group_id, repo_id=None, begin_date=None, end_date=None, period='month'): """ :param repo_id: The repository's id @@ -92,7 +92,7 @@ def committers(self, repo_group_id, repo_id=None, begin_date=None, 
end_date=None return results -@annotate(tag='annual-commit-count-ranked-by-new-repo-in-repo-group') +@register_metric() def annual_commit_count_ranked_by_new_repo_in_repo_group(self, repo_group_id, repo_id=None, begin_date=None, end_date=None, period='month'): """ For each repository in a collection of repositories being managed, each REPO that first appears in the parameterized @@ -167,7 +167,7 @@ def annual_commit_count_ranked_by_new_repo_in_repo_group(self, repo_group_id, re 'repo_group_id': repo_group_id,'begin_date': begin_date, 'end_date': end_date}) return results -@annotate(tag='annual-commit-count-ranked-by-repo-in-repo-group') +@register_metric() def annual_commit_count_ranked_by_repo_in_repo_group(self, repo_group_id, repo_id=None, timeframe=None): """ For each repository in a collection of repositories being managed, each REPO's total commits during the current Month, @@ -225,7 +225,7 @@ def annual_commit_count_ranked_by_repo_in_repo_group(self, repo_group_id, repo_i if timeframe == 'all': cdRgTpRankedCommitsSQL = s.sql.text(""" SELECT repo.repo_id, repo_name as name, SUM(added - removed - whitespace) as net, patches - FROM dm_repo_annual, repo, repo_groups + FROM augur_data.dm_repo_annual, repo, repo_groups WHERE repo.repo_group_id = :repo_group_id AND repo.repo_group_id = repo_groups.repo_group_id AND dm_repo_annual.repo_id = repo.repo_id @@ -261,12 +261,11 @@ def annual_commit_count_ranked_by_repo_in_repo_group(self, repo_group_id, repo_i LIMIT 10 """) - results = pd.read_sql(cdRgTpRankedCommitsSQL, self.database, params={ "repo_group_id": repo_group_id, "repo_id": repo_id}) return results -@annotate(tag='top-committers') +@register_metric() def top_committers(self, repo_group_id, repo_id=None, year=None, threshold=0.5): """ Returns a list of contributors contributing N% of all commits. @@ -367,7 +366,3 @@ def top_committers(self, repo_group_id, repo_id=None, year=None, threshold=0.5): int(total_commits - cumsum)] return results - - -def create_commit_metrics(metrics): - add_metrics(metrics, __name__) diff --git a/augur/metrics/commit/__init__.py b/augur/metrics/commit/__init__.py deleted file mode 100644 --- a/augur/metrics/commit/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .commit import create_commit_metrics - -from .routes import create_commit_routes \ No newline at end of file diff --git a/augur/metrics/commit/routes.py b/augur/metrics/commit/routes.py deleted file mode 100644 --- a/augur/metrics/commit/routes.py +++ /dev/null @@ -1,113 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_commit_routes(server): - - metrics = server._augur.metrics - - """ - @api {get} /repo-groups/:repo_group_id/annual-commit-count-ranked-by-new-repo-in-repo-group Annual Commit Count Ranked by New Repo in Repo Group(Repo Group) - @apiName annual-commit-count-ranked-by-new-repo-in-repo-group - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. Source: Git Repository - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string=day, week, month, year, all} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_group_id": 20, - "rg_name": "Rails (wg-value)", - "year": 2004, - "net": 21996, - "commits": 289 - }, - { - "repo_group_id": 20, - "rg_name": "Rails (wg-value)", - "year": 2005, - "net": 27470, - "commits": 2455 - } - ] - """ - server.addRepoGroupMetric(metrics.annual_commit_count_ranked_by_new_repo_in_repo_group,'annual-commit-count-ranked-by-new-repo-in-repo-group') - - """ - @api {get} /repo-groups/:repo_group_id/annual-commit-count-ranked-by-new-repo-in-repo-group Annual Commit Count Ranked by New Repo in Repo Group(Repo) - @apiName annual-commit-count-ranked-by-new-repo-in-repo-group - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. Source: Git Repository - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year, all} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21000, - "repo_name": "rails", - "year": 2004, - "net": 21996, - "commits": 289 - }, - { - "repo_id": 21000, - "repo_name": "rails", - "year": 2005, - "net": 26504, - "commits": 2428 - } - ] - """ - server.addRepoMetric(metrics.annual_commit_count_ranked_by_new_repo_in_repo_group,'annual-commit-count-ranked-by-new-repo-in-repo-group') - - """ - @api {get} /repo-groups/:repo_group_id/annual-commit-count-ranked-by-repo-in-repo-group Annual Commit Count Ranked by Repo in Repo Group(Repo Group) - @apiName annual-commit-count-ranked-by-repo-in-repo-group - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. Source: Git Repository - @apiParam {String} repo_url_base Base64 version of the URL of the GitHub repository as it appears in the Facade DB - @apiSuccessExample {json} Success-Response: - [ - { - "repos_id": 1, - "net": 2479124, - "patches": 1, - "repo_name": "twemoji" - }, - { - "repos_id": 63, - "net": 2477911, - "patches": 1, - "repo_name": "twemoji-1" - } - ] - """ - server.addRepoGroupMetric(metrics.annual_commit_count_ranked_by_repo_in_repo_group,'annual-commit-count-ranked-by-repo-in-repo-group') - - """ - @api {get} /repo-groups/:repo_group_id/annual-commit-count-ranked-by-repo-in-repo-group Annual Commit Count Ranked by Repo in Repo Group(Repo) - @apiName annual-commit-count-ranked-by-repo-in-repo-group - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
Source: Git Repository - @apiParam {String} repo_url_base Base64 version of the URL of the GitHub repository as it appears in the Facade DB - @apiSuccessExample {json} Success-Response: - [ - { - "repos_id": 1, - "net": 2479124, - "patches": 1, - "name": "twemoji" - }, - { - "repos_id": 63, - "net": 2477911, - "patches": 1, - "name": "twemoji-1" - } - ] - """ - server.addRepoMetric(metrics.annual_commit_count_ranked_by_repo_in_repo_group,'annual-commit-count-ranked-by-repo-in-repo-group') - diff --git a/augur/metrics/contributor/contributor.py b/augur/metrics/contributor.py similarity index 98% rename from augur/metrics/contributor/contributor.py rename to augur/metrics/contributor.py --- a/augur/metrics/contributor/contributor.py +++ b/augur/metrics/contributor.py @@ -5,9 +5,9 @@ import datetime import sqlalchemy as s import pandas as pd -from augur.util import annotate, add_metrics +from augur.util import register_metric -@annotate(tag='contributors') +@register_metric() def contributors(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): """ Returns a timeseries of all the contributions to a project. @@ -211,7 +211,7 @@ def contributors(self, repo_group_id, repo_id=None, period='day', begin_date=Non 'begin_date': begin_date, 'end_date': end_date}) return results -@annotate(tag='contributors-new') +@register_metric() def contributors_new(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): """ Returns a timeseries of new contributions to a project. @@ -330,7 +330,7 @@ def contributors_new(self, repo_group_id, repo_id=None, period='day', begin_date 'begin_date': begin_date, 'end_date': end_date}) return results -@annotate(tag='lines-changed-by-author') +@register_metric() def lines_changed_by_author(self, repo_group_id, repo_id=None): """ Returns number of lines changed per author per day @@ -361,7 +361,7 @@ def lines_changed_by_author(self, repo_group_id, repo_id=None): results = pd.read_sql(linesChangedByAuthorSQL, self.database, params={"repo_group_id": repo_group_id}) return results -@annotate(tag='contributors-code-development') +@register_metric() def contributors_code_development(self, repo_group_id, repo_id=None, period='all', begin_date=None, end_date=None): """ Returns a timeseries of all the contributions to a project. @@ -454,7 +454,3 @@ def contributors_code_development(self, repo_group_id, repo_id=None, period='all results = pd.read_sql(contributorsSQL, self.database, params={'repo_group_id': repo_group_id, 'period': period, 'begin_date': begin_date, 'end_date': end_date}) return results - -def create_contributor_metrics(metrics): - add_metrics(metrics, __name__) - diff --git a/augur/metrics/contributor/__init__.py b/augur/metrics/contributor/__init__.py deleted file mode 100644 --- a/augur/metrics/contributor/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .contributor import create_contributor_metrics - -from .routes import create_contributor_routes \ No newline at end of file diff --git a/augur/metrics/contributor/routes.py b/augur/metrics/contributor/routes.py deleted file mode 100644 --- a/augur/metrics/contributor/routes.py +++ /dev/null @@ -1,336 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_contributor_routes(server): - - metrics = server._augur.metrics - - """ - @api {get} /repo-groups/:repo_group_id/contributors Contributors (Repo Group) - @apiName Contributors(Repo Group) - @apiGroup Evolution - @apiDescription List of contributors and their contributions. 
- <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/contributors.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "user_id": 1, - "commits": 0, - "issues": 2, - "commit_comments": 0, - "issue_comments": 0, - "pull_requests": 0, - "pull_request_comments": 0, - "total": 2, - "repo_name": "rails", - "repo_id": 21000 - }, - { - "user_id": 2, - "commits": 0, - "issues": 2, - "commit_comments": 0, - "issue_comments": 0, - "pull_requests": 0, - "pull_request_comments": 0, - "total": 2, - "repo_name": "rails", - "repo_id": 21000 - } - ] - """ - server.addRepoGroupMetric(metrics.contributors, 'contributors') - - """ - @api {get} /repos/:repo_id/contributors Contributors (Repo) - @apiName Contributors(Repo) - @apiGroup Evolution - @apiDescription List of contributors and their contributions. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/contributors.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "user": 1, - "commits": 0, - "issues": 2, - "commit_comments": 0, - "issue_comments": 0, - "pull_requests": 0, - "pull_request_comments": 0, - "total": 2 - }, - { - "user": 2, - "commits": 0, - "issues": 2, - "commit_comments": 0, - "issue_comments": 0, - "pull_requests": 0, - "pull_request_comments": 0, - "total": 2 - } - ] - """ - server.addRepoMetric(metrics.contributors, 'contributors') - - """ - @api {get} /repo-groups/:repo_group_id/contributors-new New Contributors (Repo Group) - @apiName New Contributors(Repo Group) - @apiGroup Evolution - @apiDescription Time series of number of new contributors during a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/contributors-new.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2018-05-20T00:00:00.000Z", - "new_contributors": 3, - "repo_name": "rails", - "repo_id": 21000 - }, - { - "date": "2019-06-03T00:00:00.000Z", - "new_contributors": 23, - "repo_name": "rails", - "repo_id": 21000 - } - ] - """ - server.addRepoGroupMetric(metrics.contributors_new, 'contributors-new') - - """ - @api {get} /repos/:repo_id/contributors-new New Contributors (Repo) - @apiName New Contributors(Repo) - @apiGroup Evolution - @apiDescription Time series of number of new contributors during a certain period. 
- <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/contributors-new.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2018-05-20T00:00:00.000Z", - "new_contributors": 3, - "repo_name": "rails", - "repo_id": 21000 - }, - { - "date": "2019-06-03T00:00:00.000Z", - "new_contributors": 23, - "repo_name": "rails", - "repo_id": 21000 - } - ] - """ - server.addRepoMetric(metrics.contributors_new, 'contributors-new') - - """ - @api {get} /repos/:repo_id/committers Committers(Repo) - @apiName committers-repo - @apiGroup Risk - @apiDescription Number of persons contributing with an accepted commit for the first time. - <a href="https://github.com/chaoss/wg-risk/blob/master/metrics/Committers.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="week"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "date":"2018-10-25T00:00:00.000Z", - "repo_name":"weasel", - "rg_name":"Comcast", - "count":1 - }, - { - "date":"2018-10-17T00:00:00.000Z","repo_name":"weasel","rg_name":"Comcast","count":11 - }, - { - "date":"2018-06-21T00:00:00.000Z", - "repo_name":"weasel", - "rg_name":"Comcast", - "count":6 - } - ] - """ - server.addRepoMetric(metrics.committers, 'committers') - - """ - @api {get} /repo-groups/:repo_group_id/Committers (Repo Group) - @apiName committers-repo-group - @apiGroup Risk - @apiDescription Number of persons opening an issue for the first time. - <a href="https://github.com/chaoss/wg-risk/blob/master/metrics/Committers.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="week"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2007-01-01T00:00:00.000Z", - "rg_name": "Comcast", - "count": 372 - }, - { - "date": "2008-01-01T00:00:00.000Z", - "rg_name": "Comcast", - "count": 964 - }, - { - "date": "2009-01-01T00:00:00.000Z", - "rg_name": "Comcast", - "count": 28038 - } - ] - """ - server.addRepoGroupMetric(metrics.committers, 'committers') - - """ - @api {get} /repo-groups/:repo_group_id/lines-changed-by-author Lines Changed by Author(Repo) - @apiName lines-changed-by-author - @apiGroup Experimental - @apiDescription Returns number of lines changed per author per day - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} repo_id Repository ID. 
- @apiSuccessExample {json} Success-Response: - [ - { - "cmt_author_email": "[email protected]", - "cmt_author_date": "2004-11-24", - "affiliation": "NULL", - "additions": 25611, - "deletions": 296, - "whitespace": 5279 - }, - { - "cmt_author_email": "[email protected]", - "cmt_author_date": "2004-11-25", - "affiliation": "NULL", - "additions": 163, - "deletions": 179, - "whitespace": 46 - } - ] - """ - server.addRepoMetric(metrics.lines_changed_by_author,'lines-changed-by-author') - - """ - @api {get} /repo-groups/:repo_group_id/lines-changed-by-author Lines Changed by Author(Repo) - @apiName lines-changed-by-author - @apiGroup Experimental - @apiDescription Count of closed issues. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/contributors-new.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "cmt_author_email": "[email protected]", - "cmt_author_date": "2004-11-24", - "affiliation": "NULL", - "additions": 25611, - "deletions": 296, - "whitespace": 5279 - }, - { - "cmt_author_email": "[email protected]", - "cmt_author_date": "2004-11-25", - "affiliation": "NULL", - "additions": 163, - "deletions": 179, - "whitespace": 46 - } - ] - """ - server.addRepoGroupMetric(metrics.lines_changed_by_author,'lines-changed-by-author') - - """ - @api {get} /repo-groups/:repo_group_id/top-committers Top Committers (Repo Group) - @apiName top-committers-repo-group - @apiGroup Experimental - @apiDescription Returns a list of contributors contributing N% of all commits. - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} [year] Specify the year to return the results for. Default value: `current year` - @apiParam {string} [threshold=0.5] Specify N%. Accepts a value between `0` & `1` where `0` specifies - `0%` and `1` specifies `100%`. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_group_id": 20, - "repo_group_name": "Rails", - "email": "[email protected]", - "commits": 502 - }, - { - "repo_group_id": 20, - "repo_group_name": "Rails", - "email": "[email protected]", - "commits": 246 - }, - { - "repo_group_id": 20, - "repo_group_name": "Rails", - "email": "[email protected]", - "commits": 119 - }, - { - "repo_group_id": "20", - "repo_group_name": "Rails", - "email": "other_contributors", - "commits": 1774 - } - ] - """ - server.addRepoGroupMetric(metrics.top_committers, 'top-committers') - - """ - @api {get} /repos/:repo_id/top-committers Top Committers (Repo) - @apiName top-committers-repo - @apiGroup Experimental - @apiDescription Returns a list of contributors contributing N% of all commits. - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string} [year] Specify the year to return the results for. Default value: `current year` - @apiParam {string} [threshold=0.5] Specify N%. Accepts a value between `0` & `1` where `0` specifies - `0%` and `1` specifies `100%`. 
- @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21334, - "repo_name": "graphql", - "email": "[email protected]", - "commits": 4 - }, - { - "repo_id": 21334, - "repo_name": "graphql", - "email": "[email protected]", - "commits": 3 - }, - { - "repo_id": "21334", - "repo_name": "graphql", - "email": "other_contributors", - "commits": 5 - } - ] - """ - server.addRepoMetric(metrics.top_committers, 'top-committers') - - server.addRepoGroupMetric(metrics.contributors_code_development, 'contributors-code-development') - - server.addRepoMetric(metrics.contributors_code_development, 'contributors-code-development') \ No newline at end of file diff --git a/augur/metrics/experimental.py b/augur/metrics/experimental.py new file mode 100644 --- /dev/null +++ b/augur/metrics/experimental.py @@ -0,0 +1,4 @@ +""" +Metrics that are still heavily WIP, or don't clearly fall into one of the other categories +""" + diff --git a/augur/metrics/experimental/__init__.py b/augur/metrics/experimental/__init__.py deleted file mode 100644 --- a/augur/metrics/experimental/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .experimental import create_experimental_metrics - -from .routes import create_experimental_routes \ No newline at end of file diff --git a/augur/metrics/experimental/experimental.py b/augur/metrics/experimental/experimental.py deleted file mode 100644 --- a/augur/metrics/experimental/experimental.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -Metrics that are still heavily WIP, or don't clearly fall into one of the other categories -""" - -from augur.util import add_metrics - -def create_experimental_metrics(metrics): - add_metrics(metrics, __name__) diff --git a/augur/metrics/experimental/routes.py b/augur/metrics/experimental/routes.py deleted file mode 100644 --- a/augur/metrics/experimental/routes.py +++ /dev/null @@ -1,6 +0,0 @@ - -def create_experimental_routes(server): - - metrics = server._augur.metrics - - diff --git a/augur/metrics/insight/insight.py b/augur/metrics/insight.py similarity index 89% rename from augur/metrics/insight/insight.py rename to augur/metrics/insight.py --- a/augur/metrics/insight/insight.py +++ b/augur/metrics/insight.py @@ -4,10 +4,9 @@ import sqlalchemy as s import pandas as pd -from augur.util import annotate, add_metrics +from augur.util import register_metric - -@annotate(tag='top-insights') +@register_metric(type="repo_group_only") def top_insights(self, repo_group_id, num_repos=6): """ Timeseries of pull request acceptance rate (expressed as the ratio of pull requests merged on a date to the count of pull requests opened on a date) @@ -29,6 +28,3 @@ def top_insights(self, repo_group_id, num_repos=6): """) results = pd.read_sql(topInsightsSQL, self.database, params={'repo_group_id': repo_group_id, 'num_repos': num_repos}) return results - -def create_insight_metrics(metrics): - add_metrics(metrics, __name__) diff --git a/augur/metrics/insight/__init__.py b/augur/metrics/insight/__init__.py deleted file mode 100644 --- a/augur/metrics/insight/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .insight import create_insight_metrics - -from .routes import create_insight_routes \ No newline at end of file diff --git a/augur/metrics/insight/routes.py b/augur/metrics/insight/routes.py deleted file mode 100644 --- a/augur/metrics/insight/routes.py +++ /dev/null @@ -1,42 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_insight_routes(server): - - metrics = server._augur.metrics - - """ - @api {get} /top-insights Top Insights - @apiName top-insights - @apiGroup 
Utility - @apiDescription Get all the downloaded repo groups. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_group_id": 20, - "rg_name": "Rails", - "rg_description": "Rails Ecosystem.", - "rg_website": "", - "rg_recache": 0, - "rg_last_modified": "2019-06-03T15:55:20.000Z", - "rg_type": "GitHub Organization", - "tool_source": "load", - "tool_version": "one", - "data_source": "git", - "data_collection_date": "2019-06-05T13:36:25.000Z" - }, - { - "repo_group_id": 23, - "rg_name": "Netflix", - "rg_description": "Netflix Ecosystem.", - "rg_website": "", - "rg_recache": 0, - "rg_last_modified": "2019-06-03T15:55:20.000Z", - "rg_type": "GitHub Organization", - "tool_source": "load", - "tool_version": "one", - "data_source": "git", - "data_collection_date": "2019-06-05T13:36:36.000Z" - } - ] - """ - server.addRepoGroupMetric(metrics.top_insights, 'top-insights') diff --git a/augur/metrics/issue/issue.py b/augur/metrics/issue.py similarity index 98% rename from augur/metrics/issue/issue.py rename to augur/metrics/issue.py --- a/augur/metrics/issue/issue.py +++ b/augur/metrics/issue.py @@ -5,9 +5,9 @@ import datetime import sqlalchemy as s import pandas as pd -from augur.util import annotate, add_metrics +from augur.util import register_metric -@annotate(tag='issues-first-time-opened') +@register_metric() def issues_first_time_opened(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): """ Returns a timeseries of the count of persons opening an issue for the first time. @@ -78,7 +78,7 @@ def issues_first_time_opened(self, repo_group_id, repo_id=None, period='day', be 'begin_date': begin_date, 'end_date': end_date}) return results -@annotate(tag='issues-first-time-closed') +@register_metric() def issues_first_time_closed(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None, ): """ Returns a timeseries of the count of persons closing an issue for the first time. @@ -143,7 +143,7 @@ def issues_first_time_closed(self, repo_group_id, repo_id=None, period='day', be return results -@annotate(tag='issues-new') +@register_metric() def issues_new(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): """Returns a timeseries of new issues opened. @@ -199,7 +199,7 @@ def issues_new(self, repo_group_id, repo_id=None, period='day', begin_date=None, 'begin_date': begin_date, 'end_date': end_date}) return results -@annotate(tag='issues-active') +@register_metric() def issues_active(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): """Returns a timeseries of issues active. @@ -256,7 +256,7 @@ def issues_active(self, repo_group_id, repo_id=None, period='day', begin_date=No 'begin_date': begin_date, 'end_date':end_date}) return results -@annotate(tag='issues-closed') +@register_metric() def issues_closed(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): """Returns a timeseries of issues closed. @@ -312,7 +312,7 @@ def issues_closed(self, repo_group_id, repo_id=None, period='day', begin_date=No 'begin_date': begin_date, 'end_date': end_date}) return results -@annotate(tag='issue-duration') +@register_metric() def issue_duration(self, repo_group_id, repo_id=None, begin_date=None, end_date=None): """Returns the duration of each issue. 
@@ -376,7 +376,7 @@ def issue_duration(self, repo_group_id, repo_id=None, begin_date=None, end_date= results['duration'] = results['duration'].astype(str) return results -@annotate(tag='issue-participants') +@register_metric() def issue_participants(self, repo_group_id, repo_id=None, begin_date=None, end_date=None): """Returns number of participants per issue. @@ -449,7 +449,7 @@ def issue_participants(self, repo_group_id, repo_id=None, begin_date=None, end_d 'end_date': end_date}) return result -@annotate(tag='issue-backlog') +@register_metric() def issue_backlog(self, repo_group_id, repo_id=None): """Returns number of issues currently open. @@ -483,7 +483,7 @@ def issue_backlog(self, repo_group_id, repo_id=None): result = pd.read_sql(issue_backlog_SQL, self.database, params={'repo_id': repo_id}) return result -@annotate(tag='issue-throughput') +@register_metric() def issue_throughput(self, repo_group_id, repo_id=None): """Returns the ratio of issues closed to total issues @@ -527,7 +527,7 @@ def issue_throughput(self, repo_group_id, repo_id=None): result = pd.read_sql(issue_throughput_SQL, self.database, params={'repo_id': repo_id}) return result -@annotate(tag='issues-open-age') +@register_metric() def issues_open_age(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): """ Retrun the age of open issues @@ -579,7 +579,7 @@ def issues_open_age(self, repo_group_id, repo_id=None, period='day', begin_date= return results -@annotate(tag='issues-closed-resolution-duration') +@register_metric() def issues_closed_resolution_duration(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): """ Retrun Time duration of time for issues to be resolved @@ -641,7 +641,7 @@ def issues_closed_resolution_duration(self, repo_group_id, repo_id=None, period= return results -@annotate(tag='average-issue-resolution-time') +@register_metric() def average_issue_resolution_time(self, repo_group_id, repo_id=None): """ Returns the average issue resolution time @@ -685,7 +685,7 @@ def average_issue_resolution_time(self, repo_group_id, repo_id=None): params={'repo_id': repo_id}) return results -@annotate(tag='issues-maintainer-response-duration') +@register_metric() def issues_maintainer_response_duration(self, repo_group_id, repo_id=None, begin_date=None, end_date=None): if not begin_date: @@ -759,7 +759,7 @@ def issues_maintainer_response_duration(self, repo_group_id, repo_id=None, begin return results -@annotate(tag='open-issues-count') +@register_metric() def open_issues_count(self, repo_group_id, repo_id=None): """ Returns number of lines changed per author per day @@ -796,7 +796,7 @@ def open_issues_count(self, repo_group_id, repo_id=None): return results -@annotate(tag='closed-issues-count') +@register_metric() def closed_issues_count(self, repo_group_id, repo_id=None): """ Returns number of lines changed per author per day @@ -832,7 +832,7 @@ def closed_issues_count(self, repo_group_id, repo_id=None): results = pd.read_sql(closedIssueCountSQL, self.database, params={'repo_id': repo_id}) return results -@annotate(tag='issue-comments-mean') +@register_metric() def issue_comments_mean(self, repo_group_id, repo_id=None, group_by='week'): group_by = group_by.lower() @@ -948,7 +948,7 @@ def issue_comments_mean(self, repo_group_id, repo_id=None, group_by='week'): params={'repo_id': repo_id}) return results -@annotate(tag='issue-comments-mean-std') +@register_metric() def issue_comments_mean_std(self, repo_group_id, repo_id=None, group_by='week'): if not repo_id: 
issue_comments_mean_std_SQL = s.sql.text(""" @@ -1007,7 +1007,7 @@ def issue_comments_mean_std(self, repo_group_id, repo_id=None, group_by='week'): params={'repo_id': repo_id, 'group_by': group_by}) return results -@annotate(tag='abandoned_issues') +@register_metric() def abandoned_issues(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): if not begin_date: begin_date = '1970-1-1 00:00:01' @@ -1057,7 +1057,3 @@ def abandoned_issues(self, repo_group_id, repo_id=None, period='day', begin_date results = pd.read_sql(abandonedSQL, self.database, params={'repo_id': repo_id, 'repo_group_id': repo_group_id, 'period': period, 'begin_date': begin_date, 'end_date': end_date}) return results - - -def create_issue_metrics(metrics): - add_metrics(metrics, __name__) diff --git a/augur/metrics/issue/__init__.py b/augur/metrics/issue/__init__.py deleted file mode 100644 --- a/augur/metrics/issue/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .issue import create_issue_metrics - -from .routes import create_issue_routes \ No newline at end of file diff --git a/augur/metrics/issue/routes.py b/augur/metrics/issue/routes.py deleted file mode 100644 --- a/augur/metrics/issue/routes.py +++ /dev/null @@ -1,965 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_issue_routes(server): - - metrics = server._augur.metrics - - """ - @api {get} /repo-groups/:repo_group_id/issues-new Issues New (Repo Group) - @apiName issues-new-repo-group - @apiGroup Evolution - @apiDescription Time series of number of new issues opened during a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Issues_New.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21000, - "repo_name": "rails", - "date": "2019-01-01T00:00:00.000Z", - "issues": 318 - }, - { - "repo_id": 21002, - "repo_name": "acts_as_list", - "date": "2009-01-01T00:00:00.000Z", - "issues": 1 - }, - { - "repo_id": 21002, - "repo_name": "acts_as_list", - "date": "2010-01-01T00:00:00.000Z", - "issues": 7 - } - ] - """ - server.addRepoGroupMetric(metrics.issues_new, 'issues-new') - - """ - @api {get} /repos/:repo_id/issues-new Issues New (Repo) - @apiName issues-new-repo - @apiGroup Evolution - @apiDescription Time series of number of new issues opened during a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Issues_New.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "rails", - "date": "2015-01-01T00:00:00.000Z", - "issues": 116 - }, - { - "repo_name": "rails", - "date": "2016-01-01T00:00:00.000Z", - "issues": 196 - }, - { - "repo_name": "rails", - "date": "2017-01-01T00:00:00.000Z", - "issues": 180 - } - ] - """ - server.addRepoMetric(metrics.issues_new, 'issues-new') - - """ - @api {get} /repo-groups/:repo_group_id/issues-active Issues Active (Repo Group) - @apiName issues-active-repo-group - @apiGroup Evolution - @apiDescription Time series of number of issues that showed some activity during a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Issues_Active.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21039, - "repo_name": "rails_xss", - "date": "2019-01-01T00:00:00.000Z", - "issues": 18 - }, - { - "repo_id": 21041, - "repo_name": "prototype-rails", - "date": "2019-01-01T00:00:00.000Z", - "issues": 20 - }, - { - "repo_id": 21043, - "repo_name": "sprockets-rails", - "date": "2015-01-01T00:00:00.000Z", - "issues": 102 - } - ] - """ - server.addRepoGroupMetric(metrics.issues_active, 'issues-active') - - """ - @api {get} /repos/:repo_id/issues-active Issues Active (Repo) - @apiName issues-active-repo - @apiGroup Evolution - @apiDescription Time series of number of issues that showed some activity during a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Issues_Active.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "rails", - "date": "2011-01-01T00:00:00.000Z", - "issues": 30 - }, - { - "repo_name": "rails", - "date": "2012-01-01T00:00:00.000Z", - "issues": 116 - }, - { - "repo_name": "rails", - "date": "2013-01-01T00:00:00.000Z", - "issues": 479 - } - ] - """ - server.addRepoMetric(metrics.issues_active, 'issues-active') - - """ - @api {get} /repo-groups/:repo_group_id/issues-closed Issues Closed (Repo Group) - @apiName issues-closed-repo-group - @apiGroup Evolution - @apiDescription Time series of number of issues closed during a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Issues_Closed.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21681, - "repo_name": "incubator-zipkin", - "date": "2019-01-01T00:00:00.000Z", - "issues": 425 - }, - { - "repo_id": 21682, - "repo_name": "incubator-dubbo", - "date": "2013-01-01T00:00:00.000Z", - "issues": 7 - }, - { - "repo_id": 21682, - "repo_name": "incubator-dubbo", - "date": "2014-01-01T00:00:00.000Z", - "issues": 47 - } - ] - """ - server.addRepoGroupMetric(metrics.issues_closed, 'issues-closed') - - """ - @api {get} /repos/:repo_id/issues-closed Issues Closed (Repo) - @apiName issues-closed-repo - @apiGroup Evolution - @apiDescription Time series of number of issues closed during a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Issues_New.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "incubator-pagespeed-ngx", - "date": "2012-01-01T00:00:00.000Z", - "issues": 97 - }, - { - "repo_name": "incubator-pagespeed-ngx", - "date": "2013-01-01T00:00:00.000Z", - "issues": 395 - }, - { - "repo_name": "incubator-pagespeed-ngx", - "date": "2014-01-01T00:00:00.000Z", - "issues": 265 - } - ] - """ - server.addRepoMetric(metrics.issues_closed, 'issues-closed') - - """ - @api {get} /repo-groups/:repo_group_id/issue-duration Issue Duration (Repo Group) - @apiName issue-duration-repo-group - @apiGroup Evolution - @apiDescription Time since an issue is proposed until it is closed. - <a href="https://github.com/chaoss/wg-evolution/blob/master/focus_areas/code_development.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21017, - "repo_name": "ssl_requirement", - "issue_id": 50320, - "created_at": "2011-05-06T20:20:05.000Z", - "closed_at": "2011-05-06T20:21:47.000Z", - "duration": "0 days 00:01:42.000000000" - }, - { - "repo_id": 21027, - "repo_name": "rails-contributors", - "issue_id": 50328, - "created_at": "2019-06-20T22:56:38.000Z", - "closed_at": "2019-06-21T20:17:28.000Z", - "duration": "0 days 21:20:50.000000000" - }, - { - "repo_id": 21027, - "repo_name": "rails-contributors", - "issue_id": 50329, - "created_at": "2019-06-20T22:01:52.000Z", - "closed_at": "2019-06-22T02:29:03.000Z", - "duration": "1 days 04:27:11.000000000" - } - ] - """ - server.addRepoGroupMetric(metrics.issue_duration, 'issue-duration') - - """ - @api {get} /repos/:repo_id/issue-backlog Issue Duration (Repo) - @apiName issue-duration-repo - @apiGroup Evolution - @apiDescription Time since an issue is proposed until it is closed. - <a href="https://github.com/chaoss/wg-evolution/blob/master/focus_areas/code_development.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. 
- @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "exception_notification", - "issue_id": 50306, - "created_at": "2011-02-13T03:46:06.000Z", - "closed_at": "2011-04-14T23:27:33.000Z", - "duration": "60 days 19:41:27.000000000" - }, - { - "repo_name": "exception_notification", - "issue_id": 50308, - "created_at": "2011-01-19T18:47:41.000Z", - "closed_at": "2013-12-09T13:51:03.000Z", - "duration": "1054 days 19:03:22.000000000" - } - ] - """ - server.addRepoMetric(metrics.issue_duration, 'issue-duration') - - """ - @api {get} /repo-groups/:repo_group_id/issue-participants Issue Participants (Repo Group) - @apiName issue-participants-repo-group - @apiGroup Evolution - @apiDescription How many persons participated in the discussion of issues. - <a href="https://github.com/chaoss/wg-evolution/blob/master/focus_areas/code_development.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21027, - "repo_name": "rails-contributors", - "issue_id": 50328, - "created_at": "2019-06-20T22:56:38.000Z", - "participants": 1 - }, - { - "repo_id": 21030, - "repo_name": "arel", - "issue_id": 50796, - "created_at": "2017-03-02T21:14:46.000Z", - "participants": 1 - }, - { - "repo_id": 21030, - "repo_name": "arel", - "issue_id": 50795, - "created_at": "2017-03-24T15:39:08.000Z", - "participants": 2 - } - ] - """ - server.addRepoGroupMetric(metrics.issue_participants, 'issue-participants') - - """ - @api {get} /repos/:repo_id/issue-participants Issue Participants (Repo) - @apiName issue-participants-repo - @apiGroup Evolution - @apiDescription How many persons participated in the discussion of issues. - <a href="https://github.com/chaoss/wg-evolution/blob/master/focus_areas/code_development.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "arel", - "issue_id": 50796, - "created_at": "2017-03-02T21:14:46.000Z", - "participants": 1 - }, - { - "repo_name": "arel", - "issue_id": 50795, - "created_at": "2017-03-24T15:39:08.000Z", - "participants": 2 - } - ] - """ - server.addRepoMetric(metrics.issue_participants, 'issue-participants') - - """ - @api {get} /repo-groups/:repo_group_id/issue-backlog Issue Backlog (Repo Group) - @apiName issue-backlog-repo-group - @apiGroup Evolution - @apiDescription Number of issues currently open. 
- <a href="https://github.com/chaoss/wg-evolution/blob/master/focus_areas/code_development.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21055, - "repo_name": "cache_digests", - "issue_backlog": 21 - }, - { - "repo_id": 21056, - "repo_name": "rails-dev-box", - "issue_backlog": 1 - }, - { - "repo_id": 21058, - "repo_name": "activerecord-session_store", - "issue_backlog": 24 - } - ] - """ - server.addRepoGroupMetric(metrics.issue_backlog, 'issue-backlog') - - """ - @api {get} /repos/:repo_id/issue-backlog Issue Backlog (Repo) - @apiName issue-backlog-repo - @apiGroup Evolution - @apiDescription Number of issues currently open. - <a href="https://github.com/chaoss/wg-evolution/blob/master/focus_areas/code_development.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name":"render_component", - "issue_backlog": 3 - } - ] - """ - server.addRepoMetric(metrics.issue_backlog, 'issue-backlog') - - """ - @api {get} /repo-groups/:repo_group_id/issue-throughput Issue Throughput (Repo Group) - @apiName issue-throughput-repo-group - @apiGroup Evolution - @apiDescription Ratio of issues closed to total issues. - <a href="https://github.com/chaoss/wg-evolution/blob/master/focus_areas/code_development.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21681, - "repo_name": "incubator-zipkin", - "throughput": 0.819125 - }, - { - "repo_id": 21682, - "repo_name": "incubator-dubbo", - "throughput": 0.861896 - } - ] - """ - server.addRepoGroupMetric(metrics.issue_throughput, 'issue-throughput') - - """ - @api {get} /repos/:repo_id/issue-throughput Issue Throughput (Repo) - @apiName issue-throughput-repo - @apiGroup Evolution - @apiDescription Ratio of issues closed to total issues. - <a href="https://github.com/chaoss/wg-evolution/blob/master/focus_areas/code_development.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "rails-contributors", - "throughput": 0.997531 - } - ] - """ - server.addRepoMetric(metrics.issue_throughput, 'issue-throughput') - - """ - @api {get} /repo-groups/:repo_group_id/issues-first-time-opened New Contributors of Issues (Repo Group) - @apiName New Contributors of Issues(Repo Group) - @apiGroup Evolution - @apiDescription Number of persons opening an issue for the first time. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/issues-first-time-opened.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "issue_date": "2018-05-20T00:00:00.000Z", - "count": 3, - "repo_name": "rails", - "repo_id": 21000 - }, - { - "issue_date": "2019-06-03T00:00:00.000Z", - "count": 23, - "repo_name": "rails", - "repo_id": 21000 - } - ] - """ - server.addRepoGroupMetric( - metrics.issues_first_time_opened, 'issues-first-time-opened') - - """ - @api {get} /repos/:repo_id/issues-first-time-opened New Contributors of Issues (Repo) - @apiName New Contributors of Issues(Repo) - @apiGroup Evolution - @apiDescription Number of persons opening an issue for the first time. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/issues-first-time-opened.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "issue_date": "2018-05-20T00:00:00.000Z", - "count": 3, - "repo_name": "rails" - }, - { - "issue_date": "2019-06-03T00:00:00.000Z", - "count": 23, - "repo_name": "rails" - } - ] - """ - server.addRepoMetric( - metrics.issues_first_time_opened, 'issues-first-time-opened') - - """ - @api {get} /repo-groups/:repo_group_id/issues-first-time-closed Closed Issues New Contributor (Repo Group) - @apiName Closed Issues New Contributors(Repo Group) - @apiGroup Evolution - @apiDescription Number of persons closing an issue for the first time. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/issues-first-time-closed.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "issue_date": "2018-05-20T00:00:00.000Z", - "count": 3, - "repo_name": "rails", - "repo_id": 21000 - }, - { - "issue_date": "2019-06-03T00:00:00.000Z", - "count": 23 - "repo_name": "rails", - "repo_id": 21000 - } - ] - """ - server.addRepoGroupMetric( - metrics.issues_first_time_closed, 'issues-first-time-closed') - - """ - @api {get} /repos/:repo_id/issues-first-time-closed Closed Issues New Contributors (Repo) - @apiName Closed Issues New Contributors(Repo) - @apiGroup Evolution - @apiDescription Number of persons closing an issue for the first time. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/issues-first-time-closed.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "issue_date": "2018-05-20T00:00:00.000Z", - "count": 3, - "repo_name": "rails" - }, - { - "issue_date": "2019-06-03T00:00:00.000Z", - "count": 23, - "repo_name": "rails" - } - ] - """ - server.addRepoMetric( - metrics.issues_first_time_closed, 'issues-first-time-closed') - - """ - @api {get} /repo-groups/:repo_group_id/open-issues-count Open Issues Count (Repo Group) - @apiName open-issues-count-repo-group - @apiGroup Evolution - @apiDescription Count of open issues. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/contributors-new.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "rg_name": "Netflix", - "open_count": 1, - "date": "2017-09-11T00:00:00.000Z" - }, - { - "rg_name": "Netflix", - "open_count": 4, - "date": "2019-06-10T00:00:00.000Z" - } - ] - """ - server.addRepoGroupMetric(metrics.open_issues_count, 'open-issues-count') - - """ - @api {get} /repo-groups/:repo_group_id/open-issues-count Open Issues Count (Repo) - @apiName open-issues-count-repo - @apiGroup Evolution - @apiDescription Count of open issues. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/contributors-new.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21681, - "open_count": 18, - "date": "2019-04-15T00:00:00.000Z" - }, - { - "repo_id": 21681, - "open_count": 16, - "date": "2019-04-22T00:00:00.000Z" - } - ] - """ - server.addRepoMetric(metrics.open_issues_count, 'open-issues-count') - - """ - @api {get} /repos/:repo_id/closed-issues-count Closed Issues Count (Repo Group) - @apiName closed-issues-count-repo-group - @apiGroup Evolution - @apiDescription Count of closed issues. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/contributors-new.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "rg_name": "Apache", - "closed_count": 4, - "date": "2014-06-02T00:00:00.000Z" - }, - { - "rg_name": "Apache", - "closed_count": 6, - "date": "2014-06-09T00:00:00.000Z" - } - ] - """ - server.addRepoGroupMetric(metrics.closed_issues_count, 'closed-issues-count') - - """ - @api {get} /repo-groups/:repo_group_id/closed-issues-count Closed Issues Count (Repo) - @apiName closed-issues-count-repo - @apiGroup Evolution - @apiDescription Count of closed issues. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/contributors-new.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21681, - "closed_count": 26, - "date": "2018-11-26T00:00:00.000Z" - }, - { - "repo_id": 21681, - "closed_count": 14, - "date": "2018-12-03T00:00:00.000Z" - } - ] - """ - server.addRepoMetric(metrics.closed_issues_count, 'closed-issues-count') - - """ - @api {get} /repo-groups/:repo_group_id/issues-open-age Open Issue Age (Repo Group) - @apiName Open Issue Age(Repo Group) - @apiGroup Evolution - @apiDescription Age of open issues. 
- <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/issues-open-age.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21000, - "repo_name": "rails", - "issue_id": 38318, - "date": "2009-05-15T19:48:43.000Z", - "open_date": 3696 - }, - { - "repo_id": 21000, - "repo_name": "rails", - "issue_id": 38317, - "date": "2009-05-16T14:35:40.000Z", - "open_date": 3695 - } - ] - """ - server.addRepoGroupMetric( - metrics.issues_open_age, 'issues-open-age') - - """ - @api {get} /repos/:repo_id/issues-open-age Open Issue Age (Repo) - @apiName Open Issue Age(Repo) - @apiGroup Evolution - @apiDescription Age of open issues. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/issues-open-age.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21000, - "repo_name": "rails", - "issue_id": 38318, - "date": "2009-05-15T19:48:43.000Z", - "open_date": 3696 - }, - { - "repo_id": 21000, - "repo_name": "rails", - "issue_id": 38317, - "date": "2009-05-16T14:35:40.000Z", - "open_date": 3695 - } - ] - """ - server.addRepoMetric( - metrics.issues_open_age, 'issues-open-age') - - """ - @api {get} /repo-groups/:repo_group_id/issues-closed-resolution-duration Closed Issue Resolution Duration (Repo Group) - @apiName Closed Issue Resolution Duration(Repo Group) - @apiGroup Evolution - @apiDescription Duration of time for issues to be resolved. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/issues-closed-resolution-duration.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name":"incubator-dubbo", - "gh_issue_number":4110, - "issue_title":"rm incubating word", - "created_at":"2019-05-22T03:18:13.000Z", - "closed_at":"2019-05-22T05:27:29.000Z", - "diffdate":0.0 - }, - { - "repo_name":"incubator-dubbo", - "gh_issue_number":4111, - "issue_title":"nacos registry serviceName may conflict", - "created_at":"2019-05-22T03:30:23.000Z", - "closed_at":"2019-05-23T14:36:17.000Z", - "diffdate":1.0 - } - ] - """ - server.addRepoGroupMetric( - metrics.issues_closed_resolution_duration, 'issues-closed-resolution-duration') - - """ - @api {get} /repos/:repo_id/issues-closed-resolution-duration Closed Issue Resolution Duration (Repo) - @apiName Closed Issue Resolution Duration(Repo) - @apiGroup Evolution - @apiDescription Duration of time for issues to be resolved. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/issues-closed-resolution-duration.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. 
- @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21682 - "repo_name":"incubator-dubbo", - "gh_issue_number":4223, - "issue_title":"Cloud Native PR", - "created_at":"2019-05-31T07:55:44.000Z", - "closed_at":"2019-06-17T03:12:48.000Z", - "diffdate":16.0 - }, - { - "repo_id": 21682, - "repo_name":"incubator-dubbo", - "gh_issue_number":4131, - "issue_title":"Reduce context switching cost by optimizing thread model on consumer side.", - "created_at":"2019-05-23T06:18:21.000Z", - "closed_at":"2019-06-03T08:07:27.000Z", - "diffdate":11.0 - } - ] - """ - server.addRepoMetric( - metrics.issues_closed_resolution_duration, 'issues-closed-resolution-duration') - - """ - @api {get} /repo-groups/:repo_group_id/issues-maintainer-response-duration Issue Response Time (Repo Group) - @apiName Issue Response Time(Repo Group) - @apiGroup Evolution - @apiDescription Duration of time for issues to be resolved. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/issues-maintainer-response-duration.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21987, - "repo_name": "qpid-proton", - "average_days_comment": 27.1111111111 - }, - { - "repo_id": 22252, - "repo_name": "cordova-create", - "average_days_comment": 0.8 - } - ] - """ - server.addRepoGroupMetric(metrics.issues_maintainer_response_duration, 'issues-maintainer-response-duration') - - """ - @api {get} /repos/:repo_id/issues-maintainer-response-duration Issue Response Time (Repo) - @apiName Issue Response Time(Repo) - @apiGroup Evolution - @apiDescription Duration of time for issues to be resolved. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/issues-maintainer-response-duration.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} repo_id Repository ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21987, - "repo_name": "qpid-proton", - "average_days_comment": 27.1111111111 - } - ] - """ - server.addRepoMetric(metrics.issues_maintainer_response_duration, 'issues-maintainer-response-duration') - - """ - @api {get} /repo-groups/:repo_group_id/avgerage-issue-resolution-time Average Issue Resolution Time (Repo Group) - @apiName average-issue-resolution-time-repo-group - @apiGroup Risk - @apiDescription The average issue resolution time. 
- <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/business-risk.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21353, - "repo_name": "open_id_authentication", - "avg_issue_resolution_time": "1413 days 15:39:48" - }, - { - "repo_id": 21362, - "repo_name": "country_select", - "avg_issue_resolution_time": "140 days 09:37:58.2" - } - ] - """ - server.addRepoGroupMetric(metrics.average_issue_resolution_time, 'average-issue-resolution-time') - - """ - @api {get} /repos/:repo_id/avgerage-issue-resolution-time Average Issue Resolution Time (Repo) - @apiName average-issue-resolution-time-repo - @apiGroup Risk - @apiDescription The average issue resolution time. - <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/business-risk.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "maven-release", - "avg_issue_resolution_time": "276 days 13:54:13.2" - } - ] - """ - server.addRepoMetric(metrics.average_issue_resolution_time, 'average-issue-resolution-time') - - """ - @api {get} /repo-groups/:repo_group_id/issue-comments-mean Issue Comments Mean (Repo Group) - @apiName issue-comments-mean-repo-group - @apiGroup Experimental - @apiDescription Mean(Average) of issue comments per day. - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} [group_by="week"] Allows for results to be grouped by day, week, month, or year. E.g. values: `year`, `day`, `month` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21326, - "date": "2018-01-01T00:00:00.000Z", - "mean":0.6191780822 - }, - { - "repo_id": 21326, - "date": "2019-01-01T00:00:00.000Z", - "mean": 0.7671232877 - }, - { - "repo_id": 21327, - "date": "2015-01-01T00:00:00.000Z", - "mean": 0.0602739726 - } - ] - """ - server.addRepoGroupMetric(metrics.issue_comments_mean, 'issue-comments-mean') - - """ - @api {get} /repos/:repo_id/issue-comments-mean Issue Comments Mean (Repo) - @apiName issue-comments-mean-repo - @apiGroup Experimental - @apiDescription Mean(Average) of issue comments per day. - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21326, - "date": "2018-01-01T00:00:00.000Z", - "mean":0.6191780822 - }, - { - "repo_id": 21326, - "date": "2019-01-01T00:00:00.000Z", - "mean": 0.7671232877 - } - ] - """ - server.addRepoMetric(metrics.issue_comments_mean, 'issue-comments-mean') - - """ - @api {get} /repo-groups/:repo_group_id/issue-comments-mean-std Issue Comments Mean Std (Repo Group) - @apiName issue-comments-mean-std-repo-group - @apiGroup Experimental - @apiDescription Mean(Average) and Standard Deviation of issue comments per day. - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} [group_by="week"] Allows for results to be grouped by day, week, month, or year. E.g. 
values: `year`, `day`, `month` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21326, - "date": "2018-01-01T00:00:00.000Z", - "mean":0.6191780822 - }, - { - "repo_id": 21326, - "date": "2019-01-01T00:00:00.000Z", - "mean": 0.7671232877 - }, - { - "repo_id": 21327, - "date": "2015-01-01T00:00:00.000Z", - "mean": 0.0602739726 - } - ] - """ - server.addRepoGroupMetric(metrics.issue_comments_mean_std, 'issue-comments-mean-std') - - """ - @api {get} /repos/:repo_id/issue-comments-mean-std Issue Comments Mean Std (Repo) - @apiName issue-comments-mean-repo - @apiGroup Experimental - @apiDescription Mean(Average) and Standard Deviation of issue comments per day. - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21000, - "date": "2011-01-01T00:00:00.000Z", - "average": 2.5, - "standard_deviation":1.7159383568 - }, - { - "repo_id": 21000, - "date": "2012-01-01T00:00:00.000Z", - "average": 1.9666666667, - "standard_deviation": 1.3767361036 - } - ] - """ - server.addRepoMetric(metrics.issue_comments_mean_std, 'issue-comments-mean-std') - - """ - @api {get} /repo-groups/:repo_group_id/abandoned_issues Abandoned Issues (Repo) - @apiName Abandoned Issues - @apiGroup Experimental - @apiDescription List of abandoned issues (last updated >= 1 year ago) - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "updated_at": "2017-10-30T06:52:19.000Z", - "issue_id": 125071, - "repo_id": 22004 - }, - { - "updated_at": "2018-01-10T06:02:16.000Z", - "issue_id": 125070, - "repo_id": 22003 - } - ] - """ - server.addRepoGroupMetric(metrics.abandoned_issues, 'abandoned_issues') \ No newline at end of file diff --git a/augur/metrics/message.py b/augur/metrics/message.py new file mode 100644 --- /dev/null +++ b/augur/metrics/message.py @@ -0,0 +1,3 @@ +""" +Metrics that provide data about messages (of any form) & their associated activity +""" diff --git a/augur/metrics/message/__init__.py b/augur/metrics/message/__init__.py deleted file mode 100644 --- a/augur/metrics/message/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .message import create_message_metrics - -from .routes import create_message_routes \ No newline at end of file diff --git a/augur/metrics/message/message.py b/augur/metrics/message/message.py deleted file mode 100644 --- a/augur/metrics/message/message.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -Metrics that provide data about messages (of any form) & their associated activity -""" - -from augur.util import add_metrics - -def create_message_metrics(metrics): - add_metrics(metrics, __name__) diff --git a/augur/metrics/message/routes.py b/augur/metrics/message/routes.py deleted file mode 100644 --- a/augur/metrics/message/routes.py +++ /dev/null @@ -1,6 +0,0 @@ - -def create_message_routes(server): - - metrics = server._augur.metrics - - diff --git a/augur/metrics/metrics.py b/augur/metrics/metrics.py deleted file mode 100644 --- a/augur/metrics/metrics.py +++ /dev/null @@ -1,63 +0,0 @@ -import sqlalchemy as s -from augur.util import logger - -from .commit import create_commit_metrics, create_commit_routes -from .contributor import create_contributor_metrics, create_contributor_routes -from .experimental import create_experimental_metrics, create_experimental_routes -from .insight import create_insight_metrics, create_insight_routes -from .issue import create_issue_metrics, 
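# A minimal usage sketch for the REST endpoints documented in the apidoc blocks above,
# assuming a local Augur API reachable at localhost:5000 under the "/api/unstable" prefix
# (host, port, prefix, and the repo_group_id value are assumptions, not taken from the patch);
# the JSON field names follow the documented success-response examples.
import requests

def print_issues_open_age(repo_group_id=20):
    # Fetch the issues-open-age endpoint for a repository group and print one row per open issue.
    url = ("http://localhost:5000/api/unstable/"
           "repo-groups/{}/issues-open-age".format(repo_group_id))
    for row in requests.get(url).json():
        print(row["repo_name"], row["issue_id"], row["open_date"])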
create_issue_routes -from .message import create_message_metrics, create_message_routes -from .platform import create_platform_metrics, create_platform_routes -from .pull_request import create_pull_request_metrics, create_pull_request_routes -from .repo_meta import create_repo_meta_metrics, create_repo_meta_routes -from .util import create_util_metrics, create_util_routes - -class MetricDefinitions(): - def __init__(self, app): - self.app = app - self.projects = None - - self.user = self.app.read_config('Database', 'user') - self.password = self.app.read_config('Database', 'password') - self.host = self.app.read_config('Database', 'host') - self.port = self.app.read_config('Database', 'port') - self.dbname = self.app.read_config('Database', 'name') - self.schema = self.app.read_config('Database', 'schema') - - self.database_connection_string = 'postgresql://{}:{}@{}:{}/{}'.format( - self.user, self.password, self.host, self.port, self.dbname - ) - - self.database = s.create_engine(self.database_connection_string, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(self.schema)}) - - spdx_schema = 'spdx' - self.spdx_db = s.create_engine(self.database_connection_string, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={},{}'.format(spdx_schema, self.schema)}) - - logger.debug('Augur DB: Connecting to {} schema of {}:{}/{} as {}'.format(self.schema, self.host, self.port, self.dbname, self.user)) - - # TODO: not hardcode this - create_commit_metrics(self) - create_contributor_metrics(self) - create_experimental_metrics(self) - create_insight_metrics(self) - create_issue_metrics(self) - create_message_metrics(self) - create_platform_metrics(self) - create_pull_request_metrics(self) - create_repo_meta_metrics(self) - create_util_metrics(self) - - def create_routes(self, server): - # TODO: not hardcode this - create_commit_routes(server) - create_contributor_routes(server) - create_experimental_routes(server) - create_insight_routes(server) - create_issue_routes(server) - create_message_routes(server) - create_platform_routes(server) - create_pull_request_routes(server) - create_repo_meta_routes(server) - create_util_routes(server) diff --git a/augur/metrics/platform.py b/augur/metrics/platform.py new file mode 100644 --- /dev/null +++ b/augur/metrics/platform.py @@ -0,0 +1,4 @@ +""" +Metrics that provide data about platform & their associated activity +""" + diff --git a/augur/metrics/platform/__init__.py b/augur/metrics/platform/__init__.py deleted file mode 100644 --- a/augur/metrics/platform/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .platform import create_platform_metrics - -from .routes import create_platform_routes \ No newline at end of file diff --git a/augur/metrics/platform/platform.py b/augur/metrics/platform/platform.py deleted file mode 100644 --- a/augur/metrics/platform/platform.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -Metrics that provide data about platform & their associated activity -""" - -from augur.util import add_metrics - -def create_platform_metrics(metrics): - add_metrics(metrics, __name__) diff --git a/augur/metrics/platform/routes.py b/augur/metrics/platform/routes.py deleted file mode 100644 --- a/augur/metrics/platform/routes.py +++ /dev/null @@ -1,4 +0,0 @@ - -def create_platform_routes(server): - metrics = server._augur.metrics - diff --git a/augur/metrics/pull_request.py b/augur/metrics/pull_request.py new file mode 100644 --- /dev/null +++ b/augur/metrics/pull_request.py @@ -0,0 +1,1125 @@ +""" +Metrics that 
provide data about pull requests & their associated activity +""" + +import datetime +import sqlalchemy as s +import pandas as pd +from augur.util import register_metric + +@register_metric() +def pull_requests_merge_contributor_new(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): + """ + Returns a timeseries of the count of persons contributing with an accepted commit for the first time. + + :param repo_id: The repository's id + :param repo_group_id: The repository's group id + :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of persons/period + """ + if not begin_date: + begin_date = '1970-1-1 00:00:01' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + + if repo_id: + commitNewContributor = s.sql.text(""" + SELECT date_trunc(:period, new_date::DATE) as commit_date, + COUNT(cmt_author_email), repo_name + FROM ( SELECT repo_name, cmt_author_email, MIN(TO_TIMESTAMP(cmt_author_date,'YYYY-MM-DD')) AS new_date + FROM commits JOIN repo ON commits.repo_id = repo.repo_id + WHERE commits.repo_id = :repo_id + AND TO_TIMESTAMP(cmt_author_date,'YYYY-MM-DD') BETWEEN :begin_date AND :end_date AND cmt_author_email IS NOT NULL + GROUP BY cmt_author_email, repo_name + ) as abc GROUP BY commit_date, repo_name + """) + results = pd.read_sql(commitNewContributor, self.database, params={'repo_id': repo_id, 'period': period, + 'begin_date': begin_date, + 'end_date': end_date}) + else: + commitNewContributor = s.sql.text(""" + SELECT abc.repo_id, repo_name ,date_trunc(:period, new_date::DATE) as commit_date, + COUNT(cmt_author_email) + FROM (SELECT cmt_author_email, MIN(TO_TIMESTAMP(cmt_author_date, 'YYYY-MM-DD')) AS new_date, repo_id + FROM commits + WHERE repo_id in (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND TO_TIMESTAMP(cmt_author_date, 'YYYY-MM-DD') BETWEEN :begin_date AND :end_date + AND cmt_author_email IS NOT NULL + GROUP BY cmt_author_email, repo_id + ) as abc, repo + WHERE abc.repo_id = repo.repo_id + GROUP BY abc.repo_id, repo_name, commit_date + """) + results = pd.read_sql(commitNewContributor, self.database, + params={'repo_group_id': repo_group_id, 'period': period, + 'begin_date': begin_date, + 'end_date': end_date}) + return results + +@register_metric() +def pull_requests_closed_no_merge(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): + """ + Returns a timeseries of the which were closed but not merged + + :param repo_id: The repository's id + :param repo_group_id: The repository's group id + :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of persons/period + """ + if not begin_date: + begin_date = '1970-1-1 00:00:01' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + + if repo_id: + closedNoMerge = s.sql.text(""" + SELECT DATE_TRUNC(:period, pull_requests.pr_closed_at) AS closed_date, + COUNT(pull_request_id) as pr_count + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id = :repo_id + AND pull_requests.pr_closed_at is NOT NULL AND + pull_requests.pr_merged_at 
is NULL + GROUP BY closed_date, pull_request_id + ORDER BY closed_date + """) + results = pd.read_sql(closedNoMerge, self.database, params={'repo_id': repo_id, 'period': period, + 'begin_date': begin_date, + 'end_date': end_date}) + + else: + closedNoMerge = s.sql.text(""" + SELECT DATE_TRUNC(:period, pull_requests.pr_closed_at) AS closed_date, + COUNT(pull_request_id) as pr_count + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id WHERE pull_requests.repo_id in (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + and pull_requests.pr_closed_at is NOT NULL and pull_requests.pr_merged_at is NULL + GROUP BY closed_date, pull_request_id + ORDER BY closed_date + """) + + results = pd.read_sql(closedNoMerge, self.database, + params={'repo_group_id': repo_group_id, 'period': period, + 'begin_date': begin_date, + 'end_date': end_date}) + return results + +@register_metric() +def reviews(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): + """ Returns a timeseris of new reviews or pull requests opened + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of new reviews/period + """ + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + if not repo_id: + reviews_SQL = s.sql.text(""" + SELECT + pull_requests.repo_id, + repo_name, + DATE_TRUNC(:period, pull_requests.pr_created_at) AS date, + COUNT(pr_src_id) AS pull_requests + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id IN + (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND pull_requests.pr_created_at + BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') + AND to_timestamp(:end_date, 'YYYY-MM-DD') + GROUP BY pull_requests.repo_id, repo_name, date + ORDER BY pull_requests.repo_id, date + """) + + results = pd.read_sql(reviews_SQL, self.database, + params={'period': period, 'repo_group_id': repo_group_id, + 'begin_date': begin_date, 'end_date': end_date }) + return results + + else: + reviews_SQL = s.sql.text(""" + SELECT + repo_name, + DATE_TRUNC(:period, pull_requests.pr_created_at) AS date, + COUNT(pr_src_id) AS pull_requests + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id = :repo_id + AND pull_requests.pr_created_at + BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD HH24:MI:SS') + AND to_timestamp(:end_date, 'YYYY-MM-DD HH24:MI:SS') + GROUP BY date, repo_name + ORDER BY date + """) + + results = pd.read_sql(reviews_SQL, self.database, + params={'period': period, 'repo_id': repo_id, + 'begin_date': begin_date, 'end_date': end_date}) + return results + +@register_metric() +def reviews_accepted(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): + """Returns a timeseries of number of reviews or pull requests accepted. 
+ + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of accepted reviews/period + """ + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + if not repo_id: + reviews_accepted_SQL = s.sql.text(""" + SELECT + pull_requests.repo_id, + repo.repo_name, + DATE_TRUNC(:period, pull_requests.pr_merged_at) AS date, + COUNT(pr_src_id) AS pull_requests + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id IN + (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND pr_merged_at IS NOT NULL + AND pr_merged_at + BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') + AND to_timestamp(:end_date, 'YYYY-MM-DD') + GROUP BY pull_requests.repo_id, repo_name, date + ORDER BY pull_requests.repo_id, date + """) + + results = pd.read_sql(reviews_accepted_SQL, self.database, + params={'period': period, 'repo_group_id': repo_group_id, + 'begin_date': begin_date, 'end_date': end_date}) + return results + else: + reviews_accepted_SQL = s.sql.text(""" + SELECT + repo.repo_name, + DATE_TRUNC(:period, pull_requests.pr_merged_at) AS date, + COUNT(pr_src_id) AS pull_requests + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id = :repo_id + AND pr_merged_at IS NOT NULL + AND pr_merged_at + BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') + AND to_timestamp(:end_date, 'YYYY-MM-DD') + GROUP BY date, repo.repo_name + ORDER BY date + """) + + results = pd.read_sql(reviews_accepted_SQL, self.database, + params={'period': period, 'repo_id': repo_id, + 'begin_date': begin_date, 'end_date': end_date}) + return results + +@register_metric() +def reviews_declined(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): + """ Returns a time series of reivews declined + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of declined reviews/period + """ + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + if not repo_id: + reviews_declined_SQL = s.sql.text(""" + SELECT + pull_requests.repo_id, + repo.repo_name, + DATE_TRUNC(:period, pull_requests.pr_closed_at) AS date, + COUNT(pr_src_id) AS pull_requests + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id IN + (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND pr_src_state = 'closed' AND pr_merged_at IS NULL + AND pr_closed_at + BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') + AND to_timestamp(:end_date, 'YYYY-MM-DD') + GROUP BY pull_requests.repo_id, repo_name, date + ORDER BY pull_requests.repo_id, date + """) + + results = pd.read_sql(reviews_declined_SQL, self.database, + params={'period': period, 'repo_group_id': repo_group_id, + 'begin_date': begin_date, 'end_date': end_date }) + return results 
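# A minimal smoke-test sketch for the metric functions in this module, assuming the
# @register_metric decorator returns the wrapped function unchanged and that an object
# exposing a SQLAlchemy engine as `.database` is all these queries need; the connection
# string below is a placeholder, not a real deployment.
import sqlalchemy as s

class _MetricsStub:
    def __init__(self, connection_string):
        self.database = s.create_engine(connection_string)

stub = _MetricsStub("postgresql://augur:password@localhost:5432/augur")
# `reviews` is defined above and returns a pandas DataFrame of pull requests opened per period.
print(reviews(stub, repo_group_id=20, period="month").head())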
+ else: + reviews_declined_SQL = s.sql.text(""" + SELECT + repo.repo_name, + DATE_TRUNC(:period, pull_requests.pr_closed_at) AS date, + COUNT(pr_src_id) AS pull_requests + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id = :repo_id + AND pr_src_state = 'closed' AND pr_merged_at IS NULL + AND pr_closed_at + BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') + AND to_timestamp(:end_date, 'YYYY-MM-DD') + GROUP BY date, repo.repo_name + ORDER BY date + """) + + results = pd.read_sql(reviews_declined_SQL, self.database, + params={'period': period, 'repo_id': repo_id, + 'begin_date': begin_date, 'end_date': end_date}) + return results + +@register_metric() +def review_duration(self, repo_group_id, repo_id=None, begin_date=None, end_date=None): + """ Returns the duration of each accepted review. + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of pull request id with the corresponding duration + """ + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + if not repo_id: + review_duration_SQL = s.sql.text(""" + SELECT + pull_requests.repo_id, + repo.repo_name, + pull_requests.pull_request_id, + pull_requests.pr_created_at AS created_at, + pull_requests.pr_merged_at AS merged_at, + (pr_merged_at - pr_created_at) AS duration + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id IN + (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND pr_merged_at IS NOT NULL + AND pr_created_at + BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') + AND to_timestamp(:end_date, 'YYYY-MM-DD') + ORDER BY pull_requests.repo_id, pull_requests.pull_request_id + """) + + results = pd.read_sql(review_duration_SQL, self.database, + params={'repo_group_id': repo_group_id, + 'begin_date': begin_date, + 'end_date': end_date}) + results['duration'] = results['duration'].astype(str) + return results + else: + review_duration_SQL = s.sql.text(""" + SELECT + repo_name, + pull_request_id, + pr_created_at AS created_at, + pr_merged_at AS merged_at, + (pr_merged_at - pr_created_at) AS duration + FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id = :repo_id + AND pr_merged_at IS NOT NULL + AND pr_created_at + BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') + AND to_timestamp(:end_date, 'YYYY-MM-DD') + ORDER BY pull_requests.repo_id, pull_request_id + """) + + results = pd.read_sql(review_duration_SQL, self.database, + params={'repo_id': repo_id, + 'begin_date': begin_date, + 'end_date': end_date}) + results['duration'] = results['duration'].astype(str) + return results + +@register_metric() +def pull_request_acceptance_rate(self, repo_group_id, repo_id=None, begin_date=None, end_date=None, group_by='week'): + """ + Timeseries of pull request acceptance rate (expressed as the ratio of pull requests merged on a date to the count of pull requests opened on a date) + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :return: DataFrame with ratio/day + """ + if not begin_date: + begin_date = '1970-1-1 00:00:01' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + + if not repo_id: 
+ prAccRateSQL = s.sql.text(""" + SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate" + FROM + ( + SELECT count(issue_events.issue_id) AS num_approved, + date_trunc(:group_by,issue_events.created_at) AS accepted_on + FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id + JOIN repo ON issues.repo_id = repo.repo_id + WHERE action = 'merged' + AND issues.pull_request IS NOT NULL + AND repo_group_id = :repo_group_id + AND issue_events.created_at BETWEEN :begin_date AND :end_date + GROUP BY accepted_on + ORDER BY accepted_on + ) accepted + JOIN + ( + SELECT count(issue_events.issue_id) AS num_open, + date_trunc(:group_by,issue_events.created_at) AS date_created + FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id + JOIN repo ON issues.repo_id = repo.repo_id + WHERE action = 'ready_for_review' + AND issues.pull_request IS NOT NULL + AND repo_group_id = :repo_group_id + AND issue_events.created_at BETWEEN :begin_date AND :end_date + GROUP BY date_created + ORDER BY date_created + ) opened + ON opened.date_created = accepted.accepted_on + """) + results = pd.read_sql(prAccRateSQL, self.database, params={'repo_group_id': repo_group_id, 'group_by': group_by, + 'begin_date': begin_date, 'end_date': end_date}) + return results + else: + prAccRateSQL = s.sql.text(""" + SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate" + FROM + ( + SELECT count(issue_events.issue_id) AS num_approved, + date_trunc(:group_by,issue_events.created_at) AS accepted_on + FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id + WHERE action = 'merged' + AND issues.pull_request IS NOT NULL + AND repo_id = :repo_id + AND issue_events.created_at BETWEEN :begin_date AND :end_date + GROUP BY accepted_on + ORDER BY accepted_on + ) accepted + JOIN + ( + SELECT count(issue_events.issue_id) AS num_open, + date_trunc(:group_by,issue_events.created_at) AS date_created + FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id + WHERE action = 'ready_for_review' + AND issues.pull_request IS NOT NULL + AND repo_id = :repo_id + AND issue_events.created_at BETWEEN :begin_date AND :end_date + GROUP BY date_created + ORDER BY date_created + ) opened + ON opened.date_created = accepted.accepted_on + """) + results = pd.read_sql(prAccRateSQL, self.database, params={'repo_id': repo_id, 'group_by': group_by, + 'begin_date': begin_date, 'end_date': end_date}) + return results + +@register_metric() +def pull_request_average_time_to_close(self, repo_group_id, repo_id=None, group_by='month', time_unit='hours', begin_date=None, end_date=None): + """ Avegage time to close pull requests with merged_status and the time frame + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month' + :param time_unit: Unit of time for data, options are: 'hours', or 'days', defaults to 'hours' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of average time to close pull request + """ + + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + + unit_options = ['year', 'month', 'week', 'day'] + time_group_bys = [] + for unit in 
unit_options.copy(): + if group_by not in unit_options: + continue + time_group_bys.append('closed_{}'.format(unit)) + del unit_options[0] + + if not repo_id: + pr_all_SQL = s.sql.text(""" + SELECT + repo_id, + repo_name, + repo_group_id, + rg_name AS repo_group_name, + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', pr_closed_at :: DATE ) AS closed_month, + date_part('week', pr_closed_at :: DATE) AS closed_week, + date_part('day', pr_closed_at :: DATE) AS closed_day, + EXTRACT (epoch FROM time_to_close)/ 86400 AS average_days_to_close, + EXTRACT (epoch FROM time_to_close)/ 3600 AS average_hours_to_close, + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, + count(*) AS num_pull_requests + FROM ( + SELECT + pull_requests.pull_request_id, + pull_requests.repo_id, + repo_name, + repo.repo_group_id, + rg_name, + pr_closed_at, + pr_created_at, + pr_closed_at - pr_created_at AS time_to_close, + pr_merged_at + FROM pull_request_message_ref, message, repo_groups, + pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id IN + (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND repo.repo_id = pull_requests.repo_id + AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id + AND pull_request_message_ref.msg_id = message.msg_id + AND repo.repo_group_id = repo_groups.repo_group_id + AND pr_created_at::DATE >= :begin_date ::DATE + AND pr_closed_at::DATE <= :end_date ::DATE + GROUP BY pull_requests.pull_request_id, repo.repo_name, repo.repo_group_id, repo_groups.rg_name + ) time_between_responses + GROUP BY merged_status, time_between_responses.pr_closed_at, time_between_responses.time_to_close, time_between_responses.repo_id, time_between_responses.repo_name, time_between_responses.repo_group_id, time_between_responses.rg_name + ORDER BY merged_status + """) + + else: + pr_all_SQL = s.sql.text(""" + SELECT + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', pr_closed_at :: DATE ) AS closed_month, + date_part('week', pr_closed_at :: DATE) AS closed_week, + date_part('day', pr_closed_at :: DATE) AS closed_day, + EXTRACT (epoch FROM time_to_close)/ 86400 AS average_days_to_close, + EXTRACT (epoch FROM time_to_close)/ 3600 AS average_hours_to_close, + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, + count(*) AS num_pull_requests + FROM ( + SELECT pull_requests.pull_request_id, + pr_closed_at, + pr_created_at, + pr_closed_at - pr_created_at AS time_to_close, + pr_merged_at + FROM pull_requests, repo, pull_request_message_ref, message + WHERE repo.repo_id = :repo_id + AND repo.repo_id = pull_requests.repo_id + AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id + AND pull_request_message_ref.msg_id = message.msg_id + AND pr_created_at::DATE >= :begin_date ::DATE + AND pr_closed_at::DATE <= :end_date ::DATE + GROUP BY pull_requests.pull_request_id + ) time_between_responses + GROUP BY merged_status, time_between_responses.pr_closed_at, time_between_responses.time_to_close + ORDER BY merged_status + """) + + pr_all = pd.read_sql(pr_all_SQL, self.database, + params={'repo_id': repo_id, 'repo_group_id':repo_group_id, + 'begin_date': begin_date, 'end_date': end_date}) + if not repo_id: + pr_avg_time_to_close = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).mean().reset_index()[['merged_status', 'repo_id', 'repo_name', 
'repo_group_id', 'repo_group_name'] + time_group_bys + ['average_{}_to_close'.format(time_unit)]] + else: + pr_avg_time_to_close = pr_all.groupby(['merged_status'] + time_group_bys).mean().reset_index()[time_group_bys + ['merged_status', 'average_{}_to_close'.format(time_unit)]] + + return pr_avg_time_to_close + +@register_metric() +def pull_request_average_time_between_responses(self, repo_group_id, repo_id=None, group_by='month', time_unit='hours', begin_date=None, end_date=None): + """ Average time between responses with merged_status and the time frame + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month' + :param time_unit: Unit of time for data, options are: 'minutes', or 'hours', defaults to 'hours' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of average time between responses + """ + + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + + unit_options = ['year', 'month', 'week', 'day'] + time_group_bys = [] + for unit in unit_options.copy(): + if group_by not in unit_options: + continue + time_group_bys.append('closed_{}'.format(unit)) + del unit_options[0] + + if not repo_id: + pr_all_SQL = s.sql.text(""" + SELECT + repo_id, + repo_name, + repo_group_id, + rg_name AS repo_group_name, + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', pr_closed_at :: DATE ) AS closed_month, + date_part( 'week', pr_closed_at :: DATE ) AS closed_week, + date_part( 'day', pr_closed_at :: DATE ) AS closed_day, + (EXTRACT(epoch FROM average_time_between_responses)/3600) AS average_hours_between_responses, + (EXTRACT(epoch FROM average_time_between_responses)/60) AS average_minutes_between_responses, + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, + count(*) AS num_pull_requests + FROM ( + SELECT + repo_name, + repo_groups.repo_group_id, + rg_name, + pull_requests.repo_id, + pull_requests.pull_request_id, + pr_closed_at, + pr_created_at, + pr_merged_at, + (MAX(message.msg_timestamp) - MIN(message.msg_timestamp)) / COUNT(DISTINCT message.msg_timestamp) AS average_time_between_responses + FROM pull_request_message_ref, message, repo_groups, + pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id IN + (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND repo.repo_id = pull_requests.repo_id + AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id + AND pull_request_message_ref.msg_id = message.msg_id + AND repo_groups.repo_group_id = repo.repo_group_id + AND pr_created_at::DATE >= :begin_date ::DATE + AND pr_closed_at::DATE <= :end_date ::DATE + GROUP BY pull_requests.pull_request_id, repo.repo_id, repo.repo_name, repo_groups.repo_group_id, repo_groups.rg_name + ) time_between_responses + GROUP BY closed_year, closed_month, merged_status, time_between_responses.pr_closed_at, time_between_responses.average_time_between_responses, time_between_responses.repo_id, time_between_responses.repo_name,
time_between_responses.repo_group_id, time_between_responses.rg_name + """) + + else: + pr_all_SQL = s.sql.text(""" + SELECT + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', pr_closed_at :: DATE ) AS closed_month, + date_part( 'week', pr_closed_at :: DATE ) AS closed_week, + date_part( 'day', pr_closed_at :: DATE ) AS closed_day, + (EXTRACT(epoch FROM average_time_between_responses)/3600) AS average_hours_between_responses, + (EXTRACT(epoch FROM average_time_between_responses)/60) AS average_minutes_between_responses, + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, + count(*) AS num_pull_requests + FROM ( + SELECT pull_requests.pull_request_id, + pr_closed_at, + pr_created_at, + pr_merged_at, + (MAX(message.msg_timestamp) - MIN(message.msg_timestamp)) / COUNT(DISTINCT message.msg_timestamp) AS average_time_between_responses + FROM pull_requests, repo, pull_request_message_ref, message + WHERE repo.repo_id = :repo_id + AND repo.repo_id = pull_requests.repo_id + AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id + AND pull_request_message_ref.msg_id = message.msg_id + AND pr_created_at::DATE >= :begin_date ::DATE + AND pr_closed_at::DATE <= :end_date ::DATE + GROUP BY pull_requests.pull_request_id + ) time_between_responses + GROUP BY closed_year, closed_month, merged_status, time_between_responses.pr_closed_at, time_between_responses.average_time_between_responses + """) + + pr_all = pd.read_sql(pr_all_SQL, self.database, + params={'repo_id': repo_id, 'repo_group_id':repo_group_id, + 'begin_date': begin_date, 'end_date': end_date}) + if not repo_id: + pr_avg_time_between_responses = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).mean().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + ['average_{}_between_responses'.format(time_unit)]] + else: + pr_avg_time_between_responses = pr_all.groupby(['merged_status'] + time_group_bys).mean().reset_index()[time_group_bys + ['merged_status', 'average_{}_between_responses'.format(time_unit)]] + + return pr_avg_time_between_responses + +@register_metric() +def pull_request_average_commit_counts(self, repo_group_id, repo_id=None, group_by='month', begin_date=None, end_date=None): + """ Average commits per pull request, with merged status and time frame + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of average commits per pull request + """ + + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + + unit_options = ['year', 'month', 'week', 'day'] + time_group_bys = [] + for unit in unit_options.copy(): + if group_by not in unit_options: + continue + time_group_bys.append('closed_{}'.format(unit)) + del unit_options[0] + + if not repo_id: + pr_all_SQL = s.sql.text(""" + SELECT + repo_id, + repo_name, + repo_group_id, + repo_group_name, + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', 
pr_closed_at :: DATE ) AS closed_month, + date_part( 'week', pr_closed_at :: DATE ) AS closed_week, + date_part( 'day', pr_closed_at :: DATE ) AS closed_day, + commit_count AS average_commits_per_pull_request, + count(*) AS pr_count + FROM ( + SELECT + pull_requests.repo_id, + repo.repo_name, + repo_groups.repo_group_id, + rg_name AS repo_group_name, + pull_request_commits.pull_request_id, + count(DISTINCT pr_cmt_sha) AS commit_count, + pr_merged_at, + pr_closed_at, + pr_created_at + FROM augur_data.pull_request_commits, augur_data.pull_request_meta,augur_data.repo_groups, + augur_data.pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id IN + (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND pull_requests.pull_request_id = pull_request_commits.pull_request_id + AND pull_requests.pull_request_id = pull_request_meta.pull_request_id + AND pr_cmt_sha <> pull_requests.pr_merge_commit_sha + AND pr_cmt_sha <> pull_request_meta.pr_sha + AND repo_groups.repo_group_id = repo.repo_group_id + AND pr_created_at::DATE >= :begin_date ::DATE + AND pr_closed_at::DATE <= :end_date ::DATE + GROUP BY pull_request_commits.pull_request_id, pr_merged_at, pr_closed_at, pr_created_at, repo.repo_name, pull_requests.repo_id, repo_groups.rg_name, repo_groups.repo_group_id + ORDER BY pr_created_at + ) data + GROUP BY closed_year, merged_status, data.pr_closed_at, data.commit_count, data.repo_id, data.repo_name, data.repo_group_id, data.repo_group_name + """) + + else: + pr_all_SQL = s.sql.text(""" + SELECT + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', pr_closed_at :: DATE ) AS closed_month, + date_part( 'week', pr_closed_at :: DATE ) AS closed_week, + date_part( 'day', pr_closed_at :: DATE ) AS closed_day, + commit_count AS average_commits_per_pull_request, + count(*) AS pr_count + FROM ( + SELECT + pull_request_commits.pull_request_id, + count(DISTINCT pr_cmt_sha) AS commit_count, + pr_merged_at, + pr_closed_at, + pr_created_at + FROM augur_data.pull_request_commits, augur_data.pull_requests, augur_data.pull_request_meta + WHERE pull_requests.pull_request_id = pull_request_commits.pull_request_id + AND pull_requests.pull_request_id = pull_request_meta.pull_request_id + AND pull_requests.repo_id = :repo_id + AND pr_cmt_sha <> pull_requests.pr_merge_commit_sha + AND pr_cmt_sha <> pull_request_meta.pr_sha + AND pr_created_at::DATE >= :begin_date ::DATE + AND pr_closed_at::DATE <= :end_date ::DATE + GROUP BY pull_request_commits.pull_request_id, pr_merged_at, pr_closed_at, pr_created_at + ORDER BY pr_created_at + ) data + GROUP BY closed_year, merged_status, data.pr_closed_at, data.commit_count + """) + + pr_all = pd.read_sql(pr_all_SQL, self.database, + params={'repo_id': repo_id, 'repo_group_id':repo_group_id, + 'begin_date': begin_date, 'end_date': end_date}) + if not repo_id: + pr_avg_commit_counts = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).mean().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + ['average_commits_per_pull_request']] + else: + pr_avg_commit_counts = pr_all.groupby(['merged_status'] + time_group_bys).mean().reset_index()[time_group_bys + ['merged_status', 'average_commits_per_pull_request']] + + return pr_avg_commit_counts + +@register_metric() +def pull_request_average_event_counts(self, 
repo_group_id, repo_id=None, group_by='month', begin_date=None, end_date=None): + """ Average of event counts with merged status and time frame + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of event counts avergages + """ + + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + + unit_options = ['year', 'month', 'week', 'day'] + time_group_bys = [] + for unit in unit_options.copy(): + if group_by not in unit_options: + continue + time_group_bys.append('closed_{}'.format(unit)) + del unit_options[0] + + if not repo_id: + pr_all_SQL = s.sql.text(""" + SELECT + repo_id, + repo_name, + repo_group_id, + repo_group_name, + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', pr_closed_at :: DATE ) AS closed_month, + date_part( 'week', pr_closed_at :: DATE ) AS closed_week, + date_part( 'day', pr_closed_at :: DATE ) AS closed_day, + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, + assigned_count AS average_assigned_count, + review_requested_count AS average_review_requested_count, + labeled_count AS average_labeled_count, + unlabeled_count AS average_unlabeled_count, + subscribed_count AS average_subscribed_count, + mentioned_count AS average_mentioned_count, + referenced_count AS average_referenced_count, + closed_count AS average_closed_count, + head_ref_force_pushed_count AS average_head_ref_force_pushed_count, + head_ref_deleted_count AS average_head_ref_deleted_count, + milestoned_count AS average_milestoned_count, + merged_count AS average_merged_count, + comment_count AS average_comment_count, + count(*) AS num_pull_requests + FROM ( + SELECT + pull_requests.repo_id, + repo_name, + repo_groups.repo_group_id, + rg_name AS repo_group_name, + pull_requests.pull_request_id, + pr_merged_at, + pr_created_at, + pr_closed_at, + count(*) FILTER (WHERE action = 'assigned') AS assigned_count, + count(*) FILTER (WHERE action = 'review_requested') AS review_requested_count, + count(*) FILTER (WHERE action = 'labeled') AS labeled_count, + count(*) FILTER (WHERE action = 'unlabeled') AS unlabeled_count, + count(*) FILTER (WHERE action = 'subscribed') AS subscribed_count, + count(*) FILTER (WHERE action = 'mentioned') AS mentioned_count, + count(*) FILTER (WHERE action = 'referenced') AS referenced_count, + count(*) FILTER (WHERE action = 'closed') AS closed_count, + count(*) FILTER (WHERE action = 'head_ref_force_pushed') AS head_ref_force_pushed_count, + count(*) FILTER (WHERE action = 'head_ref_deleted') AS head_ref_deleted_count, + count(*) FILTER (WHERE action = 'milestoned') AS milestoned_count, + count(*) FILTER (WHERE action = 'merged') AS merged_count, + COUNT(DISTINCT message.msg_timestamp) AS comment_count + FROM pull_request_events, pull_request_message_ref, message, repo_groups, + pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id IN + (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND repo.repo_id = pull_requests.repo_id + AND repo_groups.repo_group_id = repo.repo_group_id + AND pull_requests.pull_request_id = 
pull_request_events.pull_request_id + AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id + AND pull_request_message_ref.msg_id = message.msg_id + AND pr_created_at::DATE >= :begin_date ::DATE + AND pr_closed_at::DATE <= :end_date ::DATE + GROUP BY pull_requests.pull_request_id, repo.repo_name, repo_groups.repo_group_id, repo_groups.rg_name + ) data + GROUP BY closed_year, closed_month, closed_week, closed_day, merged_status, data.assigned_count, data.review_requested_count, data.labeled_count, data.unlabeled_count, data.subscribed_count, data.mentioned_count, data.referenced_count, data.closed_count, + data.head_ref_force_pushed_count, data.head_ref_deleted_count, data.milestoned_count, data.merged_count, data.comment_count, data.repo_id, data.repo_name, data.repo_group_id, data.repo_group_name + ORDER BY merged_status, closed_year, closed_week, closed_day + """) + + else: + pr_all_SQL = s.sql.text(""" + SELECT + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', pr_closed_at :: DATE ) AS closed_month, + date_part( 'week', pr_closed_at :: DATE ) AS closed_week, + date_part( 'day', pr_closed_at :: DATE ) AS closed_day, + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, + assigned_count AS average_assigned_count, + review_requested_count AS average_review_requested_count, + labeled_count AS average_labeled_count, + unlabeled_count AS average_unlabeled_count, + subscribed_count AS average_subscribed_count, + mentioned_count AS average_mentioned_count, + referenced_count AS average_referenced_count, + closed_count AS average_closed_count, + head_ref_force_pushed_count AS average_head_ref_force_pushed_count, + head_ref_deleted_count AS average_head_ref_deleted_count, + milestoned_count AS average_milestoned_count, + merged_count AS average_merged_count, + comment_count AS average_comment_count, + count(*) AS num_pull_requests + FROM ( + SELECT pull_requests.pull_request_id, + pr_merged_at, + pr_created_at, + pr_closed_at, + count(*) FILTER (WHERE action = 'assigned') AS assigned_count, + count(*) FILTER (WHERE action = 'review_requested') AS review_requested_count, + count(*) FILTER (WHERE action = 'labeled') AS labeled_count, + count(*) FILTER (WHERE action = 'unlabeled') AS unlabeled_count, + count(*) FILTER (WHERE action = 'subscribed') AS subscribed_count, + count(*) FILTER (WHERE action = 'mentioned') AS mentioned_count, + count(*) FILTER (WHERE action = 'referenced') AS referenced_count, + count(*) FILTER (WHERE action = 'closed') AS closed_count, + count(*) FILTER (WHERE action = 'head_ref_force_pushed') AS head_ref_force_pushed_count, + count(*) FILTER (WHERE action = 'head_ref_deleted') AS head_ref_deleted_count, + count(*) FILTER (WHERE action = 'milestoned') AS milestoned_count, + count(*) FILTER (WHERE action = 'merged') AS merged_count, + COUNT(DISTINCT message.msg_timestamp) AS comment_count + FROM pull_request_events, pull_requests, repo, pull_request_message_ref, message + WHERE repo.repo_id = :repo_id + AND repo.repo_id = pull_requests.repo_id + AND pull_requests.pull_request_id = pull_request_events.pull_request_id + AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id + AND pull_request_message_ref.msg_id = message.msg_id + AND pr_created_at::DATE >= :begin_date ::DATE + AND pr_closed_at::DATE <= :end_date ::DATE + GROUP BY pull_requests.pull_request_id + ) data + GROUP BY closed_year, closed_month, closed_week, closed_day, merged_status, data.assigned_count, 
data.review_requested_count, data.labeled_count, data.unlabeled_count, data.subscribed_count, data.mentioned_count, data.referenced_count, data.closed_count, + data.head_ref_force_pushed_count, data.head_ref_deleted_count, data.milestoned_count, data.merged_count, data.comment_count + ORDER BY merged_status, closed_year, closed_week, closed_day + """) + + pr_all = pd.read_sql(pr_all_SQL, self.database, + params={'repo_id': repo_id, 'repo_group_id':repo_group_id, + 'begin_date': begin_date, 'end_date': end_date}) + + count_names = ['assigned_count', 'review_requested_count', 'labeled_count', 'unlabeled_count', 'subscribed_count', 'mentioned_count', 'referenced_count', 'closed_count', 'head_ref_force_pushed_count', 'head_ref_deleted_count', 'milestoned_count', 'merged_count', 'comment_count'] + average_count_names = [] + for name in count_names.copy(): + average_count_names.append('average_' + name) + + if not repo_id: + pr_avg_event_counts = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).mean().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + average_count_names] + else: + pr_avg_event_counts = pr_all.groupby(['merged_status'] + time_group_bys).mean().reset_index()[['merged_status'] + time_group_bys + average_count_names] + + return pr_avg_event_counts + +@register_metric() +def pull_request_average_time_to_responses_and_close(self, repo_group_id, repo_id=None, group_by='month', time_unit ='days', begin_date=None, end_date=None): + """ Average of time to first reponse, last response, and time to close with merged status and time frame + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month' + :param time_unit: Unit of time of data is in, options are: 'hours', or 'days', defaults to 'days' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of averages of time to first response, last response, and close + """ + + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + + unit_options = ['year', 'month', 'week', 'day'] + time_group_bys = [] + for unit in unit_options.copy(): + if group_by not in unit_options: + continue + time_group_bys.append('closed_{}'.format(unit)) + del unit_options[0] + + if not repo_id: + pr_all_SQL = s.sql.text(""" + SELECT + repo_id, + repo_name, + repo_group_id, + repo_group_name, + EXTRACT(epoch FROM(first_response_time - pr_created_at)/86400) AS average_days_to_first_response, + EXTRACT(epoch FROM(first_response_time - pr_created_at)/3600) AS average_hours_to_first_response, + EXTRACT(epoch FROM(last_response_time - pr_created_at)/86400) AS average_days_to_last_response, + EXTRACT(epoch FROM(last_response_time - pr_created_at)/3600) AS average_hours_to_last_response, + EXTRACT(epoch FROM(pr_closed_at - pr_created_at)/86400) AS average_days_to_close, + EXTRACT(epoch FROM(pr_closed_at - pr_created_at)/3600) AS average_hours_to_close, + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', pr_closed_at :: DATE ) AS closed_month, + date_part( 'week', pr_closed_at :: 
DATE ) AS closed_week, + date_part( 'day', pr_closed_at :: DATE ) AS closed_day, + count(*) AS num_pull_requests + FROM ( + SELECT + pull_requests.repo_id, + repo.repo_name, + repo_groups.repo_group_id, + rg_name AS repo_group_name, + pull_requests.pull_request_id, + MIN(message.msg_timestamp) AS first_response_time, + MAX(message.msg_timestamp) AS last_response_time, + pull_requests.pr_closed_at, + pr_created_at, + pull_requests.pr_merged_at + FROM pull_request_message_ref, message, repo_groups, + pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id IN + (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND repo.repo_id = pull_requests.repo_id + AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id + AND pull_request_message_ref.msg_id = message.msg_id + AND repo_groups.repo_group_id = repo.repo_group_id + AND pr_created_at::DATE >= :begin_date ::DATE + AND pr_closed_at::DATE <= :end_date ::DATE + GROUP BY pull_requests.pull_request_id, repo.repo_name, repo_groups.repo_group_id, repo_groups.rg_name + ) response_times + GROUP BY closed_year, merged_status, response_times.first_response_time, response_times.last_response_time, response_times.pr_created_at, response_times.pr_closed_at, response_times.repo_id, response_times.repo_name, response_times.repo_group_id, response_times.repo_group_name + """) + + else: + pr_all_SQL = s.sql.text(""" + SELECT + EXTRACT(epoch FROM(first_response_time - pr_created_at)/86400) AS average_days_to_first_response, + EXTRACT(epoch FROM(first_response_time - pr_created_at)/3600) AS average_hours_to_first_response, + EXTRACT(epoch FROM(last_response_time - pr_created_at)/86400) AS average_days_to_last_response, + EXTRACT(epoch FROM(last_response_time - pr_created_at)/3600) AS average_hours_to_last_response, + EXTRACT(epoch FROM(pr_closed_at - pr_created_at)/86400) AS average_days_to_close, + EXTRACT(epoch FROM(pr_closed_at - pr_created_at)/3600) AS average_hours_to_close, + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', pr_closed_at :: DATE ) AS closed_month, + date_part( 'week', pr_closed_at :: DATE ) AS closed_week, + date_part( 'day', pr_closed_at :: DATE ) AS closed_day, + count(*) AS num_pull_requests + FROM ( + SELECT pull_requests.pull_request_id, + MIN(message.msg_timestamp) AS first_response_time, + MAX(message.msg_timestamp) AS last_response_time, + pull_requests.pr_closed_at, + pr_created_at, + pull_requests.pr_merged_at + FROM pull_requests, repo, pull_request_message_ref, message + WHERE repo.repo_id = :repo_id + AND repo.repo_id = pull_requests.repo_id + AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id + AND pull_request_message_ref.msg_id = message.msg_id + AND pr_created_at::DATE >= :begin_date ::DATE + AND pr_closed_at::DATE <= :end_date ::DATE + GROUP BY pull_requests.pull_request_id + ) response_times + GROUP BY closed_year, merged_status, response_times.first_response_time, response_times.last_response_time, response_times.pr_created_at, response_times.pr_closed_at + """) + + pr_all = pd.read_sql(pr_all_SQL, self.database, + params={'repo_id': repo_id, 'repo_group_id':repo_group_id, + 'begin_date': begin_date, 'end_date': end_date}) + + if not repo_id: + avg_pr_time_to_responses_and_close = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + 
time_group_bys).mean().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + ['average_{}_to_first_response'.format(time_unit), 'average_{}_to_last_response'.format(time_unit), 'average_{}_to_close'.format(time_unit)]] + else: + avg_pr_time_to_responses_and_close = pr_all.groupby(['merged_status'] + time_group_bys).mean().reset_index()[time_group_bys + ['merged_status', 'average_{}_to_first_response'.format(time_unit), 'average_{}_to_last_response'.format(time_unit), 'average_{}_to_close'.format(time_unit)]] + + return avg_pr_time_to_responses_and_close + +@register_metric() +def pull_request_merged_status_counts(self, repo_group_id, repo_id=None, begin_date='1970-1-1 00:00:01', end_date=None, group_by='month'): + """ Merged status counts with time frames + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of merged status counts + """ + + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + + unit_options = ['year', 'month', 'week', 'day'] + time_group_bys = [] + for time_unit in unit_options.copy(): + if group_by not in unit_options: + continue + time_group_bys.append('closed_{}'.format(time_unit)) + del unit_options[0] + + if not repo_id: + pr_all_sql = s.sql.text(""" + SELECT + repo_id, + repo_name, + repo_group_id, + repo_group_name, + pull_request_id AS pull_request_count, + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', pr_closed_at :: DATE ) AS closed_month, + date_part( 'week', pr_closed_at :: DATE ) AS closed_week, + date_part( 'day', pr_closed_at :: DATE ) AS closed_day + FROM ( + SELECT + pull_requests.pull_request_id, + pull_requests.repo_id, + repo.repo_name, + repo_groups.repo_group_id, + rg_name AS repo_group_name, + pr_merged_at, + pr_closed_at + FROM repo_groups, + pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id + WHERE pull_requests.repo_id IN + (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND repo_groups.repo_group_id = repo.repo_group_id + AND pr_created_at::DATE >= :begin_date ::DATE + AND pr_closed_at::DATE <= :end_date ::DATE + GROUP BY pull_requests.pull_request_id, pull_requests.repo_id, repo.repo_name, repo_groups.repo_group_id, repo_groups.rg_name + ) data + GROUP BY repo_id, repo_name, repo_group_id, repo_group_name, pull_request_id, pr_merged_at, pr_closed_at + """) + else: + pr_all_sql = s.sql.text(""" + SELECT + pull_request_id as pull_request_count, + CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' end as merged_status, + date_part( 'year', pr_closed_at :: DATE ) AS closed_year, + date_part( 'month', pr_closed_at :: DATE ) AS closed_month, + date_part( 'week', pr_closed_at :: DATE ) AS closed_week, + date_part( 'day', pr_closed_at :: DATE ) AS closed_day + from pull_requests + where repo_id = :repo_id + AND pr_created_at::date >= :begin_date ::date + AND pr_closed_at::date <= :end_date ::date + """) + + pr_all = pd.read_sql(pr_all_sql, self.database, params={'repo_group_id': repo_group_id, + 'repo_id': repo_id, 'begin_date': begin_date, 'end_date': 
end_date}) + + if not repo_id: + pr_merged_counts = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).count().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + ['pull_request_count']] + else: + pr_merged_counts = pr_all.groupby(['merged_status'] + time_group_bys).count().reset_index()[time_group_bys + ['merged_status', 'pull_request_count']] + + return pr_merged_counts + + + + + diff --git a/augur/metrics/pull_request/__init__.py b/augur/metrics/pull_request/__init__.py deleted file mode 100644 --- a/augur/metrics/pull_request/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .pull_request import create_pull_request_metrics - -from .routes import create_pull_request_routes \ No newline at end of file diff --git a/augur/metrics/pull_request/pull_request.py b/augur/metrics/pull_request/pull_request.py deleted file mode 100644 --- a/augur/metrics/pull_request/pull_request.py +++ /dev/null @@ -1,481 +0,0 @@ -""" -Metrics that provide data about pull requests & their associated activity -""" - -import datetime -import sqlalchemy as s -import pandas as pd -from augur.util import annotate, add_metrics - -@annotate(tag='pull-requests-merge-contributor-new') -def pull_requests_merge_contributor_new(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): - """ - Returns a timeseries of the count of persons contributing with an accepted commit for the first time. - - :param repo_id: The repository's id - :param repo_group_id: The repository's group id - :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' - :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' - :param end_date: Specifies the end date, defaults to datetime.now() - :return: DataFrame of persons/period - """ - if not begin_date: - begin_date = '1970-1-1 00:00:01' - if not end_date: - end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') - - if repo_id: - commitNewContributor = s.sql.text(""" - SELECT date_trunc(:period, new_date::DATE) as commit_date, - COUNT(cmt_author_email), repo_name - FROM ( SELECT repo_name, cmt_author_email, MIN(TO_TIMESTAMP(cmt_author_date,'YYYY-MM-DD')) AS new_date - FROM commits JOIN repo ON commits.repo_id = repo.repo_id - WHERE commits.repo_id = :repo_id - AND TO_TIMESTAMP(cmt_author_date,'YYYY-MM-DD') BETWEEN :begin_date AND :end_date AND cmt_author_email IS NOT NULL - GROUP BY cmt_author_email, repo_name - ) as abc GROUP BY commit_date, repo_name - """) - results = pd.read_sql(commitNewContributor, self.database, params={'repo_id': repo_id, 'period': period, - 'begin_date': begin_date, - 'end_date': end_date}) - else: - commitNewContributor = s.sql.text(""" - SELECT abc.repo_id, repo_name ,date_trunc(:period, new_date::DATE) as commit_date, - COUNT(cmt_author_email) - FROM (SELECT cmt_author_email, MIN(TO_TIMESTAMP(cmt_author_date, 'YYYY-MM-DD')) AS new_date, repo_id - FROM commits - WHERE repo_id in (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) - AND TO_TIMESTAMP(cmt_author_date, 'YYYY-MM-DD') BETWEEN :begin_date AND :end_date - AND cmt_author_email IS NOT NULL - GROUP BY cmt_author_email, repo_id - ) as abc, repo - WHERE abc.repo_id = repo.repo_id - GROUP BY abc.repo_id, repo_name, commit_date - """) - results = pd.read_sql(commitNewContributor, self.database, - params={'repo_group_id': repo_group_id, 'period': period, - 'begin_date': begin_date, - 'end_date': 
end_date}) - return results - -@annotate(tag='pull-requests-closed-no-merge') -def pull_requests_closed_no_merge(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): - """ - Returns a timeseries of the which were closed but not merged - - :param repo_id: The repository's id - :param repo_group_id: The repository's group id - :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' - :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' - :param end_date: Specifies the end date, defaults to datetime.now() - :return: DataFrame of persons/period - """ - if not begin_date: - begin_date = '1970-1-1 00:00:01' - if not end_date: - end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') - - if repo_id: - closedNoMerge = s.sql.text(""" - SELECT DATE_TRUNC(:period, pull_requests.pr_closed_at) AS closed_date, - COUNT(pull_request_id) as pr_count - FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id - WHERE pull_requests.repo_id = :repo_id - AND pull_requests.pr_closed_at is NOT NULL AND - pull_requests.pr_merged_at is NULL - GROUP BY closed_date, pull_request_id - ORDER BY closed_date - """) - results = pd.read_sql(closedNoMerge, self.database, params={'repo_id': repo_id, 'period': period, - 'begin_date': begin_date, - 'end_date': end_date}) - - else: - closedNoMerge = s.sql.text(""" - SELECT DATE_TRUNC(:period, pull_requests.pr_closed_at) AS closed_date, - COUNT(pull_request_id) as pr_count - FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id WHERE pull_requests.repo_id in (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) - and pull_requests.pr_closed_at is NOT NULL and pull_requests.pr_merged_at is NULL - GROUP BY closed_date, pull_request_id - ORDER BY closed_date - """) - - results = pd.read_sql(closedNoMerge, self.database, - params={'repo_group_id': repo_group_id, 'period': period, - 'begin_date': begin_date, - 'end_date': end_date}) - return results - -@annotate(tag='reviews') -def reviews(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): - """ Returns a timeseris of new reviews or pull requests opened - - :param repo_group_id: The repository's repo_group_id - :param repo_id: The repository's repo_id, defaults to None - :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' - :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' - :param end_date: Specifies the end date, defaults to datetime.now() - :return: DataFrame of new reviews/period - """ - if not begin_date: - begin_date = '1970-1-1' - if not end_date: - end_date = datetime.datetime.now().strftime('%Y-%m-%d') - - if not repo_id: - reviews_SQL = s.sql.text(""" - SELECT - pull_requests.repo_id, - repo_name, - DATE_TRUNC(:period, pull_requests.pr_created_at) AS date, - COUNT(pr_src_id) AS pull_requests - FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id - WHERE pull_requests.repo_id IN - (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) - AND pull_requests.pr_created_at - BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') - AND to_timestamp(:end_date, 'YYYY-MM-DD') - GROUP BY pull_requests.repo_id, repo_name, date - ORDER BY pull_requests.repo_id, date - """) - - results = pd.read_sql(reviews_SQL, self.database, - params={'period': period, 'repo_group_id': repo_group_id, - 'begin_date': begin_date, 'end_date': end_date }) - return results - - else: - reviews_SQL = 
s.sql.text(""" - SELECT - repo_name, - DATE_TRUNC(:period, pull_requests.pr_created_at) AS date, - COUNT(pr_src_id) AS pull_requests - FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id - WHERE pull_requests.repo_id = :repo_id - AND pull_requests.pr_created_at - BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD HH24:MI:SS') - AND to_timestamp(:end_date, 'YYYY-MM-DD HH24:MI:SS') - GROUP BY date, repo_name - ORDER BY date - """) - - results = pd.read_sql(reviews_SQL, self.database, - params={'period': period, 'repo_id': repo_id, - 'begin_date': begin_date, 'end_date': end_date}) - return results - -@annotate(tag='reviews-accepted') -def reviews_accepted(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): - """Returns a timeseries of number of reviews or pull requests accepted. - - :param repo_group_id: The repository's repo_group_id - :param repo_id: The repository's repo_id, defaults to None - :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' - :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' - :param end_date: Specifies the end date, defaults to datetime.now() - :return: DataFrame of accepted reviews/period - """ - if not begin_date: - begin_date = '1970-1-1' - if not end_date: - end_date = datetime.datetime.now().strftime('%Y-%m-%d') - - if not repo_id: - reviews_accepted_SQL = s.sql.text(""" - SELECT - pull_requests.repo_id, - repo.repo_name, - DATE_TRUNC(:period, pull_requests.pr_merged_at) AS date, - COUNT(pr_src_id) AS pull_requests - FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id - WHERE pull_requests.repo_id IN - (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) - AND pr_merged_at IS NOT NULL - AND pr_merged_at - BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') - AND to_timestamp(:end_date, 'YYYY-MM-DD') - GROUP BY pull_requests.repo_id, repo_name, date - ORDER BY pull_requests.repo_id, date - """) - - results = pd.read_sql(reviews_accepted_SQL, self.database, - params={'period': period, 'repo_group_id': repo_group_id, - 'begin_date': begin_date, 'end_date': end_date}) - return results - else: - reviews_accepted_SQL = s.sql.text(""" - SELECT - repo.repo_name, - DATE_TRUNC(:period, pull_requests.pr_merged_at) AS date, - COUNT(pr_src_id) AS pull_requests - FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id - WHERE pull_requests.repo_id = :repo_id - AND pr_merged_at IS NOT NULL - AND pr_merged_at - BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') - AND to_timestamp(:end_date, 'YYYY-MM-DD') - GROUP BY date, repo.repo_name - ORDER BY date - """) - - results = pd.read_sql(reviews_accepted_SQL, self.database, - params={'period': period, 'repo_id': repo_id, - 'begin_date': begin_date, 'end_date': end_date}) - return results - -@annotate(tag='reviews-declined') -def reviews_declined(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): - """ Returns a time series of reivews declined - - :param repo_group_id: The repository's repo_group_id - :param repo_id: The repository's repo_id, defaults to None - :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' - :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' - :param end_date: Specifies the end date, defaults to datetime.now() - :return: DataFrame of declined reviews/period - """ - if not begin_date: - begin_date = '1970-1-1' - if not end_date: - end_date = 
datetime.datetime.now().strftime('%Y-%m-%d') - - if not repo_id: - reviews_declined_SQL = s.sql.text(""" - SELECT - pull_requests.repo_id, - repo.repo_name, - DATE_TRUNC(:period, pull_requests.pr_closed_at) AS date, - COUNT(pr_src_id) AS pull_requests - FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id - WHERE pull_requests.repo_id IN - (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) - AND pr_src_state = 'closed' AND pr_merged_at IS NULL - AND pr_closed_at - BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') - AND to_timestamp(:end_date, 'YYYY-MM-DD') - GROUP BY pull_requests.repo_id, repo_name, date - ORDER BY pull_requests.repo_id, date - """) - - results = pd.read_sql(reviews_declined_SQL, self.database, - params={'period': period, 'repo_group_id': repo_group_id, - 'begin_date': begin_date, 'end_date': end_date }) - return results - else: - reviews_declined_SQL = s.sql.text(""" - SELECT - repo.repo_name, - DATE_TRUNC(:period, pull_requests.pr_closed_at) AS date, - COUNT(pr_src_id) AS pull_requests - FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id - WHERE pull_requests.repo_id = :repo_id - AND pr_src_state = 'closed' AND pr_merged_at IS NULL - AND pr_closed_at - BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') - AND to_timestamp(:end_date, 'YYYY-MM-DD') - GROUP BY date, repo.repo_name - ORDER BY date - """) - - results = pd.read_sql(reviews_declined_SQL, self.database, - params={'period': period, 'repo_id': repo_id, - 'begin_date': begin_date, 'end_date': end_date}) - return results - -@annotate(tag='review-duration') -def review_duration(self, repo_group_id, repo_id=None, begin_date=None, end_date=None): - """ Returns the duration of each accepted review. - - :param repo_group_id: The repository's repo_group_id - :param repo_id: The repository's repo_id, defaults to None - :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' - :param end_date: Specifies the end date, defaults to datetime.now() - :return: DataFrame of pull request id with the corresponding duration - """ - if not begin_date: - begin_date = '1970-1-1' - if not end_date: - end_date = datetime.datetime.now().strftime('%Y-%m-%d') - - if not repo_id: - review_duration_SQL = s.sql.text(""" - SELECT - pull_requests.repo_id, - repo.repo_name, - pull_requests.pull_request_id, - pull_requests.pr_created_at AS created_at, - pull_requests.pr_merged_at AS merged_at, - (pr_merged_at - pr_created_at) AS duration - FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id - WHERE pull_requests.repo_id IN - (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) - AND pr_merged_at IS NOT NULL - AND pr_created_at - BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') - AND to_timestamp(:end_date, 'YYYY-MM-DD') - ORDER BY pull_requests.repo_id, pull_requests.pull_request_id - """) - - results = pd.read_sql(review_duration_SQL, self.database, - params={'repo_group_id': repo_group_id, - 'begin_date': begin_date, - 'end_date': end_date}) - results['duration'] = results['duration'].astype(str) - return results - else: - review_duration_SQL = s.sql.text(""" - SELECT - repo_name, - pull_request_id, - pr_created_at AS created_at, - pr_merged_at AS merged_at, - (pr_merged_at - pr_created_at) AS duration - FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id - WHERE pull_requests.repo_id = :repo_id - AND pr_merged_at IS NOT NULL - AND pr_created_at - BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD') - AND to_timestamp(:end_date, 'YYYY-MM-DD') - 
ORDER BY pull_requests.repo_id, pull_request_id - """) - - results = pd.read_sql(review_duration_SQL, self.database, - params={'repo_id': repo_id, - 'begin_date': begin_date, - 'end_date': end_date}) - results['duration'] = results['duration'].astype(str) - return results - -@annotate(tag='pull-request-acceptance-rate') -def pull_request_acceptance_rate(self, repo_group_id, repo_id=None, begin_date=None, end_date=None, group_by='week'): - """ - Timeseries of pull request acceptance rate (expressed as the ratio of pull requests merged on a date to the count of pull requests opened on a date) - - :param repo_group_id: The repository's repo_group_id - :param repo_id: The repository's repo_id, defaults to None - :return: DataFrame with ratio/day - """ - if not begin_date: - begin_date = '1970-1-1 00:00:01' - if not end_date: - end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') - - if not repo_id: - prAccRateSQL = s.sql.text(""" - SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate" - FROM - ( - SELECT count(issue_events.issue_id) AS num_approved, - date_trunc(:group_by,issue_events.created_at) AS accepted_on - FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id - JOIN repo ON issues.repo_id = repo.repo_id - WHERE action = 'merged' - AND issues.pull_request IS NOT NULL - AND repo_group_id = :repo_group_id - AND issue_events.created_at BETWEEN :begin_date AND :end_date - GROUP BY accepted_on - ORDER BY accepted_on - ) accepted - JOIN - ( - SELECT count(issue_events.issue_id) AS num_open, - date_trunc(:group_by,issue_events.created_at) AS date_created - FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id - JOIN repo ON issues.repo_id = repo.repo_id - WHERE action = 'ready_for_review' - AND issues.pull_request IS NOT NULL - AND repo_group_id = :repo_group_id - AND issue_events.created_at BETWEEN :begin_date AND :end_date - GROUP BY date_created - ORDER BY date_created - ) opened - ON opened.date_created = accepted.accepted_on - """) - results = pd.read_sql(prAccRateSQL, self.database, params={'repo_group_id': repo_group_id, 'group_by': group_by, - 'begin_date': begin_date, 'end_date': end_date}) - return results - else: - prAccRateSQL = s.sql.text(""" - SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate" - FROM - ( - SELECT count(issue_events.issue_id) AS num_approved, - date_trunc(:group_by,issue_events.created_at) AS accepted_on - FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id - WHERE action = 'merged' - AND issues.pull_request IS NOT NULL - AND repo_id = :repo_id - AND issue_events.created_at BETWEEN :begin_date AND :end_date - GROUP BY accepted_on - ORDER BY accepted_on - ) accepted - JOIN - ( - SELECT count(issue_events.issue_id) AS num_open, - date_trunc(:group_by,issue_events.created_at) AS date_created - FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id - WHERE action = 'ready_for_review' - AND issues.pull_request IS NOT NULL - AND repo_id = :repo_id - AND issue_events.created_at BETWEEN :begin_date AND :end_date - GROUP BY date_created - ORDER BY date_created - ) opened - ON opened.date_created = accepted.accepted_on - """) - results = pd.read_sql(prAccRateSQL, self.database, params={'repo_id': repo_id, 'group_by': group_by, - 'begin_date': begin_date, 'end_date': end_date}) - return results - -@annotate(tag='pull-request-merged-status-counts') -def 
pull_request_merged_status_counts(self, repo_group_id, repo_id=None, begin_date='1970-1-1 00:00:01', end_date=None, group_by='week'): - """ - _____ - - :param repo_group_id: The repository's repo_group_id - :param repo_id: The repository's repo_id, defaults to None - :param begin_date: pull requests opened after this date - :____ - : - :return: ____ - """ - - if not end_date: - end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') - - unit_options = ['year', 'month', 'week', 'day'] - time_group_bys = [] - for time_unit in unit_options.copy(): - if group_by not in unit_options: - continue - time_group_bys.append('closed_{}'.format(time_unit)) - del unit_options[0] - - if not repo_id: - pr_all_sql = s.sql.text(""" - - """) - else: - pr_all_sql = s.sql.text(""" - SELECT - pull_request_id as pull_request_count, - CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' end as merged_status, - date_part( 'year', pr_closed_at :: DATE ) AS closed_year, - date_part( 'month', pr_closed_at :: DATE ) AS closed_month, - date_part( 'week', pr_closed_at :: DATE ) AS closed_week, - date_part( 'day', pr_closed_at :: DATE ) AS closed_day - from pull_requests - where repo_id = :repo_id - AND pr_created_at::date >= :begin_date ::date - AND pr_closed_at::date <= :end_date ::date - """) - - pr_all = pd.read_sql(pr_all_sql, self.database, params={'repo_group_id': repo_group_id, - 'repo_id': repo_id, 'begin_date': begin_date, 'end_date': end_date}) - - pr_counts = pr_all.groupby(['merged_status'] + time_group_bys).count().reset_index()[time_group_bys + ['merged_status', 'pull_request_count']] - - return pr_counts - -def create_pull_request_metrics(metrics): - add_metrics(metrics, __name__) diff --git a/augur/metrics/pull_request/routes.py b/augur/metrics/pull_request/routes.py deleted file mode 100644 --- a/augur/metrics/pull_request/routes.py +++ /dev/null @@ -1,453 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_pull_request_routes(server): - - metrics = server._augur.metrics - - """ - @api {get} /repo-groups/:repo_group_id/reviews Reviews (Repo Group) - @apiName reviews-repo-group - @apiGroup Evolution - @apiDescription Time series of number of new reviews / pull requests opened within a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/reviews.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21035, - "repo_name": "prototype-ujs", - "date": "2010-01-01T00:00:00.000Z", - "pull_requests": 1 - }, - { - "repo_id": 21035, - "repo_name": "prototype-ujs", - "date": "2011-01-01T00:00:00.000Z", - "pull_requests": 5 - }, - { - "repo_id": 21042, - "repo_name": "pjax_rails", - "date": "2011-01-01T00:00:00.000Z", - "pull_requests": 16 - }, - { - "repo_id": 21042, - "repo_name": "pjax_rails", - "date": "2012-01-01T00:00:00.000Z", - "pull_requests": 14 - } - ] - """ - server.addRepoGroupMetric(metrics.reviews, 'reviews') - - """ - @api {get} /repos/:repo_id/reviews Reviews (Repo) - @apiName reviews-repo - @apiGroup Evolution - @apiDescription Time series of number of new reviews / pull requests opened within a certain period. 
- <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/reviews.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "graphql-spec", - "date": "2016-01-01T00:00:00.000Z", - "pull_requests": 37 - }, - { - "repo_name": "graphql-spec", - "date": "2017-01-01T00:00:00.000Z", - "pull_requests": 49 - }, - { - "repo_name": "graphql-spec", - "date": "2018-01-01T00:00:00.000Z", - "pull_requests": 63 - } - ] - """ - server.addRepoMetric(metrics.reviews, 'reviews') - - """ - @api {get} /repo-groups/:repo_group_id/reviews-accepted Reviews Accepted (Repo Group) - @apiName reviews-accepted-repo-group - @apiGroup Evolution - @apiDescription Time series of number of accepted reviews / pull requests opened within a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Reviews_Accepted.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21035, - "repo_name": "prototype-ujs", - "date": "2010-01-01T00:00:00.000Z", - "pull_requests": 1 - }, - { - "repo_id": 21042, - "repo_name": "pjax_rails", - "date": "2011-01-01T00:00:00.000Z", - "pull_requests": 4 - }, - { - "repo_id": 21042, - "repo_name": "pjax_rails", - "date": "2012-01-01T00:00:00.000Z", - "pull_requests": 4 - } - ] - """ - server.addRepoGroupMetric(metrics.reviews_accepted, 'reviews-accepted') - - """ - @api {get} /repos/:repo_id/reviews-accepted Reviews Accepted (Repo) - @apiName reviews-accepted-repo - @apiGroup Evolution - @apiDescription Time series of number of accepted reviews / pull requests opened within a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Reviews_Accepted.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "graphql-spec", - "date": "2016-01-01T00:00:00.000Z", - "pull_requests": 30 - }, - { - "repo_name": "graphql-spec", - "date": "2017-01-01T00:00:00.000Z", - "pull_requests": 37 - }, - { - "repo_name": "graphql-spec", - "date": "2018-01-01T00:00:00.000Z", - "pull_requests": 46 - } - ] - """ - server.addRepoMetric(metrics.reviews_accepted, 'reviews-accepted') - - """ - @api {get} /repo-groups/:repo_group_id/reviews-declined Reviews Declined (Repo Group) - @apiName reviews-declined-repo-group - @apiGroup Evolution - @apiDescription Time series of number of declined reviews / pull requests opened within a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Reviews_Accepted.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21035, - "repo_name": "prototype-ujs", - "date": "2010-01-01T00:00:00.000Z", - "pull_requests": 1 - }, - { - "repo_id": 21042, - "repo_name": "pjax_rails", - "date": "2011-01-01T00:00:00.000Z", - "pull_requests": 3 - }, - { - "repo_id": 21042, - "repo_name": "pjax_rails", - "date": "2012-01-01T00:00:00.000Z", - "pull_requests": 6 - } - ] - """ - server.addRepoGroupMetric(metrics.reviews_declined, 'reviews-declined') - - """ - @api {get} /repos/:repo_id/reviews-declined Reviews Declined (Repo) - @apiName reviews-declined-repo - @apiGroup Evolution - @apiDescription Time series of number of declined reviews / pull requests opened within a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Reviews_Accepted.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "graphql-spec", - "date": "2016-01-01T00:00:00.000Z", - "pull_requests": 11 - }, - { - "repo_name": "graphql-spec", - "date": "2017-01-01T00:00:00.000Z", - "pull_requests": 16 - }, - { - "repo_name": "graphql-spec", - "date": "2018-01-01T00:00:00.000Z", - "pull_requests": 4 - } - ] - """ - server.addRepoMetric(metrics.reviews_declined, 'reviews-declined') - - """ - @api {get} /repo-groups/:repo_group_id/review-duration Review Duration (Repo Group) - @apiName review-duration-repo-group - @apiGroup Evolution - @apiDescription Time since an review/pull request is proposed until it is accepted. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Reviews_Duration.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21035, - "repo_name": "prototype-ujs", - "pull_request_id": 25386, - "created_at": "2010-09-28T19:07:15.000Z", - "merged_at": "2010-09-29T17:46:59.000Z", - "duration": "0 days 22:39:44.000000000" - }, - { - "repo_id": 21042, - "repo_name": "pjax_rails", - "pull_request_id": 25392, - "created_at": "2011-05-18T14:11:23.000Z", - "merged_at": "2011-05-18T19:03:01.000Z", - "duration": "0 days 04:51:38.000000000" - }, - { - "repo_id": 21042, - "repo_name": "pjax_rails", - "pull_request_id": 25396, - "created_at": "2011-05-25T10:09:01.000Z", - "merged_at": "2011-05-25T19:30:01.000Z", - "duration": "0 days 09:21:00.000000000" - } - ] - """ - server.addRepoGroupMetric(metrics.review_duration, 'review-duration') - - """ - @api {get} /repos/:repo_id/review-duration review Duration (Repo) - @apiName review-duration-repo - @apiGroup Evolution - @apiDescription Time since an review/pull request is proposed until it is accepted. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Reviews_Duration.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "graphql-spec", - "pull_request_id": 25374, - "created_at": "2019-01-02T11:02:08.000Z", - "merged_at": "2019-07-05T09:10:45.000Z", - "duration": "183 days 22:08:37.000000000" - }, - { - "repo_name": "graphql-spec", - "pull_request_id": 25378, - "created_at": "2019-03-28T13:44:04.000Z", - "merged_at": "2019-07-03T23:10:36.000Z", - "duration": "97 days 09:26:32.000000000" - } - ] - """ - server.addRepoMetric(metrics.review_duration, 'review-duration') - - """ - @api {get} /repo-groups/:repo_group_id/pull-requests-merge-contributor-new New Contributors of Commits (Repo Group) - @apiName New Contributors of Commits(Repo Group) - @apiGroup Evolution - @apiDescription Number of persons contributing with an accepted commit for the first time. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/pull-requests-merge-contributor-new.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "commit_date": "2018-01-01T00:00:00.000Z", - "count": 5140, - "repo_name": "rails" - }, - { - "commit_date": "2019-01-01T00:00:00.000Z", - "commit_count": 711, - "repo_name": "rails" - } - ] - """ - server.addRepoGroupMetric( - metrics.pull_requests_merge_contributor_new, 'pull-requests-merge-contributor-new') - - """ - @api {get} /repos/:repo_id/pull-requests-merge-contributor-new New Contributors of Commits (Repo) - @apiName New Contributors of Commits(Repo) - @apiGroup Evolution - @apiDescription Number of persons contributing with an accepted commit for the first time. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/pull-requests-merge-contributor-new.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "commit_date": "2018-01-01T00:00:00.000Z", - "count": 2287, - "repo_name": "rails" - }, - { - "commit_date": "2018-02-01T00:00:00.000Z", - "count": 1939, - "repo_name": "rails" - } - ] - """ - server.addRepoMetric( - metrics.pull_requests_merge_contributor_new, 'pull-requests-merge-contributor-new') - - """ - @api {get} /repo-groups/:repo_group_id/pull-request-acceptance-rate Pull Request Acceptance Rate (Repo Group) - @apiName pull-request-acceptance-rate-repo-group - @apiGroup Experimental - @apiDescription Timeseries of pull request acceptance rate (expressed as the ratio of pull requests merged on a date to the count of pull requests opened on a date) - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [group_by="week"] Allows for results to be grouped by day, week, month, or year. E.g. values: `year`, `day`, `month` - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2019-02-11T00:00:00.000Z", - "rate": 120.5 - }, - { - "date": "2019-02-18T00:00:00.000Z", - "rate": 34 - }, - { - "date": "2019-02-25T00:00:00.000Z", - "rate": 38.6666666667 - } - ] - """ - server.addRepoGroupMetric(metrics.pull_request_acceptance_rate, 'pull-request-acceptance-rate') - - """ - @api {get} /repos/:repo_id/pull-request-acceptance-rate Pull Request Acceptance Rate (Repo) - @apiName pull-request-acceptance-rate-repo - @apiGroup Experimental - @apiDescription Timeseries of pull request acceptance rate (expressed as the ratio of pull requests merged on a date to the count of pull requests opened on a date) - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2019-01-01T00:00:00.000Z", - "rate": 5.3333333333 - } - ] - """ - server.addRepoMetric(metrics.pull_request_acceptance_rate, 'pull-request-acceptance-rate') - - """ - @api {get} /repos/:repo_id/pull-request-closed-no-merge Pull Request Closed but not merged(Repo) - @apiName pull-request-closed-no-merge - @apiGroup Experimental - @apiDescription Timeseries of pull request which were closed but not merged - @apiParam {string} repo_id Repository ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2019-01-01T00:00:00.000Z", - "pr_count": 3 - } - ] - """ - server.addRepoMetric(metrics.pull_requests_closed_no_merge, 'pull-requests-closed-no-merge') - - """ - @api {get} /repo-groups/:repo_group_id/pull-request-closed-no-merge Pull Request Closed but not merged(Repo) - @apiName pull-request-closed-no-merge - @apiGroup Experimental - @apiDescription Timeseries of pull request which were closed but not merged - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2019-01-01T00:00:00.000Z", - "pr_count": 3 - } - ] - """ - server.addRepoGroupMetric(metrics.pull_requests_closed_no_merge, 'pull-requests-closed-no-merge') - - """ - @api {get} /repos/:repo_id/pull-request-closed-no-merge Pull Request Closed but not merged(Repo) - @apiName pull-request-closed-no-merge - @apiGroup Experimental - @apiDescription ___ - @apiParam {string} repo_id Repository ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - ___ - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2019-01-01T00:00:00.000Z", - "pr_count": 3 - } - ] - """ - server.addRepoMetric(metrics.pull_request_merged_status_counts, 'pull-request-merged-status-counts') - - """ - @api {get} /repo-groups/:repo_group_id/pull-request-closed-no-merge Pull Request Closed but not merged(Repo) - @apiName pull-request-closed-no-merge - @apiGroup Experimental - @apiDescription Timeseries of pull request which were closed but not merged - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "date": "2019-01-01T00:00:00.000Z", - "pr_count": 3 - } - ] - """ - server.addRepoGroupMetric(metrics.pull_request_merged_status_counts, 'pull-request-merged-status-counts') - diff --git a/augur/metrics/release.py b/augur/metrics/release.py new file mode 100644 --- /dev/null +++ b/augur/metrics/release.py @@ -0,0 +1,88 @@ +""" +Metrics that provide data about releases +""" + +import datetime +import sqlalchemy as s +import pandas as pd +from augur.util import register_metric + +@register_metric() +def releases(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): + """ Returns a timeseris of new reviews or pull requests opened + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of new releases/period + """ + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + if not repo_id: + reviews_SQL = s.sql.text(""" + SELECT + res.repo_name, + res.release_id, + res.release_name, + res.release_description, + res.release_author, + res.release_created_at, + res.release_published_at, + res.release_updated_at, + res.release_is_draft, + res.release_is_prerelease, + res.release_tag_name, + res.release_url, + COUNT(res) + FROM ( + SELECT + releases.* + repo.repo_name + FROM + releases LEFT JOIN repo ON releases.repo_id = repo.repo_id + WHERE + repo.repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id ) + ) as res + GROUP BY releases.repo_id, releases.release_id + ORDER BY releases.release_published_at DESC + """) + + results = pd.read_sql(reviews_SQL, self.database, + params={'period': period, 'repo_group_id': repo_group_id, + 'begin_date': begin_date, 'end_date': end_date }) + return results + + else: + reviews_SQL = s.sql.text(""" + SELECT + repo.repo_name, + releases.release_id, + releases.release_name, + releases.release_description, + releases.release_author, + releases.release_created_at, + releases.release_published_at, + releases.release_updated_at, + releases.release_is_draft, + releases.release_is_prerelease, + releases.release_tag_name, + releases.release_url, + COUNT(releases) + FROM + releases LEFT JOIN repo ON releases.repo_id = repo.repo_id + GROUP BY repo.repo_id, releases.release_id + ORDER BY releases.release_published_at DESC + """) + + results = pd.read_sql(reviews_SQL, self.database, + params={'period': period, 'repo_id': repo_id, + 'begin_date': begin_date, 'end_date': end_date}) + return results + +def create_release_metrics(metrics): + add_metrics(metrics, __name__) \ No newline at end of file diff --git a/augur/metrics/repo_meta/repo_meta.py b/augur/metrics/repo_meta.py similarity index 83% rename from augur/metrics/repo_meta/repo_meta.py rename to augur/metrics/repo_meta.py --- a/augur/metrics/repo_meta/repo_meta.py +++ b/augur/metrics/repo_meta.py @@ -5,10 +5,14 @@ import datetime import sqlalchemy as s import pandas as pd -from augur.util import logger, annotate, add_metrics import math +import logging -@annotate(tag='code-changes') +from augur.util import register_metric + +logger = logging.getLogger("augur") + +@register_metric() def 
code_changes(self, repo_group_id, repo_id=None, period='week', begin_date=None, end_date=None): """ Returns a timeseries of the count of commits. @@ -70,7 +74,7 @@ def code_changes(self, repo_group_id, repo_id=None, period='week', begin_date=No results = results[(results['date'] >= begin_date) & (results['date'] <= end_date)] return results -@annotate(tag='code-changes-lines') +@register_metric() def code_changes_lines(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): """Returns a timeseries of code changes added and removed. @@ -128,7 +132,7 @@ def code_changes_lines(self, repo_group_id, repo_id=None, period='day', begin_da -@annotate(tag='sub-projects') +@register_metric() def sub_projects(self, repo_group_id, repo_id=None, begin_date=None, end_date=None): """ Returns number of sub-projects @@ -169,7 +173,7 @@ def sub_projects(self, repo_group_id, repo_id=None, begin_date=None, end_date=No -@annotate(tag='sbom-download') +@register_metric() def sbom_download(self, repo_group_id, repo_id=None): """REQUIRES SBOMS TO BE PRESENT IN THE DATABASE @@ -187,7 +191,7 @@ def sbom_download(self, repo_group_id, repo_id=None): return pd.read_sql(dosocs_SQL, self.database, params=params) #return [json.dumps(license_information)] -@annotate(tag='cii-best-practices-badge') +@register_metric() def cii_best_practices_badge(self, repo_group_id, repo_id=None): """Returns the CII best practices badge level @@ -217,7 +221,7 @@ def cii_best_practices_badge(self, repo_group_id, repo_id=None): return pd.DataFrame(result, index=[0]) -@annotate(tag='forks') +@register_metric() def forks(self, repo_group_id, repo_id=None): """ Returns a time series of the fork count @@ -257,7 +261,7 @@ def forks(self, repo_group_id, repo_id=None): results = pd.read_sql(forks_SQL, self.database, params={'repo_id': repo_id}) return results -@annotate(tag='fork-count') +@register_metric() def fork_count(self, repo_group_id, repo_id=None): """ Returns the latest fork count @@ -292,7 +296,7 @@ def fork_count(self, repo_group_id, repo_id=None): results = pd.read_sql(fork_count_SQL, self.database, params={'repo_id': repo_id}) return results -@annotate(tag='languages') +@register_metric() def languages(self, repo_group_id, repo_id=None): """Returns the implementation languages @@ -320,7 +324,7 @@ def languages(self, repo_group_id, repo_id=None): results = pd.read_sql(languages_SQL, self.database, params={'repo_id': repo_id}) return results -@annotate(tag='license-files') +@register_metric(type="license") def license_files(self, license_id, spdx_binary, repo_group_id, repo_id=None,): """Returns the files related to a license @@ -356,7 +360,7 @@ def license_files(self, license_id, spdx_binary, repo_group_id, repo_id=None,): results = pd.read_sql(license_data_SQL, self.spdx_db, params={'repo_id': repo_id, 'spdx_binary': spdx_binary, 'license_id': license_id}) return results -@annotate(tag='license-declared') +@register_metric() def license_declared(self, repo_group_id, repo_id=None): """Returns the declared license @@ -425,7 +429,7 @@ def license_declared(self, repo_group_id, repo_id=None): results = pd.read_sql(license_declared_SQL, self.spdx_db, params={'repo_id': repo_id}) return results -@annotate(tag='license-coverage') +@register_metric() def license_coverage(self, repo_group_id, repo_id=None): """Returns the declared license @@ -486,7 +490,7 @@ def license_coverage(self, repo_group_id, repo_id=None): return results -@annotate(tag='license-count') +@register_metric() def license_count(self, repo_group_id, 
repo_id=None): """Returns the declared license @@ -548,7 +552,7 @@ def license_count(self, repo_group_id, repo_id=None): return results -@annotate(tag='stars') +@register_metric() def stars(self, repo_group_id, repo_id=None): """ Returns a time series of the stars count @@ -588,7 +592,7 @@ def stars(self, repo_group_id, repo_id=None): results = pd.read_sql(stars_SQL, self.database, params={'repo_id': repo_id}) return results -@annotate(tag='stars-count') +@register_metric() def stars_count(self, repo_group_id, repo_id=None): """ Returns the latest stars count @@ -623,7 +627,7 @@ def stars_count(self, repo_group_id, repo_id=None): results = pd.read_sql(stars_count_SQL, self.database, params={'repo_id': repo_id}) return results -@annotate(tag='watchers') +@register_metric() def watchers(self, repo_group_id, repo_id=None): """ Returns a time series of the watchers count @@ -663,7 +667,7 @@ def watchers(self, repo_group_id, repo_id=None): results = pd.read_sql(watchers_SQL, self.database, params={'repo_id': repo_id}) return results -@annotate(tag='watchers-count') +@register_metric() def watchers_count(self, repo_group_id, repo_id=None): """ Returns the latest watchers count @@ -698,7 +702,7 @@ def watchers_count(self, repo_group_id, repo_id=None): results = pd.read_sql(watchers_count_SQL, self.database, params={'repo_id': repo_id}) return results -@annotate(tag='annual-lines-of-code-count-ranked-by-new-repo-in-repo-group') +@register_metric() def annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(self, repo_group_id, repo_id = None, calendar_year=None): """ For each repository in a collection of repositories being managed, each REPO that first appears in the parameterized @@ -742,7 +746,7 @@ def annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(self, repo_group "repo_id": repo_id, "calendar_year": calendar_year}) return results -@annotate(tag='annual-lines-of-code-count-ranked-by-repo-in-repo-group') +@register_metric() def annual_lines_of_code_count_ranked_by_repo_in_repo_group(self, repo_group_id, repo_id=None, timeframe=None): """ For each repository in a collection of repositories being managed, each REPO's total commits during the current Month, @@ -838,7 +842,7 @@ def annual_lines_of_code_count_ranked_by_repo_in_repo_group(self, repo_group_id, "repo_id": repo_id}) return results -@annotate(tag='lines-of-code-commit-counts-by-calendar-year-grouped') +@register_metric() def lines_of_code_commit_counts_by_calendar_year_grouped(self, repo_url, calendar_year=None, interval=None): """ For a single repository, all the commits and lines of code occuring for the specified year, grouped by the specified interval (week or month) @@ -890,8 +894,12 @@ def lines_of_code_commit_counts_by_calendar_year_grouped(self, repo_url, calenda results = pd.read_sql(cdRepTpIntervalLocCommitsSQL, self.database, params={"repourl": '%{}%'.format(repo_url), 'calendar_year': calendar_year}) return results -@annotate(tag='average-weekly-commits') -def average_weekly_commits(self, repo_group_id=None, repo_id=None, calendar_year=2019): +@register_metric() +def average_weekly_commits(self, repo_group_id=None, repo_id=None, calendar_year=None): + + if calendar_year == None: + calendar_year = datetime.datetime.now().strftime('%Y') + extra_and = "AND repo.repo_group_id = :repo_group_id" if repo_group_id and not repo_id else "AND repo.repo_id = :repo_id" if repo_group_id and repo_id else "" average_weekly_commits_sql = s.sql.text(""" SELECT repo.repo_id, repo.repo_name, year, sum(patches)/52 AS 
average_weekly_commits @@ -907,5 +915,153 @@ def average_weekly_commits(self, repo_group_id=None, repo_id=None, calendar_year "repo_id": repo_id, "calendar_year": calendar_year}) return results -def create_repo_meta_metrics(metrics): - add_metrics(metrics, __name__) +@register_metric() +def aggregate_summary(self, repo_group_id, repo_id=None, begin_date=None, end_date=None): + + if not begin_date: + begin_date = datetime.datetime.now() + # Subtract 1 year and leap year check + try: + begin_date = begin_date.replace(year=begin_date.year-1) + except ValueError: + begin_date = begin_date.replace(year=begin_date.year-1, day=begin_date.day-1) + begin_date = begin_date.strftime('%Y-%m-%d') + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + if not repo_id: + summarySQL = s.sql.text(""" + SELECT + ( + SELECT watchers_count AS watcher_count + FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id + WHERE repo_group_id = :repo_group_id + ORDER BY last_updated DESC + LIMIT 1 + ) - ( + SELECT watchers_count AS watcher_count + FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id + WHERE repo_group_id = :repo_group_id + AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') + ORDER BY last_updated ASC + LIMIT 1 + ) AS watcher_count, + ( + SELECT stars_count AS stars_count + FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id + WHERE repo_group_id = :repo_group_id + ORDER BY last_updated DESC + LIMIT 1 + ) - ( + SELECT stars_count AS stars_count + FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id + WHERE repo_group_id = :repo_group_id + AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') + ORDER BY last_updated ASC + LIMIT 1 + ) AS stars_count, + ( + SELECT fork_count AS fork_count + FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id + WHERE repo_group_id = :repo_group_id + ORDER BY last_updated DESC + LIMIT 1 + ) - ( + SELECT fork_count AS fork_count + FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id + WHERE repo_group_id = :repo_group_id + AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') + ORDER BY last_updated ASC + LIMIT 1 + ) AS fork_count, + ( + SELECT count(*) AS merged_count + FROM ( + SELECT DISTINCT issue_events.issue_id + FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id JOIN repo ON issues.repo_id = repo.repo_id + WHERE action = 'merged' + AND repo_group_id = :repo_group_id + AND issue_events.created_at BETWEEN :begin_date AND :end_date + ) a + ) AS merged_count, + committer_count, commit_count FROM ( + SELECT count(cmt_author_name) AS committer_count, sum(commit_count) AS commit_count + FROM ( + SELECT DISTINCT cmt_author_name, COUNT(cmt_id) AS commit_count FROM commits JOIN repo ON commits.repo_id = repo.repo_id + WHERE repo_group_id = :repo_group_id + AND commits.cmt_committer_date BETWEEN :begin_date AND :end_date + GROUP BY cmt_author_name + ) temp + ) commit_data + """) + results = pd.read_sql(summarySQL, self.database, params={'repo_group_id': repo_group_id, + 'begin_date': begin_date, 'end_date': end_date}) + return results + else: + summarySQL = s.sql.text(""" + SELECT + ( + SELECT watchers_count AS watcher_count + FROM repo_info + WHERE repo_id = :repo_id + ORDER BY last_updated DESC + LIMIT 1 + ) - ( + SELECT watchers_count AS watcher_count + FROM repo_info + WHERE repo_id = :repo_id + AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') + ORDER BY last_updated ASC + LIMIT 1 + ) AS watcher_count, + ( + SELECT stars_count AS 
stars_count + FROM repo_info + WHERE repo_id = :repo_id + ORDER BY last_updated DESC + LIMIT 1 + ) - ( + SELECT stars_count AS stars_count + FROM repo_info + WHERE repo_id = :repo_id + AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') + ORDER BY last_updated ASC + LIMIT 1 + ) AS stars_count, + ( + SELECT fork_count AS fork_count + FROM repo_info + WHERE repo_id = :repo_id + ORDER BY last_updated DESC + LIMIT 1 + ) - ( + SELECT fork_count AS fork_count + FROM repo_info + WHERE repo_id = :repo_id + AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') + ORDER BY last_updated ASC + LIMIT 1 + ) AS fork_count, + ( + SELECT count(*) AS merged_count + FROM ( + SELECT DISTINCT issue_events.issue_id + FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id + WHERE action = 'merged' + AND repo_id = :repo_id + AND issue_events.created_at BETWEEN :begin_date AND :end_date + ) a + ) AS merged_count, + committer_count, commit_count FROM ( + SELECT count(cmt_author_name) AS committer_count, sum(commit_count) AS commit_count + FROM ( + SELECT DISTINCT cmt_author_name, COUNT(cmt_id) AS commit_count FROM commits + WHERE repo_id = :repo_id + AND commits.cmt_committer_date BETWEEN :begin_date AND :end_date + GROUP BY cmt_author_name + ) temp + ) commit_data + """) + results = pd.read_sql(summarySQL, self.database, params={'repo_id': repo_id, + 'begin_date': begin_date, 'end_date': end_date}) + return results \ No newline at end of file diff --git a/augur/metrics/repo_meta/__init__.py b/augur/metrics/repo_meta/__init__.py deleted file mode 100644 --- a/augur/metrics/repo_meta/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .repo_meta import create_repo_meta_metrics - -from .routes import create_repo_meta_routes \ No newline at end of file diff --git a/augur/metrics/repo_meta/routes.py b/augur/metrics/repo_meta/routes.py deleted file mode 100644 --- a/augur/metrics/repo_meta/routes.py +++ /dev/null @@ -1,833 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_repo_meta_routes(server): - - metrics = server._augur.metrics - - """ - @api {get} /repo-groups/:repo_group_id/code-changes Code Changes (Repo Group) - @apiName code-changes-repo-group - @apiGroup Evolution - @apiDescription Time series of number of commits during a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Code_Changes.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21337, - "repo_name": "graphql-wg", - "date": "2018-01-01T00:00:00.000Z", - "commit_count": 173 - }, - { - "repo_id": 21337, - "repo_name": "graphql-wg", - "date": "2019-01-01T00:00:00.000Z", - "commit_count": 92 - }, - { - "repo_id": 21338, - "repo_name": "foundation", - "date": "2019-01-01T00:00:00.000Z", - "commit_count": 8 - } - ] - """ - server.addRepoGroupMetric(metrics.code_changes, 'code-changes') - - """ - @api {get} /repos/:repo_id/code-changes Code Changes (Repo) - @apiName code-changes-repo - @apiGroup Evolution - @apiDescription Time series number of commits during a certain period. 
- <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Code_Changes.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "graphql", - "date": "2015-01-01T00:00:00.000Z", - "commit_count": 90, - }, - { - "repo_name": "graphql", - "date": "2016-01-01T00:00:00.000Z", - "commit_count": 955, - } - ] - """ - server.addRepoMetric(metrics.code_changes, 'code-changes') - - """ - @api {get} /repo-groups/:repo_group_id/code-changes-lines Code Changes Lines (Repo Group) - @apiName code-changes-lines-repo-group - @apiGroup Evolution - @apiDescription Time series of lines added & removed during a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Code_Changes_Lines.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21337, - "repo_name": "graphql-wg", - "date": "2018-01-01T00:00:00.000Z", - "added": 1135, - "removed": 101 - }, - { - "repo_id": 21337, - "repo_name": "graphql-wg", - "date": "2019-01-01T00:00:00.000Z", - "added": 872, - "removed": 76 - }, - { - "repo_id": 21338, - "repo_name": "foundation", - "date": "2019-01-01T00:00:00.000Z", - "added": 130, - "removed": 5 - } - ] - """ - server.addRepoGroupMetric(metrics.code_changes_lines, 'code-changes-lines') - - """ - @api {get} /repos/:repo_id/code-changes-lines Code Changes Lines (Repo) - @apiName code-changes-lines-repo - @apiGroup Evolution - @apiDescription Time series of lines added & removed during a certain period. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/Code_Changes_Lines.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string=day, week, month, year} [period="day"] Periodicity specification. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "graphql-js", - "date": "2015-06-01T00:00:00.000Z", - "added": 17613, - "removed": 106 - }, - { - "repo_name": "graphql-js", - "date": "2015-07-01T00:00:00.000Z", - "added": 9448, - "removed": 5081 - }, - { - "repo_name": "graphql-js", - "date": "2015-08-01T00:00:00.000Z", - "added": 6270, - "removed": 3833 - } - ] - """ - server.addRepoMetric(metrics.code_changes_lines, 'code-changes-lines') - - # TODO: document this - server.addLicenseMetric(metrics.license_files, 'license-files') - - # TODO: document this - server.addRepoMetric(metrics.sbom_download, 'sbom-download') - - # @server.app.route('/{}/repo-groups/<repo_group_id>/code-changes'.format(server.api_version)) - # def code_changes_repo_group_route(repo_group_id): - # period = request.args.get('period', 'day') - # begin_date = request.args.get('begin_date') - # end_date = request.args.get('end_date') - - # kwargs = {'repo_group_id': repo_group_id, 'period': period, - # 'begin_date': begin_date, 'end_date': end_date} - - # data = server.transform(metrics.code_changes, - # args=[], - # kwargs=kwargs) - - # return Response(response=data, status=200, mimetype='application/json') - - # @server.app.route('/{}/repo-groups/<repo_group_id>/repo/<repo_id>/code-changes'.format(server.api_version)) - # def code_changes_repo_route(repo_group_id, repo_id): - # period = request.args.get('period', 'day') - # begin_date = request.args.get('begin_date') - # end_date = request.args.get('end_date') - - # kwargs = {'repo_group_id': repo_group_id, 'repo_id': repo_id, - # 'period': period, 'begin_date': begin_date, - # 'end_date': end_date} - - # data = server.transform(metrics.code_changes, - # args=[], - # kwargs=kwargs) - - # return Response(response=data, status=200, mimetype='application/json') - - """ - @api {get} /repo-groups/:repo_group_id/sub-projects Sub-Projects (Repo Group) - @apiName Sub-Projects(Repo Group) - @apiGroup Evolution - @apiDescription Number of sub-projects. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/sub-projects.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "sub_protject_count": 2 - } - ] - """ - server.addRepoGroupMetric( - metrics.sub_projects, 'sub-projects') - - """ - @api {get} /repos/:repo_id/sub-projects Sub-Projects (Repo) - @apiName Sub-Projects(Repo) - @apiGroup Evolution - @apiDescription Number of sub-projects. - <a href="https://github.com/chaoss/wg-evolution/blob/master/metrics/sub-projects.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "sub_protject_count": 2 - } - ] - """ - server.addRepoMetric( - metrics.sub_projects, 'sub-projects') - - """ - @api {get} /repo-groups/:repo_group_id/cii-best-practices-badge CII Best Practices Badge (Repo Group) - @apiName cii-best-practices-badge-repo-group - @apiGroup Risk - @apiDescription The CII Best Practices Badge level. - <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/security.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21252, - "repo_name": "php-legal-licenses", - "badge_level": "in_progress" - }, - { - "repo_id": 21277, - "repo_name": "trickster", - "badge_level": "passing" - } - ] - """ - server.addRepoGroupMetric(metrics.cii_best_practices_badge, 'cii-best-practices-badge') - - """ - @api {get} /repos/:repo_id/cii-best-practices-badge CII Best Practices Badge (Repo) - @apiName cii-best-practices-badge-repo - @apiGroup Risk - @apiDescription The CII Best Practices Badge level. - <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/security.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "trickster", - "badge_level": "passing" - } - ] - """ - server.addRepoMetric(metrics.cii_best_practices_badge, 'cii-best-practices-badge') - - """ - @api {get} /repo-groups/:repo_group_id/forks Forks (Repo Group) - @apiName forks-repo-group - @apiGroup Risk - @apiDescription A time series of fork count. - <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/business-risk.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21036, - "repo_name": "jquery-ujs", - "date": "2019-07-03T23:26:42.000Z", - "forks": 519 - }, - { - "repo_id": 21036, - "repo_name": "jquery-ujs", - "date": "2019-07-04T16:39:39.000Z", - "forks": 519 - }, - { - "repo_id": 21039, - "repo_name": "rails_xss", - "date": "2019-07-03T23:26:22.000Z", - "forks": 20 - }, - { - "repo_id": 21039, - "repo_name": "rails_xss", - "date": "2019-07-04T16:39:20.000Z", - "forks": 20 - } - ] - """ - server.addRepoGroupMetric(metrics.forks, 'forks') - - """ - @api {get} /repos/:repo_id/forks Forks (Repo) - @apiName forks-repo - @apiGroup Risk - @apiDescription A time series of fork count. - <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/business-risk.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "graphiql", - "date": "2019-07-03T23:27:42.000Z", - "forks": 843 - }, - { - "repo_name": "graphiql", - "date": "2019-07-04T16:40:44.000Z", - "forks": 844 - } - ] - """ - server.addRepoMetric(metrics.forks, 'forks') - - """ - @api {get} /repo-groups/:repo_group_id/fork-count Fork Count (Repo Group) - @apiName fork-count-repo-group - @apiGroup Risk - @apiDescription Fork count. 
- <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/business-risk.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21364, - "repo_name": "irs_process_scripts", - "forks": 4 - }, - { - "repo_id": 21420, - "repo_name": "ruby-coffee-script", - "forks": 54 - } - ] - """ - server.addRepoGroupMetric(metrics.fork_count, 'fork-count') - - """ - @api {get} /repos/:repo_id/fork-count Fork Count (Repo) - @apiName fork-count-repo - @apiGroup Risk - @apiDescription Fork count. - <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/business-risk.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "graphiql", - "forks": 844 - } - ] - """ - server.addRepoMetric(metrics.fork_count, 'fork-count') - - """ - @api {get} /repo-groups/:repo_group_id/languages Languages (Repo Group) - @apiName languages-repo-group - @apiGroup Risk - @apiDescription The primary language of the repository. - <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/security.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21277, - "primary_language": "Go" - }, - { - "repo_id": 21252, - "primary_language": "PHP" - } - ] - """ - server.addRepoGroupMetric(metrics.languages, 'languages') - - """ - @api {get} /repo-groups/:repo_group_id/languages Languages (Repo) - @apiName languages-repo - @apiGroup Risk - @apiDescription The primary language of the repository. - <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/security.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "primary_language":"PHP" - } - ] - """ - server.addRepoMetric(metrics.languages, 'languages') - - """ - @api {get} /repo-groups/:repo_group_id/license-count License Count (Repo Group) - @apiName license-count-repo-group - @apiGroup Risk - @apiDescription The declared software package license (fetched from CII Best Practices badging data). - <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/licensing.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "name": "ActorServiceRegistry", - "number_of_license": 2, - "file_without_licenses": true - }, - { - "name": "adyen-api", - "number_of_license": 1, - "file_without_licenses": true - }, - ] - """ - server.addRepoGroupMetric(metrics.license_count, 'license-count') - - """ - @api {get} /repo-groups/:repo_group_id/license-count License Count (Repo) - @apiName license-count-repo - @apiGroup Risk - @apiDescription The declared software package license (fetched from CII Best Practices badging data). - <a href="https://github.com/chaoss/wg-risk/blob/master/focus-areas/licensing.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. 
- @apiSuccessExample {json} Success-Response: - [ - [ - { - "name": "zucchini", - "number_of_license": 2, - "file_without_licenses": true - } - ] - ] - """ - server.addRepoMetric(metrics.license_count, 'license-count') - - """ - @api {get} /repos/:repo_id/license-coverage License Coverage(Repo) - @apiName license-coverage-repo - @apiGroup Risk - @apiDescription Number of persons contributing with an accepted commit for the first time. - <a href="https://github.com/chaoss/wg-risk/blob/master/metrics/License_Coverage.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "zucchini", - "total_files": 95, - "license_declared_file": 33, - "coverage": 0.347 - } - ] - """ - server.addRepoMetric(metrics.license_coverage, 'license-coverage') - - """ - @api {get} /repo-groups/:repo_group_id/license-coverage License Coverage(Repo Group) - @apiName license-coverage-repo-group - @apiGroup Risk - @apiDescription Number of persons opening an issue for the first time. - <a href="https://github.com/chaoss/wg-risk/blob/master/metrics/License_Coverage.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "name": "ActorServiceRegistry", - "total_files": 51, - "license_declared_files": 19, - "coverage": 0.373 - }, - { - "name": "adyen-api", - "total_files": 92, - "license_declared_files": 55, - "coverage": 0.598 - } - ] - """ - - server.addRepoGroupMetric(metrics.license_coverage, 'license-coverage') - - """ - @api {get} /repos/:repo_id/license-declared License Declared(Repo) - @apiName license-declared-repo - @apiGroup Risk - @apiDescription Number of persons contributing with an accepted commit for the first time. - <a href="https://github.com/chaoss/wg-risk/blob/master/metrics/License_Coverage.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "name": "trickster", - "short_name": "Apache-2.0", - "note": "" - } - ] - """ - server.addRepoMetric(metrics.license_declared, 'license-declared') - - """ - @api {get} /repo-groups/:repo_group_id/license-declared License Declared(Repo Group) - @apiName license-declared-repo-group - @apiGroup Risk - @apiDescription Number of persons opening an issue for the first time. - <a href="https://github.com/chaoss/wg-risk/blob/master/metrics/License_Coverage.md">CHAOSS Metric Definition</a> - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "name": "trickster", - "short_name": "Apache-2.0", - "note": "" - }, - { - "name": "dialyzex", - "short_name": "Apache-2.0", - "note": "" - } - ] - """ - - server.addRepoGroupMetric(metrics.license_declared, 'license-declared') - - """ - @api {get} /repo-groups/:repo_group_id/stars Stars (Repo Group) - @apiName stars-repo-group - @apiGroup Value - @apiDescription A time series of stars count. 
- @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21491, - "repo_name": "commons-io", - "date": "2019-07-03T23:23:36.000Z", - "stars": 600 - }, - { - "repo_id": 21491, - "repo_name": "commons-io", - "date": "2019-07-04T16:36:27.000Z", - "stars": 601 - }, - { - "repo_id": 21524, - "repo_name": "maven", - "date": "2019-07-03T23:21:14.000Z", - "stars": 1730 - }, - { - "repo_id": 21524, - "repo_name": "maven", - "date": "2019-07-04T16:34:04.000Z", - "stars": 1733 - } - ] - """ - server.addRepoGroupMetric(metrics.stars, 'stars') - - """ - @api {get} /repos/:repo_id/stars Stars (Repo) - @apiName stars-repo - @apiGroup Value - @apiDescription A time series of stars count. - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "graphiql", - "date": "2019-07-03T23:27:42.000Z", - "stars": 8652 - }, - { - "repo_name": "graphiql", - "date": "2019-07-04T16:40:44.000Z", - "stars": 8653 - } - ] - """ - server.addRepoMetric(metrics.stars, 'stars') - - """ - @api {get} /repo-groups/:repo_group_id/stars-count Stars Count (Repo Group) - @apiName stars-count-repo-group - @apiGroup Value - @apiDescription Stars count. - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21364, - "repo_name": "irs_process_scripts", - "stars": 20 - }, - { - "repo_id": 21420, - "repo_name": "ruby-coffee-script", - "stars": 19 - } - ] - """ - server.addRepoGroupMetric(metrics.stars_count, 'stars-count') - - """ - @api {get} /repos/:repo_id/stars-count Stars Count (Repo) - @apiName stars-count-repo - @apiGroup Value - @apiDescription Stars count. - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "graphiql", - "stars": 8653 - } - ] - """ - server.addRepoMetric(metrics.stars_count, 'stars-count') - - """ - @api {get} /repo-groups/:repo_group_id/watchers Watchers (Repo Group) - @apiName watchers-repo-group - @apiGroup Value - @apiDescription A time series of watchers count. - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21036, - "repo_name": "jquery-ujs", - "date": "2019-07-03T23:26:42.000Z", - "watchers": 60 - }, - { - "repo_id": 21036, - "repo_name": "jquery-ujs", - "date": "2019-07-04T16:39:39.000Z", - "watchers": 60 - }, - { - "repo_id": 21039, - "repo_name": "rails_xss", - "date": "2019-07-03T23:26:22.000Z", - "watchers": 19 - }, - { - "repo_id": 21039, - "repo_name": "rails_xss", - "date": "2019-07-04T16:39:20.000Z", - "watchers": 20 - } - ] - """ - server.addRepoGroupMetric(metrics.watchers, 'watchers') - - """ - @api {get} /repos/:repo_id/watchers Watchers (Repo) - @apiName watchers-repo - @apiGroup Value - @apiDescription A time series of watchers count. - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. 
- @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "airflow", - "date": "2019-07-03T23:22:26.000Z", - "watchers": 649 - }, - { - "repo_name": "airflow", - "date": "2019-07-04T16:35:16.000Z", - "watchers": 647 - } - ] - """ - server.addRepoMetric(metrics.watchers, 'watchers') - - """ - @api {get} /repo-groups/:repo_group_id/watchers-count Watchers Count (Repo Group) - @apiName watchers-count-repo-group - @apiGroup Value - @apiDescription Watchers count. - @apiParam {string} repo_group_id Repository Group ID - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21039, - "repo_name": "rails_xss", - "watchers": 20 - }, - { - "repo_id": 21036, - "repo_name": "jquery-ujs", - "watchers": 60 - } - ] - """ - server.addRepoGroupMetric(metrics.watchers_count, 'watchers-count') - - """ - @api {get} /repos/:repo_id/watchers-count watchers Count (Repo) - @apiName watchers-count-repo - @apiGroup Value - @apiDescription Watchers count. - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_name": "airflow", - "watchers": 649 - } - ] - """ - server.addRepoMetric(metrics.watchers_count, 'watchers-count') - - """ - @api {get} /repo-groups/:repo_group_id/annual-lines-of-code-count-ranked-by-new-repo-in-repo-group Annual Lines of Code Ranked by New Repo in Repo Group(Repo Group) - @apiName annual-lines-of-code-count-ranked-by-new-repo-in-repo-group - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. Source: Git Repository - @apiParam {String} repo_url_base Base64 version of the URL of the GitHub repository as it appears in the Facade DB - @apiSuccessExample {json} Success-Response: - [ - { - "repos_id": 1, - "net": 2479124, - "patches": 1, - "repo_name": "twemoji" - }, - { - "repos_id": 63, - "net": 2477911, - "patches": 1, - "repo_name": "twemoji-1" - } - ] - """ - server.addRepoGroupMetric(metrics.annual_lines_of_code_count_ranked_by_new_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-new-repo-in-repo-group') - - """ - @api {get} /repo-groups/:repo_group_id/annual-lines-of-code-count-ranked-by-new-repo-in-repo-group Annual Lines of Code Ranked by New Repo in Repo Group(Repo) - @apiName annual-lines-of-code-count-ranked-by-new-repo-in-repo-group - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. Source: Git Repository - @apiParam {String} repo_url_base Base64 version of the URL of the GitHub repository as it appears in the Facade DB - @apiSuccessExample {json} Success-Response: - [ - { - "repos_id": 1, - "net": 2479124, - "patches": 1, - "repo_name": "twemoji" - }, - { - "repos_id": 63, - "net": 2477911, - "patches": 1, - "repo_name": "twemoji-1" - } - ] - """ - server.addRepoMetric(metrics.annual_lines_of_code_count_ranked_by_new_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-new-repo-in-repo-group') - - """ - @api {get} /repo-groups/:repo_group_id/annual-lines-of-code-count-ranked-by-repo-in-repo-group Annual Lines of Code Ranked by Repo in Repo Group(Repo Group) - @apiName annual-lines-of-code-count-ranked-by-repo-in-repo-group - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. 
Source: Git Repository - @apiParam {String} repo_url_base Base64 version of the URL of the GitHub repository as it appears in the Facade DB - @apiSuccessExample {json} Success-Response: - [ - { - "repos_id": 1, - "net": 2479124, - "patches": 1, - "repo_name": "twemoji" - }, - { - "repos_id": 63, - "net": 2477911, - "patches": 1, - "repo_name": "twemoji-1" - } - ] - """ - server.addRepoGroupMetric(metrics.annual_lines_of_code_count_ranked_by_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-repo-in-repo-group') - - """ - @api {get} /repo-groups/:repo_group_id/annual-lines-of-code-count-ranked-by-repo-in-repo-group Annual Lines of Code Ranked by Repo in Repo Group(Repo) - @apiName annual-lines-of-code-count-ranked-by-repo-in-repo-group - @apiGroup Experimental - @apiDescription This is an Augur-specific metric. We are currently working to define these more formally. Source: Git Repository - @apiParam {String} repo_url_base Base64 version of the URL of the GitHub repository as it appears in the Facade DB - @apiSuccessExample {json} Success-Response: - [ - { - "repos_id": 1, - "net": 2479124, - "patches": 1, - "name": "twemoji" - }, - { - "repos_id": 63, - "net": 2477911, - "patches": 1, - "name": "twemoji-1" - } - ] - """ - server.addRepoMetric(metrics.annual_lines_of_code_count_ranked_by_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-repo-in-repo-group') - - # TODO: document this - server.addRepoMetric(metrics.lines_of_code_commit_counts_by_calendar_year_grouped,'lines-of-code-commit-counts-by-calendar-year-grouped') - - # TODO: document this - server.addMetric(metrics.average_weekly_commits, 'average-weekly-commits') - - # TODO: document this - server.addRepoMetric(metrics.average_weekly_commits, 'average-weekly-commits') - - # TODO: document this - server.addRepoGroupMetric(metrics.average_weekly_commits, 'average-weekly-commits') - diff --git a/augur/metrics/util/__init__.py b/augur/metrics/util/__init__.py deleted file mode 100644 --- a/augur/metrics/util/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .util import create_util_metrics - -from .routes import create_util_routes \ No newline at end of file diff --git a/augur/metrics/util/routes.py b/augur/metrics/util/routes.py deleted file mode 100644 --- a/augur/metrics/util/routes.py +++ /dev/null @@ -1,283 +0,0 @@ -from flask import Response - -def create_util_routes(server): - - metrics = server._augur.metrics - - """ - @api {get} /repo-groups Repo Groups - @apiName repo-groups - @apiGroup Utility - @apiDescription Get all the downloaded repo groups. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_group_id": 20, - "rg_name": "Rails", - "rg_description": "Rails Ecosystem.", - "rg_website": "", - "rg_recache": 0, - "rg_last_modified": "2019-06-03T15:55:20.000Z", - "rg_type": "GitHub Organization", - "tool_source": "load", - "tool_version": "one", - "data_source": "git", - "data_collection_date": "2019-06-05T13:36:25.000Z" - }, - { - "repo_group_id": 23, - "rg_name": "Netflix", - "rg_description": "Netflix Ecosystem.", - "rg_website": "", - "rg_recache": 0, - "rg_last_modified": "2019-06-03T15:55:20.000Z", - "rg_type": "GitHub Organization", - "tool_source": "load", - "tool_version": "one", - "data_source": "git", - "data_collection_date": "2019-06-05T13:36:36.000Z" - } - ] - """ - @server.app.route('/{}/repo-groups'.format(server.api_version)) - def get_repo_groups(): #TODO: make this name automatic - wrapper? 
- drs = server.transform(metrics.repo_groups) - return Response(response=drs, - status=200, - mimetype="application/json") - server.updateMetricMetadata(function=metrics.repo_groups, endpoint='/{}/repo-groups'.format(server.api_version), metric_type='git') - - """ - @api {get} /repos Repos - @apiName repos - @apiGroup Utility - @apiDescription Get all the downloaded repos. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21996, - "repo_name": "incubator-argus", - "description": null, - "url": "github.com\/apache\/incubator-argus.git", - "repo_status": "Update", - "commits_all_time": null, - "issues_all_time": null, - "rg_name": "Apache", - "base64_url": "Z2l0aHViLmNvbS9hcGFjaGUvaW5jdWJhdG9yLWFyZ3VzLmdpdA==" - }, - { - "repo_id": 21729, - "repo_name": "tomee-site", - "description": null, - "url": "github.com\/apache\/tomee-site.git", - "repo_status": "Complete", - "commits_all_time": 224216, - "issues_all_time": 2, - "rg_name": "Apache", - "base64_url": "Z2l0aHViLmNvbS9hcGFjaGUvdG9tZWUtc2l0ZS5naXQ=" - } - ] - """ - @server.app.route('/{}/repos'.format(server.api_version)) - def downloaded_repos(): - drs = server.transform(metrics.downloaded_repos) - return Response(response=drs, - status=200, - mimetype="application/json") - server.updateMetricMetadata(function=metrics.downloaded_repos, endpoint='/{}/repos'.format(server.api_version), metric_type='git') - - """ - @api {get} /repo-groups/:repo_group_id/repos Repos in Repo Group - @apiName repos-in-repo-groups - @apiGroup Utility - @apiDescription Get all the repos in a repo group. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21326, - "repo_name": "graphql-js", - "description": null, - "url": "https:\/\/github.com\/graphql\/graphql-js.git", - "repo_status": "Complete", - "commits_all_time": 6874, - "issues_all_time": 81 - }, - { - "repo_id": 21331, - "repo_name": "graphiql", - "description": null, - "url": "https:\/\/github.com\/graphql\/graphiql.git", - "repo_status": "Complete", - "commits_all_time": 4772, - "issues_all_time": 144 - } - ] - """ - server.addRepoGroupMetric(metrics.repos_in_repo_groups, 'repos') - - """ - @api {get} /owner/:owner/repo/:repo Get Repo - @apiName get-repo - @apiGroup Utility - @apiDescription Get the `repo_group_id` & `repo_id` of a particular repo. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21339, - "repo_group_id": 23 - }, - { - "repo_id": 21000, - "repo_group_id": 20 - } - ] - """ - @server.app.route('/{}/owner/<owner>/name/<repo>'.format(server.api_version)) - def get_repo_by_git_name(owner, repo): - a = [owner, repo] - gre = server.transform(metrics.get_repo_by_git_name, args = a) - return Response(response=gre, - status=200, - mimetype="application/json") - - """ - @api {get} /rg-name/:rg_name/repo-name/:repo_name Get Repo - @apiName get-repo - @apiGroup Utility - @apiDescription Get the `repo_group_id` & `repo_id` of a particular repo. 
- @apiSuccessExample {json} Success-Response: - [ - { - "repo_id": 21000, - "repo_group_id": 20, - "repo_git":"https://github.com/rails/rails.git" - }, - ] - """ - @server.app.route('/{}/rg-name/<rg_name>/repo-name/<repo_name>'.format(server.api_version)) - def get_repo_by_name(rg_name, repo_name): - arg = [rg_name, repo_name] - gre = server.transform(metrics.get_repo_by_name, args=arg) - return Response(response=gre, - status=200, - mimetype="application/json") - - """ - @api {get} /rg-names/:rg_name Get Repo - @apiName get-repo - @apiGroup Utility - @apiDescription Get the `repo_id` of a particular repo group. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_group_id": 20, - "rg_name": 'Rails' - }, - ] - """ - @server.app.route('/{}/rg-name/<rg_name>'.format(server.api_version)) - def get_group_by_name(rg_name): - arg = [rg_name] - res = server.transform(metrics.get_group_by_name, args=arg) - return Response(response=res, - status=200, - mimetype="application/json") - - - @server.app.route('/{}/dosocs/repos'.format(server.api_version)) - def get_repos_for_dosocs(): - res = server.transform(metrics.get_repos_for_dosocs) - return Response(response=res, - status=200, - mimetype='application/json') - - server.addRepoGroupMetric(metrics.get_issues, 'get-issues') - server.addRepoMetric(metrics.get_issues, 'get-issues') - - """ - @api {get} /top-insights Top Insights - @apiName top-insights - @apiGroup Utility - @apiDescription Get all the downloaded repo groups. - @apiSuccessExample {json} Success-Response: - [ - { - "repo_group_id": 20, - "rg_name": "Rails", - "rg_description": "Rails Ecosystem.", - "rg_website": "", - "rg_recache": 0, - "rg_last_modified": "2019-06-03T15:55:20.000Z", - "rg_type": "GitHub Organization", - "tool_source": "load", - "tool_version": "one", - "data_source": "git", - "data_collection_date": "2019-06-05T13:36:25.000Z" - }, - { - "repo_group_id": 23, - "rg_name": "Netflix", - "rg_description": "Netflix Ecosystem.", - "rg_website": "", - "rg_recache": 0, - "rg_last_modified": "2019-06-03T15:55:20.000Z", - "rg_type": "GitHub Organization", - "tool_source": "load", - "tool_version": "one", - "data_source": "git", - "data_collection_date": "2019-06-05T13:36:36.000Z" - } - ] - """ - # @server.app.route('/{}/top-insights'.format(server.api_version)) - # def top_insights(): #TODO: make this name automatic - wrapper? - # tis = server.transform(metrics.top_insights) - # return Response(response=tis, - # status=200, - # mimetype="application/json") - # server.updateMetricMetadata(function=metrics.top_insights, endpoint='/{}/top-insights'.format(server.api_version), metric_type='git') - - """ - @api {get} /repo-groups/:repo_group_id/aggregate-summary Aggregate Summary (Repo Group) - @apiName aggregate-summary-repo-group - @apiGroup Experimental - @apiDescription Returns the current count of watchers, stars, and forks and the counts of all commits, committers, and pull requests merged between a given beginning and end date (default between now and 365 days ago). - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. 
values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "watcher_count": 69106, - "star_count": 460447, - "fork_count": 226841, - "merged_count": 3883, - "committer_count": 8553, - "commit_count": 7890143 - } - ] - """ - server.addRepoGroupMetric(metrics.aggregate_summary, 'aggregate-summary') - - """ - @api {get} /repos/:repo_id/aggregate-summary Aggregate Summary (Repo) - @apiName aggregate-summary-repo - @apiGroup Experimental - @apiDescription Returns the current count of watchers, stars, and forks and the counts of all commits, committers, and pull requests merged between a given beginning and end date (default between now and 365 days ago). - @apiParam {string} repo_group_id Repository Group ID. - @apiParam {string} repo_id Repository ID. - @apiParam {string} [begin_date="1970-1-1 0:0:0"] Beginning date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiParam {string} [end_date="current date"] Ending date specification. E.g. values: `2018`, `2018-05`, `2019-05-01` - @apiSuccessExample {json} Success-Response: - [ - { - "watcher_count": 83, - "star_count": 581, - "fork_count": 449, - "merged_count": 0, - "committer_count": 5, - "commit_count": 133 - } - ] - """ - server.addRepoMetric(metrics.aggregate_summary, 'aggregate-summary') \ No newline at end of file diff --git a/augur/metrics/util/util.py b/augur/metrics/util/util.py deleted file mode 100644 --- a/augur/metrics/util/util.py +++ /dev/null @@ -1,364 +0,0 @@ -""" -Directory, which is for utility/non-metric endpoints. -""" - -import datetime -import base64 -import sqlalchemy as s -import pandas as pd -from augur.util import annotate, add_metrics - -@annotate(tag='repo-groups') -def repo_groups(self): - """ - Returns number of lines changed per author per day - - :param repo_url: the repository's URL - """ - repoGroupsSQL = s.sql.text(""" - SELECT * - FROM repo_groups - ORDER BY rg_name - """) - results = pd.read_sql(repoGroupsSQL, self.database) - return results - -@annotate(tag='downloaded-repos') -def downloaded_repos(self): - """ - Returns all repository names, URLs, and base64 URLs in the facade database - """ - downloadedReposSQL = s.sql.text(""" - SELECT - repo.repo_id, - repo.repo_name, - repo.description, - repo.repo_git AS url, - repo.repo_status, - a.commits_all_time, - b.issues_all_time , - rg_name, - repo.repo_group_id - FROM - repo - left outer join - (select repo_id, COUNT ( commits.cmt_id ) AS commits_all_time from commits group by repo_id ) a on - repo.repo_id = a.repo_id - left outer join - (select repo_id, count ( * ) as issues_all_time from issues where issues.pull_request IS NULL group by repo_id) b - on - repo.repo_id = b.repo_id - JOIN repo_groups ON repo_groups.repo_group_id = repo.repo_group_id - order by repo_name - """) - results = pd.read_sql(downloadedReposSQL, self.database) - results['url'] = results['url'].apply(lambda datum: datum.split('//')[1]) - # if self.projects: - # results = results[results.project_name.isin(self.projects)] - if self.projects: - results = results[results.project_name.isin(self.projects)] - - b64_urls = [] - for i in results.index: - b64_urls.append(base64.b64encode((results.at[i, 'url']).encode())) - results['base64_url'] = b64_urls - - return results - -@annotate(tag='repos-in-repo-groups') -def repos_in_repo_groups(self, repo_group_id): - """ - Returns a list of all the repos in a repo_group - - :param repo_group_id: The repository's repo_group_id - """ - repos_in_repo_groups_SQL = s.sql.text(""" - SELECT - 
repo.repo_id, - repo.repo_name, - repo.description, - repo.repo_git AS url, - repo.repo_status, - a.commits_all_time, - b.issues_all_time - FROM - repo - left outer join - (select repo_id, COUNT ( commits.cmt_id ) AS commits_all_time from commits group by repo_id ) a on - repo.repo_id = a.repo_id - left outer join - (select repo_id, count ( issues.issue_id) as issues_all_time from issues where issues.pull_request IS NULL group by repo_id) b - on - repo.repo_id = b.repo_id - JOIN repo_groups ON repo_groups.repo_group_id = repo.repo_group_id - WHERE - repo_groups.repo_group_id = :repo_group_id - ORDER BY repo.repo_git - """) - - results = pd.read_sql(repos_in_repo_groups_SQL, self.database, params={'repo_group_id': repo_group_id}) - return results - -@annotate(tag='get-repo-by-git-name') -def get_repo_by_git_name(self, owner, repo): - """ - Returns repo id and repo group id by owner and repo - - :param owner: the owner of the repo - :param repo: the name of the repo - """ - getRepoSQL = s.sql.text(""" - SELECT repo.repo_id, repo.repo_group_id, rg_name - FROM repo JOIN repo_groups ON repo_groups.repo_group_id = repo.repo_group_id - WHERE repo_name = :repo AND repo_path LIKE :owner - GROUP BY repo_id, rg_name - """) - - results = pd.read_sql(getRepoSQL, self.database, params={'owner': '%{}_'.format(owner), 'repo': repo,}) - - return results - -@annotate(tag='get-repo-by-name') -def get_repo_by_name(self, rg_name, repo_name): - """ - Returns repo id and repo group id by rg_name and repo_name - - :param rg_name: the repo group of the repo - :param repo_name: the name of the repo - """ - - repoSQL = s.sql.text(""" - SELECT repo_id, repo.repo_group_id, repo_git as url - FROM repo, repo_groups - WHERE repo.repo_group_id = repo_groups.repo_group_id - AND LOWER(rg_name) = LOWER(:rg_name) - AND LOWER(repo_name) = LOWER(:repo_name) - """) - results = pd.read_sql(repoSQL, self.database, params={'rg_name': rg_name, 'repo_name': repo_name}) - results['url'] = results['url'].apply(lambda datum: datum.split('//')[1]) - return results - -def get_group_by_name(self, rg_name): - """ - Returns repo group id by repo group name - - :param rg_name: - """ - groupSQL = s.sql.text(""" - SELECT repo_group_id, rg_name - FROM repo_groups - WHERE lower(rg_name) = lower(:rg_name) - """) - results = pd.read_sql(groupSQL, self.database, params={'rg_name': rg_name}) - return results - -@annotate(tag='dosocs-repos') -def get_repos_for_dosocs(self): - """ Returns a list of repos along with their repo_id & path """ - get_repos_for_dosocs_SQL = s.sql.text(""" - SELECT b.repo_id, CONCAT(a.value || b.repo_group_id || chr(47) || b.repo_path || b.repo_name) AS path - FROM settings a, repo b - WHERE a.setting='repo_directory' - """) - - results = pd.read_sql(get_repos_for_dosocs_SQL, self.database) - return results - -@annotate(tag="get-issues") -def get_issues(self, repo_group_id, repo_id=None): - if not repo_id: - issuesSQL = s.sql.text(""" - SELECT issue_title, - issues.issue_id, - issues.repo_id, - issues.html_url, - issue_state AS STATUS, - issues.created_at AS DATE, - count(issue_events.event_id), - MAX(issue_events.created_at) AS LAST_EVENT_DATE, - EXTRACT(DAY FROM NOW() - issues.created_at) AS OPEN_DAY - FROM issues, - issue_events - WHERE issues.repo_id IN (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) - AND issues.issue_id = issue_events.issue_id - AND issues.pull_request is NULL - GROUP BY issues.issue_id - ORDER by OPEN_DAY DESC - """) - results = pd.read_sql(issuesSQL, self.database, 
params={'repo_group_id': repo_group_id}) - return results - else: - issuesSQL = s.sql.text(""" - SELECT issue_title, - issues.issue_id, - issues.repo_id, - issues.html_url, - issue_state AS STATUS, - issues.created_at AS DATE, - count(issue_events.event_id), - MAX(issue_events.created_at) AS LAST_EVENT_DATE, - EXTRACT(DAY FROM NOW() - issues.created_at) AS OPEN_DAY, - repo_name - FROM issues JOIN repo ON issues.repo_id = repo.repo_id, issue_events - WHERE issues.repo_id = :repo_id - AND issues.pull_request IS NULL - AND issues.issue_id = issue_events.issue_id - GROUP BY issues.issue_id, repo_name - ORDER by OPEN_DAY DESC - """) - results = pd.read_sql(issuesSQL, self.database, params={'repo_id': repo_id}) - return results - -@annotate(tag="aggregate-summary") -def aggregate_summary(self, repo_group_id, repo_id=None, begin_date=None, end_date=None): - - if not begin_date: - begin_date = datetime.datetime.now() - # Subtract 1 year and leap year check - try: - begin_date = begin_date.replace(year=begin_date.year-1) - except ValueError: - begin_date = begin_date.replace(year=begin_date.year-1, day=begin_date.day-1) - begin_date = begin_date.strftime('%Y-%m-%d') - if not end_date: - end_date = datetime.datetime.now().strftime('%Y-%m-%d') - - if not repo_id: - summarySQL = s.sql.text(""" - SELECT - ( - SELECT watchers_count AS watcher_count - FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id - WHERE repo_group_id = :repo_group_id - ORDER BY last_updated DESC - LIMIT 1 - ) - ( - SELECT watchers_count AS watcher_count - FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id - WHERE repo_group_id = :repo_group_id - AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') - ORDER BY last_updated ASC - LIMIT 1 - ) AS watcher_count, - ( - SELECT stars_count AS stars_count - FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id - WHERE repo_group_id = :repo_group_id - ORDER BY last_updated DESC - LIMIT 1 - ) - ( - SELECT stars_count AS stars_count - FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id - WHERE repo_group_id = :repo_group_id - AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') - ORDER BY last_updated ASC - LIMIT 1 - ) AS stars_count, - ( - SELECT fork_count AS fork_count - FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id - WHERE repo_group_id = :repo_group_id - ORDER BY last_updated DESC - LIMIT 1 - ) - ( - SELECT fork_count AS fork_count - FROM repo_info JOIN repo ON repo_info.repo_id = repo.repo_id - WHERE repo_group_id = :repo_group_id - AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') - ORDER BY last_updated ASC - LIMIT 1 - ) AS fork_count, - ( - SELECT count(*) AS merged_count - FROM ( - SELECT DISTINCT issue_events.issue_id - FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id JOIN repo ON issues.repo_id = repo.repo_id - WHERE action = 'merged' - AND repo_group_id = :repo_group_id - AND issue_events.created_at BETWEEN :begin_date AND :end_date - ) a - ) AS merged_count, - committer_count, commit_count FROM ( - SELECT count(cmt_author_name) AS committer_count, sum(commit_count) AS commit_count - FROM ( - SELECT DISTINCT cmt_author_name, COUNT(cmt_id) AS commit_count FROM commits JOIN repo ON commits.repo_id = repo.repo_id - WHERE repo_group_id = :repo_group_id - AND commits.cmt_committer_date BETWEEN :begin_date AND :end_date - GROUP BY cmt_author_name - ) temp - ) commit_data - """) - results = pd.read_sql(summarySQL, self.database, params={'repo_group_id': repo_group_id, - 
'begin_date': begin_date, 'end_date': end_date}) - return results - else: - summarySQL = s.sql.text(""" - SELECT - ( - SELECT watchers_count AS watcher_count - FROM repo_info - WHERE repo_id = :repo_id - ORDER BY last_updated DESC - LIMIT 1 - ) - ( - SELECT watchers_count AS watcher_count - FROM repo_info - WHERE repo_id = :repo_id - AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') - ORDER BY last_updated ASC - LIMIT 1 - ) AS watcher_count, - ( - SELECT stars_count AS stars_count - FROM repo_info - WHERE repo_id = :repo_id - ORDER BY last_updated DESC - LIMIT 1 - ) - ( - SELECT stars_count AS stars_count - FROM repo_info - WHERE repo_id = :repo_id - AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') - ORDER BY last_updated ASC - LIMIT 1 - ) AS stars_count, - ( - SELECT fork_count AS fork_count - FROM repo_info - WHERE repo_id = :repo_id - ORDER BY last_updated DESC - LIMIT 1 - ) - ( - SELECT fork_count AS fork_count - FROM repo_info - WHERE repo_id = :repo_id - AND last_updated >= date_trunc('day', NOW() - INTERVAL '1 year') - ORDER BY last_updated ASC - LIMIT 1 - ) AS fork_count, - ( - SELECT count(*) AS merged_count - FROM ( - SELECT DISTINCT issue_events.issue_id - FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id - WHERE action = 'merged' - AND repo_id = :repo_id - AND issue_events.created_at BETWEEN :begin_date AND :end_date - ) a - ) AS merged_count, - committer_count, commit_count FROM ( - SELECT count(cmt_author_name) AS committer_count, sum(commit_count) AS commit_count - FROM ( - SELECT DISTINCT cmt_author_name, COUNT(cmt_id) AS commit_count FROM commits - WHERE repo_id = :repo_id - AND commits.cmt_committer_date BETWEEN :begin_date AND :end_date - GROUP BY cmt_author_name - ) temp - ) commit_data - """) - results = pd.read_sql(summarySQL, self.database, params={'repo_id': repo_id, - 'begin_date': begin_date, 'end_date': end_date}) - return results - -def create_util_metrics(metrics): - add_metrics(metrics, __name__) diff --git a/augur/models/__init__.py b/augur/models/__init__.py deleted file mode 100644 --- a/augur/models/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from sqlalchemy.orm import sessionmaker -from .user import User -from .repo import Repo, RepoGroup - - -__all__ = ['User', 'RepoGroup', 'Repo'] \ No newline at end of file diff --git a/augur/models/common.py b/augur/models/common.py deleted file mode 100644 --- a/augur/models/common.py +++ /dev/null @@ -1,2 +0,0 @@ -from sqlalchemy.ext.declarative import declarative_base -Base = declarative_base() \ No newline at end of file diff --git a/augur/models/repo.py b/augur/models/repo.py deleted file mode 100644 --- a/augur/models/repo.py +++ /dev/null @@ -1,48 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime -from sqlalchemy.orm import relationship -from .common import Base -from .user import user_has_repo_group - -repo_group_has_project = Table('repo_group_has_project', - Base.metadata, - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), - Column('repo_id', ForeignKey('repo.url'), primary_key=True), -) - -class Repo(Base): - """ - The Repo object models a VCS repository - """ - __tablename__ = 'repo' - - # Keys - url = Column(String(1024), primary_key=True) - vcs = Column(String(64), default='git') - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - repo_groups_member_of = relationship('RepoGroup', secondary=repo_group_has_project, 
back_populates='projects') - - def __repr__(self): - return f"<Repo(giturl='{self.password}')>" - - -class RepoGroup(Base): - """ - The RepoGroup class models lists of projects that a user wants to keep track of - """ - __tablename__ = 'repo_group' - - # Keys - id = Column(Integer, primary_key=True) - name = Column(String(128)) - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - projects = relationship('Repo', secondary=repo_group_has_project, back_populates='repo_groups_member_of') - users_of = relationship('User', secondary=user_has_repo_group, back_populates='repo_groups') \ No newline at end of file diff --git a/augur/models/user.py b/augur/models/user.py deleted file mode 100644 --- a/augur/models/user.py +++ /dev/null @@ -1,61 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime, Boolean -from sqlalchemy.orm import relationship -from sqlalchemy.ext.hybrid import hybrid_property -from .common import Base -from werkzeug.security import generate_password_hash, check_password_hash -from flask_login import UserMixin - -user_has_repo_group = Table('user_has_repo_group', - Base.metadata, - Column('user_id', ForeignKey('user.id'), primary_key=True), - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), -) - -class User(Base): - """ - The User object models users in the database. - """ - __tablename__ = 'user' - - # Keys - id = Column(Integer, primary_key=True) - username = Column(String(64), unique=True, nullable=False) - email = Column(String(64), unique=True, nullable=False) - - # Fields - password_hash = Column(String(128)) - email_confirmation_token = Column(String(128), nullable=True) - created_at = Column(DateTime, default=datetime.datetime.utcnow) - password_updated_at = Column(DateTime, default=datetime.datetime.utcnow) - last_login_at = Column(DateTime, nullable=True) - authenticated = Column(Boolean, default=False) - active = Column(Boolean, default=True) - administrator = Column(Boolean, default=False) - - # Foreign Keys - repo_groups = relationship('RepoGroup', secondary=user_has_repo_group, back_populates='users_of') - - def get_id(self): - return self.id - - def __repr__(self): - return f"<User(username='{self.username}', email='{self.email}')>" - - @hybrid_property - def password(self): - return self.password_hash - - @password.setter - def password(self, password): - self.password_hash = generate_password_hash(password) - - def check_password(self, password): - return check_password_hash(self.password_hash, password) - - def is_authenticated(self): - return self.authenticated - - def is_active(self): - # False as we do not support annonymity - return self.active diff --git a/augur/plugins/example_plugin/__init__.py b/augur/plugins/example_plugin/__init__.py deleted file mode 100644 --- a/augur/plugins/example_plugin/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# SPDX-License-Identifier: MIT -from augur.augurplugin import AugurPlugin -from augur.application import Application - -class ExamplePlugin(AugurPlugin): - """ - This plugin serves as an example as to how to load plugins into Augur - """ - def __init__(self, augur_app): - super().__init__(augur_app) - - def __call__(self): - from .example_datasource import ExampleDatasource - return ExampleDatasource() - - def add_routes(self, flask_app): - """ - Responsible for adding this plugin's data sources to the API - """ - pass - -ExamplePlugin.augur_plugin_meta = { - 'name': 'example_plugin', - 'datasource': True 
-} -Application.register_plugin(ExamplePlugin) - -__all__ = ['ExamplePlugin'] \ No newline at end of file diff --git a/augur/plugins/example_plugin/example_datasource.py b/augur/plugins/example_plugin/example_datasource.py deleted file mode 100644 --- a/augur/plugins/example_plugin/example_datasource.py +++ /dev/null @@ -1,7 +0,0 @@ -class ExampleDatasource: - def __init__(self): - self.counter = 0 - - def hello_world(self, add=1): - self.counter += add - return {'counter': add} \ No newline at end of file diff --git a/augur/routes/__init__.py b/augur/routes/__init__.py --- a/augur/routes/__init__.py +++ b/augur/routes/__init__.py @@ -1,13 +1,34 @@ + +import logging import importlib import os import glob +import sys +import inspect + +logger = logging.getLogger(__name__) + +def get_route_files(): + route_files = [] + + def get_file_id(path): + return os.path.splitext(os.path.basename(path))[0] -from .broker import create_broker_routes -from .manager import create_manager_routes + for filename in glob.iglob("augur/routes/*"): + file_id = get_file_id(filename) + if not file_id.startswith('__') and filename.endswith('.py'): + route_files.append(file_id) + + return route_files + +route_files = get_route_files() def create_routes(server): - create_broker_routes(server) - create_manager_routes(server) - # for plugin_name in server._augur._loaded_plugins: - # module = server._augur[plugin_name] - # module.create_routes(server) + for route_file in route_files: + module = importlib.import_module('.' + route_file, 'augur.routes') + module.create_routes(server) + + for name, obj in inspect.getmembers(server.augur_app.metrics): + if hasattr(obj, 'is_metric') == True: + if obj.metadata['type'] == "standard": + server.add_standard_metric(obj, obj.metadata['endpoint']) diff --git a/augur/routes/batch.py b/augur/routes/batch.py new file mode 100644 --- /dev/null +++ b/augur/routes/batch.py @@ -0,0 +1,160 @@ +#SPDX-License-Identifier: MIT +""" +Creates routes for the manager +""" + +import logging +import time +import requests +import sqlalchemy as s +from sqlalchemy import exc +from flask import request, Response +from augur.util import metric_metadata +import json + +logger = logging.getLogger(__name__) + +def create_routes(server): + + @server.app.route('/{}/batch'.format(server.api_version), methods=['GET', 'POST']) + def batch(): + """ + Execute multiple requests, submitted as a batch. + :statuscode 207: Multi status + """ + + server.show_metadata = False + + if request.method == 'GET': + """this will return sensible defaults in the future""" + return server.app.make_response('{"status": "501", "response": "Defaults for batch requests not implemented. 
Please POST a JSON array of requests to this endpoint for now."}')
+
+        try:
+            requests = json.loads(request.data.decode('utf-8'))
+        except ValueError as e:
+            request.abort(400)
+
+        responses = []
+
+        for index, req in enumerate(requests):
+
+
+            method = req['method']
+            path = req['path']
+            body = req.get('body', None)
+
+            try:
+
+                logger.debug('batch-internal-loop: %s %s' % (method, path))
+
+                with server.app.app_context():
+                    with server.app.test_request_context(path,
+                                                         method=method,
+                                                         data=body):
+                        try:
+                            # Can modify flask.g here without affecting
+                            # flask.g of the root request for the batch
+
+                            # Pre process Request
+                            rv = server.app.preprocess_request()
+
+                            if rv is None:
+                                # Main Dispatch
+                                rv = server.app.dispatch_request()
+
+                        except Exception as e:
+                            rv = server.app.handle_user_exception(e)
+
+                        response = server.app.make_response(rv)
+
+                        # Post process Request
+                        response = server.app.process_response(response)
+
+                # Response is a Flask response object.
+                # _read_response(response) reads response.response
+                # and returns a string. If your endpoints return JSON object,
+                # this string would be the response as a JSON string.
+                responses.append({
+                    "path": path,
+                    "status": response.status_code,
+                    "response": str(response.get_data(), 'utf8'),
+                })
+
+            except Exception as e:
+
+                responses.append({
+                    "path": path,
+                    "status": 500,
+                    "response": str(e)
+                })
+
+
+        return Response(response=json.dumps(responses),
+                        status=207,
+                        mimetype="application/json")
+
+
+    """
+    @api {post} /batch Batch Request Metadata
+    @apiName BatchMetadata
+    @apiGroup Batch
+    @apiDescription Returns metadata of batch requests
+                    POST JSON of API requests metadata
+    """
+    @server.app.route('/{}/batch/metadata'.format(server.api_version), methods=['GET', 'POST'])
+    def batch_metadata():
+        """
+        Returns endpoint metadata in batch format
+        """
+        server.show_metadata = True
+
+        if request.method == 'GET':
+            """this will return sensible defaults in the future"""
+            return server.app.make_response(json.dumps(metric_metadata))
+
+        try:
+            requests = json.loads(request.data.decode('utf-8'))
+        except ValueError as e:
+            request.abort(400)
+
+        responses = []
+
+        for index, req in enumerate(requests):
+            method = req['method']
+            path = req['path']
+            body = req.get('body', None)
+
+            try:
+                logger.info('batch endpoint: ' + path)
+                with server.app.app_context():
+                    with server.app.test_request_context(path,
+                                                         method=method,
+                                                         data=body):
+                        try:
+                            rv = server.app.preprocess_request()
+                            if rv is None:
+                                rv = server.app.dispatch_request()
+                        except Exception as e:
+                            rv = server.app.handle_user_exception(e)
+                        response = server.app.make_response(rv)
+                        response = server.app.process_response(response)
+
+                responses.append({
+                    "path": path,
+                    "status": response.status_code,
+                    "response": str(response.get_data(), 'utf8'),
+                })
+
+            except Exception as e:
+                responses.append({
+                    "path": path,
+                    "status": 500,
+                    "response": str(e)
+                })
+
+        server.show_metadata = False
+
+        return Response(response=json.dumps(responses),
+                        status=207,
+                        mimetype="application/json")
+
diff --git a/augur/routes/broker.py b/augur/routes/broker.py
--- a/augur/routes/broker.py
+++ b/augur/routes/broker.py
@@ -9,6 +9,9 @@ import requests
 from flask import request, Response
 
+logger = logging.getLogger(__name__)
+
+# TODO: not this...
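# Minimal client sketch for the /batch endpoint defined above. Illustrative only: the host,
# port, and "api/unstable" URL prefix are assumptions, not taken from this patch. The endpoint
# accepts a JSON array of {"method", "path", "body"} objects and replies with HTTP 207 plus one
# {"path", "status", "response"} record per sub-request.
import json
import requests

def post_batch(base_url="http://localhost:5000/api/unstable"):
    batch = [
        {"method": "GET", "path": "/api/unstable/repos", "body": None},
        {"method": "GET", "path": "/api/unstable/repo-groups", "body": None},
    ]
    reply = requests.post(base_url + "/batch", json=batch)
    # Each entry's "response" field is itself a JSON string produced by the proxied endpoint.
    return reply.status_code, json.loads(reply.text)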
def worker_start(worker_name=None): process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True) @@ -26,12 +29,12 @@ def send_task(worker_proxy): j = r.json() if 'status' not in j: - logging.info("Worker: {}'s heartbeat did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) + logger.error("Worker: {}'s heartbeat did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) worker_proxy['status'] = 'Disconnected' return if j['status'] != 'alive': - logging.info("Worker: {} is busy, setting its status as so.\n".format(worker_id)) + logger.info("Worker: {} is busy, setting its status as so.\n".format(worker_id)) return # Want to check user-created job requests first @@ -43,22 +46,22 @@ def send_task(worker_proxy): new_task = maintain_queue.pop(0) else: - logging.info("Both queues are empty for worker {}\n".format(worker_id)) + logger.debug("Both queues are empty for worker {}\n".format(worker_id)) worker_proxy['status'] = 'Idle' return - logging.info("Worker {} is idle, preparing to send the {} task to {}\n".format(worker_id, new_task['display_name'], task_endpoint)) + logger.info("Worker {} is idle, preparing to send the {} task to {}\n".format(worker_id, new_task['display_name'], task_endpoint)) try: requests.post(task_endpoint, json=new_task) worker_proxy['status'] = 'Working' except: - logging.info("Sending Worker: {} a task did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) + logger.error("Sending Worker: {} a task did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) worker_proxy['status'] = 'Disconnected' # If the worker died, then restart it worker_start(worker_id.split('.')[len(worker_id.split('.')) - 2]) -def create_broker_routes(server): +def create_routes(server): @server.app.route('/{}/task'.format(server.api_version), methods=['POST']) def task(): @@ -71,9 +74,9 @@ def task(): for given_component in list(task['given'].keys()): given.append(given_component) model = task['models'][0] - logging.info("Broker recieved a new user task ... checking for compatible workers for given: " + str(given) + " and model(s): " + str(model) + "\n") + logger.info("Broker recieved a new user task ... 
checking for compatible workers for given: " + str(given) + " and model(s): " + str(model) + "\n") - logging.info("Broker's list of all workers: {}\n".format(server.broker._getvalue().keys())) + logger.debug("Broker's list of all workers: {}\n".format(server.broker._getvalue().keys())) worker_found = False compatible_workers = {} @@ -83,7 +86,7 @@ def task(): if type(server.broker[worker_id]._getvalue()) != dict: continue - logging.info("Considering compatible worker: {}\n".format(worker_id)) + logger.info("Considering compatible worker: {}\n".format(worker_id)) # Group workers by type (all gh workers grouped together etc) worker_type = worker_id.split('.')[len(worker_id.split('.'))-2] @@ -91,28 +94,28 @@ def task(): # Make worker that is prioritized the one with the smallest sum of task queues if (len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue'])) < min([compatible_workers[w]['task_load'] for w in compatible_workers.keys() if worker_type == w]): - logging.info("Worker id: {} has the smallest task load encountered so far: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']))) + logger.info("Worker id: {} has the smallest task load encountered so far: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']))) compatible_workers[worker_type]['task_load'] = len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']) compatible_workers[worker_type]['worker_id'] = worker_id for worker_type in compatible_workers.keys(): worker_id = compatible_workers[worker_type]['worker_id'] worker = server.broker[worker_id] - logging.info("Final compatible worker chosen: {} with smallest task load: {} found to work on task: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']), task)) + logger.info("Final compatible worker chosen: {} with smallest task load: {} found to work on task: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']), task)) if task['job_type'] == "UPDATE": worker['user_queue'].append(task) - logging.info("Added task for model: {}. New length of worker {}'s user queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['user_queue'])))) + logger.info("Added task for model: {}. New length of worker {}'s user queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['user_queue'])))) elif task['job_type'] == "MAINTAIN": worker['maintain_queue'].append(task) - logging.info("Added task for model: {}. New length of worker {}'s maintain queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['maintain_queue'])))) + logger.info("Added task for model: {}. 
New length of worker {}'s maintain queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['maintain_queue'])))) if worker['status'] == 'Idle': send_task(worker) worker_found = True # Otherwise, let the frontend know that the request can't be served if not worker_found: - logging.info("Augur does not have knowledge of any workers that are capable of handing the request: {}\n".format(task)) + logger.warning("Augur does not have knowledge of any workers that are capable of handing the request: {}\n".format(task)) return Response(response=task, status=200, @@ -124,7 +127,7 @@ def worker(): and telling the broker to add this worker to the set it maintains """ worker = request.json - logging.info("Recieved HELLO message from worker: {}\n".format(worker['id'])) + logger.info("Recieved HELLO message from worker: {}\n".format(worker['id'])) if worker['id'] not in server.broker: server.broker[worker['id']] = server.manager.dict() server.broker[worker['id']]['id'] = worker['id'] @@ -139,7 +142,7 @@ def worker(): server.broker[worker['id']]['status'] = 'Idle' server.broker[worker['id']]['location'] = worker['location'] else: - logging.info("Worker: {} has been reconnected.\n".format(worker['id'])) + logger.info("Worker: {} has been reconnected.\n".format(worker['id'])) models = server.broker[worker['id']]['models'] givens = server.broker[worker['id']]['given'] user_queue = server.broker[worker['id']]['user_queue'] @@ -157,7 +160,7 @@ def worker(): def sync_queue(): task = request.json worker = task['worker_id'] - logging.info("Message recieved that worker {} completed task: {}\n".format(worker,task)) + logger.info("Message recieved that worker {} completed task: {}\n".format(worker,task)) try: models = server.broker[worker]['models'] givens = server.broker[worker]['given'] @@ -167,8 +170,8 @@ def sync_queue(): if server.broker[worker]['status'] != 'Disconnected': send_task(server.broker[worker]) except Exception as e: - logging.info("Ran into error: {}\n".format(repr(e))) - logging.info("A past instance of the {} worker finished a previous leftover task.\n".format(worker)) + logger.error("Ran into error: {}\n".format(repr(e))) + logger.error("A past instance of the {} worker finished a previous leftover task.\n".format(worker)) return Response(response=task, status=200, @@ -190,7 +193,7 @@ def get_status(): @server.app.route('/{}/workers/remove'.format(server.api_version), methods=['POST']) def remove_worker(): worker = request.json - logging.info("Recieved a message to disconnect worker: {}\n".format(worker)) + logger.info("Recieved a message to disconnect worker: {}\n".format(worker)) server.broker[worker['id']]['status'] = 'Disconnected' return Response(response=worker, status=200, @@ -200,13 +203,13 @@ def remove_worker(): def task_error(): task = request.json worker_id = task['worker_id'] - logging.info("Recieved a message that {} ran into an error on task: {}\n".format(worker_id, task)) + logger.error("Recieved a message that {} ran into an error on task: {}\n".format(worker_id, task)) if worker_id in server.broker: if server.broker[worker_id]['status'] != 'Disconnected': - logging.info("{} ran into error while completing task: {}\n".format(worker_id, task)) + logger.error("{} ran into error while completing task: {}\n".format(worker_id, task)) send_task(server.broker[worker_id]) else: - logging.info("A previous instance of {} ran into error while completing task: {}\n".format(worker_id, task)) + logger.error("A previous instance of {} ran into error while completing task: 
{}\n".format(worker_id, task)) return Response(response=request.json, status=200, mimetype="application/json") \ No newline at end of file diff --git a/augur/routes/manager.py b/augur/routes/manager.py --- a/augur/routes/manager.py +++ b/augur/routes/manager.py @@ -11,7 +11,7 @@ from flask import request, Response import json -def create_manager_routes(server): +def create_routes(server): @server.app.route('/{}/add-repos'.format(server.api_version), methods=['POST']) def add_repos(): @@ -19,10 +19,9 @@ def add_repos(): adds repos belonging to any user or group to an existing augur repo group 'repos' are in the form org/repo, user/repo, or maybe even a full url """ - if authenticate_request(server._augur, request): - db_connection = get_db_engine(server._augur).connect() + if authenticate_request(server.augur_app, request): group = request.json['group'] - repo_manager = Repo_insertion_manager(group, db_connection) + repo_manager = Repo_insertion_manager(group, server.augur_app.database) group_id = repo_manager.get_org_id() errors = {} errors['invalid_inputs'] = [] @@ -59,10 +58,9 @@ def add_repos(): @server.app.route('/{}/create-repo-group'.format(server.api_version), methods=['POST']) def create_repo_group(): - if authenticate_request(server._augur, request): - conn = get_db_engine(server._augur) + if authenticate_request(server.augur_app, request): group = request.json['group'] - repo_manager = Repo_insertion_manager(group, conn) + repo_manager = Repo_insertion_manager(group, server.augur_app.database) summary = {} summary['errors'] = [] summary['repo_groups_created'] = [] @@ -98,10 +96,9 @@ def add_repo_group(): """ creates a new augur repo group and adds to it the given organization or user's repos takes an organization or user name """ - if authenticate_request(server._augur, request): - conn = get_db_engine(server._augur) + if authenticate_request(server.augur_app, request): group = request.json['org'] - repo_manager = Repo_insertion_manager(group, conn) + repo_manager = Repo_insertion_manager(group, server.augur_app.database) summary = {} summary['group_errors'] = [] summary['failed_repo_records'] = [] @@ -288,29 +285,15 @@ def get_repo_name(self): repo = self.name return repo[repo.find('/')+1:] -def get_db_engine(app): - - user = app.read_config('Database', 'user') - password = app.read_config('Database', 'password') - host = app.read_config('Database', 'host') - port = app.read_config('Database', 'port') - dbname = app.read_config('Database', 'name') - - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - user, password, host, port, dbname - ) - - return s.create_engine(DB_STR, poolclass=s.pool.NullPool) - -def authenticate_request(app, request): +def authenticate_request(augur_app, request): # do I like doing it like this? not at all # do I have the time to implement a better solution right now? 
not at all - user = app.read_config('Database', 'user') - password = app.read_config('Database', 'password') - host = app.read_config('Database', 'host') - port = app.read_config('Database', 'port') - dbname = app.read_config('Database', 'name') + user = augur_app.config.get_value('Database', 'user') + password = augur_app.config.get_value('Database', 'password') + host = augur_app.config.get_value('Database', 'host') + port = augur_app.config.get_value('Database', 'port') + dbname = augur_app.config.get_value('Database', 'name') DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( user, password, host, port, dbname diff --git a/augur/routes/nonstandard_metrics.py b/augur/routes/nonstandard_metrics.py new file mode 100644 --- /dev/null +++ b/augur/routes/nonstandard_metrics.py @@ -0,0 +1,24 @@ +import base64 +import sqlalchemy as s +import pandas as pd +import json +from flask import Response + +def create_routes(server): + + metrics = server.augur_app.metrics + + @server.app.route(f"/{server.api_version}/<license_id>/<spdx_binary>/<repo_group_id>/<repo_id>/license-files") + def get_license_files(license_id, spdx_binary, repo_group_id, repo_id): + arguments = [license_id, spdx_binary, repo_group_id, repo_id] + license_files = server.transform(metrics.license_files, args=arguments) + return Response(response=license_files, + status=200, + mimetype="application/json") + + @server.app.route(f"/{server.api_version}/repo-groups/<repo_group_id>/top-insights") + def top_insights(repo_group_id): + data = server.transform(metrics.top_insights, args=[repo_group_id]) + return Response(response=data, + status=200, + mimetype="application/json") \ No newline at end of file diff --git a/augur/routes/util.py b/augur/routes/util.py new file mode 100644 --- /dev/null +++ b/augur/routes/util.py @@ -0,0 +1,206 @@ +import base64 +import sqlalchemy as s +import pandas as pd +import json +from flask import Response + +def create_routes(server): + + @server.app.route('/{}/repo-groups'.format(server.api_version)) + def get_all_repo_groups(): #TODO: make this name automatic - wrapper? 
+ repoGroupsSQL = s.sql.text(""" + SELECT * + FROM repo_groups + ORDER BY rg_name + """) + results = pd.read_sql(repoGroupsSQL, server.augur_app.database) + data = results.to_json(orient="records", date_format='iso', date_unit='ms') + return Response(response=data, + status=200, + mimetype="application/json") + + @server.app.route('/{}/repos'.format(server.api_version)) + def get_all_repos(): + + get_all_repos_sql = s.sql.text(""" + SELECT + repo.repo_id, + repo.repo_name, + repo.description, + repo.repo_git AS url, + repo.repo_status, + a.commits_all_time, + b.issues_all_time , + rg_name, + repo.repo_group_id + FROM + repo + left outer join + (select repo_id, COUNT ( distinct commits.cmt_commit_hash ) AS commits_all_time from commits group by repo_id ) a on + repo.repo_id = a.repo_id + left outer join + (select repo_id, count ( * ) as issues_all_time from issues where issues.pull_request IS NULL group by repo_id) b + on + repo.repo_id = b.repo_id + JOIN repo_groups ON repo_groups.repo_group_id = repo.repo_group_id + order by repo_name + """) + results = pd.read_sql(get_all_repos_sql, server.augur_app.database) + results['url'] = results['url'].apply(lambda datum: datum.split('//')[1]) + + b64_urls = [] + for i in results.index: + b64_urls.append(base64.b64encode((results.at[i, 'url']).encode())) + results['base64_url'] = b64_urls + + data = results.to_json(orient="records", date_format='iso', date_unit='ms') + return Response(response=data, + status=200, + mimetype="application/json") + + @server.app.route('/{}/repo-groups/<repo_group_id>/repos'.format(server.api_version)) + def get_repos_in_repo_group(repo_group_id): + repos_in_repo_groups_SQL = s.sql.text(""" + SELECT + repo.repo_id, + repo.repo_name, + repo.description, + repo.repo_git AS url, + repo.repo_status, + a.commits_all_time, + b.issues_all_time + FROM + repo + left outer join + (select repo_id, COUNT ( distinct commits.cmt_commit_hash ) AS commits_all_time from commits group by repo_id ) a on + repo.repo_id = a.repo_id + left outer join + (select repo_id, count ( issues.issue_id) as issues_all_time from issues where issues.pull_request IS NULL group by repo_id) b + on + repo.repo_id = b.repo_id + JOIN repo_groups ON repo_groups.repo_group_id = repo.repo_group_id + WHERE + repo_groups.repo_group_id = :repo_group_id + ORDER BY repo.repo_git + """) + + results = pd.read_sql(repos_in_repo_groups_SQL, server.augur_app.database, params={'repo_group_id': repo_group_id}) + data = results.to_json(orient="records", date_format='iso', date_unit='ms') + return Response(response=data, + status=200, + mimetype="application/json") + + @server.app.route('/{}/owner/<owner>/name/<repo>'.format(server.api_version)) + def get_repo_by_git_name(owner, repo): + + get_repo_by_git_name_sql = s.sql.text(""" + SELECT repo.repo_id, repo.repo_group_id, rg_name + FROM repo JOIN repo_groups ON repo_groups.repo_group_id = repo.repo_group_id + WHERE repo_name = :repo AND repo_path LIKE :owner + GROUP BY repo_id, rg_name + """) + + results = pd.read_sql(get_repo_by_git_name_sql, server.augur_app.database, params={'owner': '%{}_'.format(owner), 'repo': repo,}) + data = results.to_json(orient="records", date_format='iso', date_unit='ms') + return Response(response=data, + status=200, + mimetype="application/json") + + @server.app.route('/{}/rg-name/<rg_name>/repo-name/<repo_name>'.format(server.api_version)) + def get_repo_by_name(rg_name, repo_name): + + get_repo_by_name_sql = s.sql.text(""" + SELECT repo_id, repo.repo_group_id, repo_git as url + FROM repo, 
repo_groups + WHERE repo.repo_group_id = repo_groups.repo_group_id + AND LOWER(rg_name) = LOWER(:rg_name) + AND LOWER(repo_name) = LOWER(:repo_name) + """) + results = pd.read_sql(get_repo_by_name_sql, server.augur_app.database, params={'rg_name': rg_name, 'repo_name': repo_name}) + results['url'] = results['url'].apply(lambda datum: datum.split('//')[1]) + data = results.to_json(orient="records", date_format='iso', date_unit='ms') + return Response(response=data, + status=200, + mimetype="application/json") + + @server.app.route('/{}/rg-name/<rg_name>'.format(server.api_version)) + def get_group_by_name(rg_name): + groupSQL = s.sql.text(""" + SELECT repo_group_id, rg_name + FROM repo_groups + WHERE lower(rg_name) = lower(:rg_name) + """) + results = pd.read_sql(groupSQL, server.augur_app.database, params={'rg_name': rg_name}) + data = results.to_json(orient="records", date_format='iso', date_unit='ms') + return Response(response=data, + status=200, + mimetype="application/json") + + @server.app.route('/{}/dosocs/repos'.format(server.api_version)) + def get_repos_for_dosocs(): + get_repos_for_dosocs_SQL = s.sql.text(""" + SELECT b.repo_id, CONCAT(a.value || b.repo_group_id || chr(47) || b.repo_path || b.repo_name) AS path + FROM settings a, repo b + WHERE a.setting='repo_directory' + """) + + results = pd.read_sql(get_repos_for_dosocs_SQL, server.augur_app.database) + data = results.to_json(orient="records", date_format='iso', date_unit='ms') + return Response(response=data, + status=200, + mimetype='application/json') + + @server.app.route('/{}/repo-groups/<repo_group_id>/get-issues'.format(server.api_version)) + @server.app.route('/{}/repos/<repo_id>/get-issues'.format(server.api_version)) + def get_issues(repo_group_id, repo_id=None): + if not repo_id: + get_issues_sql = s.sql.text(""" + SELECT issue_title, + issues.issue_id, + issues.repo_id, + issues.html_url, + issue_state AS STATUS, + issues.created_at AS DATE, + count(issue_events.event_id), + MAX(issue_events.created_at) AS LAST_EVENT_DATE, + EXTRACT(DAY FROM NOW() - issues.created_at) AS OPEN_DAY + FROM issues, + issue_events + WHERE issues.repo_id IN (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) + AND issues.issue_id = issue_events.issue_id + AND issues.pull_request is NULL + GROUP BY issues.issue_id + ORDER by OPEN_DAY DESC + """) + results = pd.read_sql(get_issues_sql, server.augur_app.database, params={'repo_group_id': repo_group_id}) + else: + get_issues_sql = s.sql.text(""" + SELECT issue_title, + issues.issue_id, + issues.repo_id, + issues.html_url, + issue_state AS STATUS, + issues.created_at AS DATE, + count(issue_events.event_id), + MAX(issue_events.created_at) AS LAST_EVENT_DATE, + EXTRACT(DAY FROM NOW() - issues.created_at) AS OPEN_DAY, + repo_name + FROM issues JOIN repo ON issues.repo_id = repo.repo_id, issue_events + WHERE issues.repo_id = :repo_id + AND issues.pull_request IS NULL + AND issues.issue_id = issue_events.issue_id + GROUP BY issues.issue_id, repo_name + ORDER by OPEN_DAY DESC + """) + results = pd.read_sql(get_issues_sql, server.augur_app.database, params={'repo_id': repo_id}) + data = results.to_json(orient="records", date_format='iso', date_unit='ms') + return Response(response=data, + status=200, + mimetype='application/json') + + @server.app.route('/{}/api-port'.format(server.api_version)) + def api_port(): + response = {'port': server.augur_app.config.get_value('Server', 'port')} + return Response(response=json.dumps(response), + status=200, + mimetype="application/json") diff 
--git a/augur/server.py b/augur/server.py --- a/augur/server.py +++ b/augur/server.py @@ -3,98 +3,76 @@ Creates a WSGI server that serves the Augur REST API """ +import glob +import sys +import inspect +import types import json import os import base64 -from flask import Flask, request, Response, send_from_directory +import logging + +from flask import Flask, request, Response, redirect from flask_cors import CORS -from flask_login import current_user import pandas as pd + import augur -from augur.util import annotate, metric_metadata, logger from augur.routes import create_routes AUGUR_API_VERSION = 'api/unstable' -class VueCompatibleFlask(Flask): - jinja_options = Flask.jinja_options.copy() - jinja_options.update(dict( - block_start_string='(%', - block_end_string='%)', - variable_start_string='%%', - variable_end_string='%%', - comment_start_string='(#', - comment_end_string='#)', - )) - +logger = logging.getLogger(__name__) class Server(object): """ Defines Augur's server's behavior """ - def __init__(self, frontend_folder='../frontend/public', manager=None, broker=None, housekeeper=None): + def __init__(self, augur_app=None): """ Initializes the server, creating both the Flask application and Augur application """ # Create Flask application - self.app = VueCompatibleFlask(__name__, static_folder=frontend_folder, template_folder=frontend_folder) + self.app = Flask(__name__) + logger.debug("Created Flask app") self.api_version = AUGUR_API_VERSION app = self.app CORS(app) app.url_map.strict_slashes = False - # Create Augur application - self._augur = augur.Application() - augur_app = self._augur + self.augur_app = augur_app + self.manager = augur_app.manager + self.broker = augur_app.broker + self.housekeeper = augur_app.housekeeper # Initialize cache - expire = int(augur_app.read_config('Server', 'cache_expire')) - self.cache = augur_app.cache.get_cache('server', expire=expire) + expire = int(self.augur_app.config.get_value('Server', 'cache_expire')) + self.cache = self.augur_app.cache.get_cache('server', expire=expire) self.cache.clear() app.config['WTF_CSRF_ENABLED'] = False self.show_metadata = False - self.manager = manager - self.broker = broker - self.housekeeper = housekeeper - - self.worker_pids = [] - + logger.debug("Creating API routes...") create_routes(self) - augur_app.metrics.create_routes(self) ##################################### ### UTILITY ### ##################################### @app.route('/') - @app.errorhandler(404) - @app.errorhandler(405) - def index(err=None): + @app.route('/ping') + @app.route('/status') + @app.route('/healthcheck') + def index(): """ Redirects to health check route """ - if AUGUR_API_VERSION in request.url: - return Response(response=json.dumps({'error': 'Not Found'}), - status=404, - mimetype="application/json") - else: - - session_data = {} - if current_user and hasattr(current_user, 'username'): - session_data = { 'username': current_user.username } - return Response(response=json.dumps(session_data), - status=405, - mimetype="application/json")#render_template('index.html', session_script=f'window.AUGUR_SESSION={json.dumps(session_data)}\n') - - @app.route('/static/<path:path>') - def send_static(path): - return send_from_directory(frontend_folder, path) + return redirect(self.api_version) @app.route('/{}/'.format(self.api_version)) + @app.route('/{}/status'.format(self.api_version)) def status(): """ Health check route @@ -106,184 +84,6 @@ def status(): status=200, mimetype="application/json") - """ - @api {post} /batch Batch Requests - 
@apiName Batch - @apiGroup Batch - @apiDescription Returns results of batch requests - POST JSON of api requests - """ - @app.route('/{}/batch'.format(self.api_version), methods=['GET', 'POST']) - def batch(): - """ - Execute multiple requests, submitted as a batch. - :statuscode 207: Multi status - """ - - """ - to have on future batch request for each individual chart: - - timeseries/metric - - props that are in current card files (title) - - do any of these things act like the vuex states? - - what would singular card(dashboard) look like now? - """ - - self.show_metadata = False - - if request.method == 'GET': - """this will return sensible defaults in the future""" - return app.make_response('{"status": "501", "response": "Defaults for batch requests not implemented. Please POST a JSON array of requests to this endpoint for now."}') - - try: - requests = json.loads(request.data.decode('utf-8')) - except ValueError as e: - request.abort(400) - - responses = [] - - for index, req in enumerate(requests): - - - method = req['method'] - path = req['path'] - body = req.get('body', None) - - try: - - logger.debug('batch-internal-loop: %s %s' % (method, path)) - - with app.app_context(): - with app.test_request_context(path, - method=method, - data=body): - try: - # Can modify flask.g here without affecting - # flask.g of the root request for the batch - - # Pre process Request - rv = app.preprocess_request() - - if rv is None: - # Main Dispatch - rv = app.dispatch_request() - - except Exception as e: - rv = app.handle_user_exception(e) - - response = app.make_response(rv) - - # Post process Request - response = app.process_response(response) - - # Response is a Flask response object. - # _read_response(response) reads response.response - # and returns a string. If your endpoints return JSON object, - # this string would be the response as a JSON string. 
- responses.append({ - "path": path, - "status": response.status_code, - "response": str(response.get_data(), 'utf8'), - }) - - except Exception as e: - - responses.append({ - "path": path, - "status": 500, - "response": str(e) - }) - - - return Response(response=json.dumps(responses), - status=207, - mimetype="application/json") - - - """ - @api {post} /batch Batch Request Metadata - @apiName BatchMetadata - @apiGroup Batch - @apiDescription Returns metadata of batch requests - POST JSON of API requests metadata - """ - @app.route('/{}/batch/metadata'.format(self.api_version), methods=['GET', 'POST']) - def batch_metadata(): - """ - Returns endpoint metadata in batch format - """ - - self.show_metadata = True - - if request.method == 'GET': - """this will return sensible defaults in the future""" - return app.make_response(json.dumps(metric_metadata)) - - try: - requests = json.loads(request.data.decode('utf-8')) - except ValueError as e: - request.abort(400) - - responses = [] - - for index, req in enumerate(requests): - - method = req['method'] - path = req['path'] - body = req.get('body', None) - - try: - - augur.logger.info('batch endpoint: ' + path) - - with app.app_context(): - with app.test_request_context(path, - method=method, - data=body): - try: - # Can modify flask.g here without affecting - # flask.g of the root request for the batch - - # Pre process Request - rv = app.preprocess_request() - - if rv is None: - # Main Dispatch - rv = app.dispatch_request() - - except Exception as e: - rv = app.handle_user_exception(e) - - response = app.make_response(rv) - - # Post process Request - response = app.process_response(response) - - # Response is a Flask response object. - # _read_response(response) reads response.response - # and returns a string. If your endpoints return JSON object, - # this string would be the response as a JSON string. 
- - responses.append({ - "path": path, - "status": response.status_code, - "response": str(response.get_data(), 'utf8'), - }) - - except Exception as e: - - responses.append({ - "path": path, - "status": 500, - "response": str(e) - }) - - self.show_metadata = False - - return Response(response=json.dumps(responses), - status=207, - mimetype="application/json") - - def transform(self, func, args=None, kwargs=None, repo_url_base=None, orient='records', group_by=None, on=None, aggregate='sum', resample=None, date_col='date'): """ @@ -326,7 +126,7 @@ def transform(self, func, args=None, kwargs=None, repo_url_base=None, orient='re return result - def flaskify(self, func, cache=True): + def flaskify(self, function, cache=True): """ Simplifies API endpoints that just accept owner and repo, transforms them and spits them out @@ -334,20 +134,21 @@ def flaskify(self, func, cache=True): if cache: def generated_function(*args, **kwargs): def heavy_lifting(): - return self.transform(func, args, kwargs, **request.args.to_dict()) + return self.transform(function, args, kwargs, **request.args.to_dict()) body = self.cache.get(key=str(request.url), createfunc=heavy_lifting) return Response(response=body, status=200, mimetype="application/json") - generated_function.__name__ = func.__class__.__name__ + " _" + func.__name__ + generated_function.__name__ = function.__name__ + logger.info(generated_function.__name__) return generated_function else: def generated_function(*args, **kwargs): kwargs.update(request.args.to_dict()) - return Response(response=self.transform(func, args, kwargs, **request.args.to_dict()), + return Response(response=self.transform(function, args, kwargs, **request.args.to_dict()), status=200, mimetype="application/json") - generated_function.__name__ = func.__class__.__name__ + " _" + func.__name__ + generated_function.__name__ = function.__name__ return generated_function def routify(self, func, endpoint_type): @@ -372,95 +173,10 @@ def generated_function(*args, **kwargs): generated_function.__name__ = f"{endpoint_type}_" + func.__name__ return generated_function - def addLicenseMetric(self, function, endpoint, **kwargs): - endpoint = f'/{self.api_version}/<license_id>/<spdx_binary>/<repo_group_id>/<repo_id>/{endpoint}' - self.app.route(endpoint)(self.routify(function, 'license_metric')) - kwargs['endpoint_type'] = 'license_metric' - self.updateMetricMetadata(function, endpoint, **kwargs) - - def addRepoGroupMetric(self, function, endpoint, **kwargs): - """Simplifies adding routes that accept repo_group_id""" - endpoint = f'/{self.api_version}/repo-groups/<repo_group_id>/{endpoint}' - self.app.route(endpoint)(self.routify(function, 'repo_group')) - kwargs['endpoint_type'] = 'repo_group' - self.updateMetricMetadata(function, endpoint, **kwargs) - - def addRepoMetric(self, function, metric_endpoint, **kwargs): - """Simplifies adding routes that accept repo_group_id and repo_id""" - endpoint = f'/{self.api_version}/repos/<repo_id>/{metric_endpoint}' - deprecated_endpoint = f'/{self.api_version}/repo-groups/<repo_group_id>/repos/<repo_id>/{metric_endpoint}' - self.app.route(endpoint)(self.routify(function, 'repo')) - self.app.route(deprecated_endpoint)(self.routify(function, 'deprecated_repo')) - kwargs['endpoint_type'] = 'repo' - self.updateMetricMetadata(function, endpoint, **kwargs) - - def addMetric(self, function, endpoint, cache=True, **kwargs): - """Simplifies adding routes that dont accept group/repo ids""" - endpoint = '/{}/{}'.format(self.api_version, endpoint) - 
self.app.route(endpoint)(self.flaskify(function, 'general_metric')) - kwargs['endpoint_type'] = 'general_metric' - self.updateMetricMetadata(function, endpoint, **kwargs) - - def addTimeseries(self, function, endpoint): - """ - Simplifies adding routes that accept owner/repo and return timeseries - - :param app: Flask app - :param function: Function from a datasource to add - :param endpoint: GET endpoint to generate - """ - self.addMetric(function, 'timeseries/{}'.format(endpoint), metric_type='timeseries') - - def updateMetricMetadata(self, function, endpoint=None, **kwargs): - """ - Updates a given metric's metadata - """ - - # God forgive me - # - # Get the unbound function from the bound function's class so that we can modify metadata - # across instances of that class. - real_func = getattr(self._augur.metrics, function.__name__) - annotate(endpoint=endpoint, **kwargs)(real_func) - - def admin(self): - return (current_user and current_user.administrator) or (request.args.get('admin_token') == self._augur.read_config('Server', 'admin_token', 'AUGUR_ADMIN_TOKEN', 'changeme')) - - -def run(): - """ - Runs server with configured hosts/ports - """ - server = Server() - host = server._augur.read_config('Server', 'host', 'AUGUR_HOST', '0.0.0.0') - port = server._augur.read_config('Server', 'port', 'AUGUR_PORT', '5000') - Server().app.run(host=host, port=int(port), debug=True) - - -wsgi_app = None -def wsgi(environ, start_response): - """ - Creates WSGI app - """ - global wsgi_app - if (wsgi_app is None): - app_instance = Server() - wsgi_app = app_instance.app - # Stuff to make proxypass work - script_name = environ.get('HTTP_X_SCRIPT_NAME', '') - if script_name: - environ['SCRIPT_NAME'] = script_name - path_info = environ['PATH_INFO'] - if path_info.startswith(script_name): - environ['PATH_INFO'] = path_info[len(script_name):] - - scheme = environ.get('HTTP_X_SCHEME', '') - if scheme: - environ['wsgi.url_scheme'] = scheme - server = environ.get('HTTP_X_FORWARDED_SERVER', '') - if server: - environ['HTTP_HOST'] = server - return wsgi_app(environ, start_response) - -if __name__ == "__main__": - run() + def add_standard_metric(self, function, endpoint, **kwargs): + repo_endpoint = f'/{self.api_version}/repos/<repo_id>/{endpoint}' + repo_group_endpoint = f'/{self.api_version}/repo-groups/<repo_group_id>/{endpoint}' + deprecated_repo_endpoint = f'/{self.api_version}/repo-groups/<repo_group_id>/repos/<repo_id>/{endpoint}' + self.app.route(repo_endpoint)(self.routify(function, 'repo')) + self.app.route(repo_group_endpoint)(self.routify(function, 'repo_group')) + self.app.route(deprecated_repo_endpoint )(self.routify(function, 'deprecated_repo')) diff --git a/augur/util.py b/augur/util.py --- a/augur/util.py +++ b/augur/util.py @@ -4,24 +4,13 @@ """ import os import re -import logging import inspect import types import sys -import coloredlogs import beaker +import logging -# Logging -coloredlogs.install(level=os.getenv('AUGUR_LOG_LEVEL', 'INFO')) -logger = logging.getLogger('augur') - -def getFileID(path): - """ - Returns file ID of given object - - :param path: path of given object - """ - return os.path.splitext(os.path.basename(path))[0] +logger = logging.getLogger(__name__) __ROOT = os.path.abspath(os.path.dirname(__file__)) def get_data_path(path): @@ -52,75 +41,33 @@ def get_cache(namespace, cache_manager=None): return cache_manager.get_cache(namespace) metric_metadata = [] -def annotate(metadata=None, **kwargs): +def register_metric(metadata=None, **kwargs): """ - Decorates a function as 
being a metric + Register a function as being a metric """ if metadata is None: metadata = {} - def decorate(func): - if not hasattr(func, 'metadata'): - func.metadata = {} - metric_metadata.append(func.metadata) - - func.metadata.update(metadata) - if kwargs.get('endpoint_type', None): - endpoint_type = kwargs.pop('endpoint_type') - if endpoint_type == 'repo': - func.metadata['repo_endpoint'] = kwargs.get('endpoint') - else: - func.metadata['group_endpoint'] = kwargs.get('endpoint') - - func.metadata.update(dict(kwargs)) - - func.metadata['metric_name'] = re.sub('_', ' ', func.__name__).title() - func.metadata['source'] = re.sub(r'(.*\.)', '', func.__module__) - func.metadata['ID'] = "{}-{}".format(func.metadata['source'].lower(), func.metadata['tag']) - - return func - return decorate + def decorate(function): + if not hasattr(function, 'metadata'): + function.metadata = {} + metric_metadata.append(function.metadata) -def add_metrics(metrics, module_name): - # find all unbound endpoint functions objects (ones that have metadata) defined the given module_name - # and bind them to the metrics class - # Derek are you proud of me - for name, obj in inspect.getmembers(sys.modules[module_name]): - if inspect.isfunction(obj) == True: - if hasattr(obj, 'metadata') == True: - setattr(metrics, name, types.MethodType(obj, metrics)) + if not hasattr(function, 'is_metric'): + function.is_metric = True -# -# IPython -# + function.metadata.update(dict(kwargs)) -def init_shell_config(): - from IPython.terminal.prompts import Prompts, Token - from traitlets.config.loader import Config - - class PYRCSSPrompt(Prompts): - def in_prompt_tokens(self, cli=None): - return [ - (Token.Prompt, 'augur ['), - (Token.PromptNum, str(self.shell.execution_count)), - (Token.Prompt, ']: '), - ] - def out_prompt_tokens(self): - return [ - (Token.OutPrompt, 'output ['), - (Token.OutPromptNum, str(self.shell.execution_count)), - (Token.OutPrompt, ']: '), - ] + function.metadata['tag'] = re.sub('_', '-', function.__name__).lower() + function.metadata['endpoint'] = function.metadata['tag'] + function.metadata['name'] = re.sub('_', ' ', function.__name__).title() + function.metadata['model'] = re.sub(r'(.*\.)', '', function.__module__) - try: - get_ipython - except NameError: - nested = 0 - cfg = Config() - cfg.TerminalInteractiveShell.prompts_class=PYRCSSPrompt - else: - print("Running nested copies of the augur shell.") - cfg = Config() - nested = 1 - return cfg + if kwargs.get('type', None): + function.metadata['type'] = kwargs.get('type') + else: + function.metadata['type'] = "standard" + function.metadata.update(metadata) + return function + return decorate \ No newline at end of file diff --git a/conftest.py b/conftest.py new file mode 100644 --- /dev/null +++ b/conftest.py @@ -0,0 +1,31 @@ +import pytest +import re + +from augur.application import Application +from augur.cli.run import initialize_components + +default_repo_id = "25430" +default_repo_group_id = "10" + +def create_full_routes(routes): + full_routes = [] + for route in routes: + route = re.sub("<default_repo_id>", default_repo_id, route) + route = re.sub("<default_repo_group_id>", default_repo_group_id, route) + route = "http://localhost:5000/api/unstable/" + route + full_routes.append(route) + return full_routes + [email protected](scope="session") +def augur_app(): + augur_app = Application(disable_logs=True) + return augur_app + [email protected](scope="session") +def metrics(augur_app): + return augur_app.metrics + [email protected](scope="session") 
+def client(augur_app): + flask_client = initialize_components(augur_app, disable_housekeeper=True).load() + return flask_client.test_client() diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -25,7 +25,6 @@ exec(open(os.path.join(here, "../../metadata.py")).read()) - sys.path.insert(0, os.path.abspath('../../../augur')) # -- General configuration ------------------------------------------------ @@ -47,7 +46,23 @@ 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', - 'sphinx_rtd_theme' + 'sphinx_rtd_theme', + 'sphinxcontrib.openapi', + 'sphinxcontrib.redoc' +] + +redoc = [ + { + 'name': 'Augur API', + 'page': 'rest-api/api', + 'spec': 'rest-api/spec.yml', + 'embed': True, + 'opts': { + "suppress-warnings": True, + "lazy-rendering": True, + 'expand-responses': ["200"] + } + } ] # Add any paths that contain templates here, relative to this directory. @@ -66,8 +81,6 @@ copyright = __copyright__ author = 'Carter Landis' - - # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. diff --git a/metadata.py b/metadata.py --- a/metadata.py +++ b/metadata.py @@ -1,13 +1,11 @@ -from os import path - __name__ = "Augur" __slug__ = "augur" __url__ = "https://github.com/chaoss/augur" __short_description__ = "Python 3 package for free/libre and open-source software community metrics & data collection" -__version__ = "0.12.0" -__release__ = "0.12.0" +__version__ = "0.13.0" +__release__ = "v0.13.0" __license__ = "MIT" __copyright__ = "CHAOSS & Augurlabs 2020" diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -44,15 +44,24 @@ "psycopg2-binary", "click", "psutil", - "gunicorn==19.9.0", + "gunicorn", "six>=1.14.0" ], extras_require={ - "dev": ["tox", "pytest", "ipdb", "sphinx","sphinx_rtd_theme"] + "dev": [ + "tox", + "pytest", + "ipdb", + "sphinx", + "sphinx_rtd_theme", + "sphinxcontrib-openapi", + "sphinxcontrib-redoc", + "docutils==0.15" + ] }, entry_points={ "console_scripts": [ - "augur=augur.runtime:run" + "augur=augur.cli._multicommand:run" ], } ) diff --git a/util/alembic/env.py b/util/alembic/env.py deleted file mode 100644 --- a/util/alembic/env.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import with_statement -from alembic import context -from sqlalchemy import engine_from_config, pool -from logging.config import fileConfig - - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -from augur.models.common import Base -target_metadata = Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. 
- - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, target_metadata=target_metadata, literal_binds=True) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - import augur.application - app = augur.application.Application() - - context.configure( - connection=app.db.connect(), - target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py b/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py deleted file mode 100644 --- a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Create basic tables - -Revision ID: 2eaa930b1f5a -Revises: -Create Date: 2019-02-09 16:10:24.251828 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = '2eaa930b1f5a' -down_revision = None -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.create_table('repo', - sa.Column('url', sa.String(length=1024), nullable=False), - sa.Column('vcs', sa.String(length=64), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('url') - ) - op.create_table('repo_group', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('id') - ) - op.create_table('user', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('username', sa.String(length=64), nullable=False), - sa.Column('email', sa.String(length=64), nullable=False), - sa.Column('password_hash', sa.String(length=128), nullable=True), - sa.Column('email_confirmation_token', sa.String(length=128), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('password_updated_at', sa.DateTime(), nullable=True), - sa.Column('last_login_at', sa.DateTime(), nullable=True), - sa.Column('authenticated', sa.Boolean(), nullable=True), - sa.Column('active', sa.Boolean(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('email'), - sa.UniqueConstraint('username') - ) - op.create_table('repo_group_has_project', - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.Column('repo_id', sa.String(length=1024), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['repo_id'], ['repo.url'], ), - sa.PrimaryKeyConstraint('repo_group_id', 'repo_id') - ) - op.create_table('user_has_repo_group', - sa.Column('user_id', sa.Integer(), nullable=False), - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['user_id'], ['user.id'], ), - sa.PrimaryKeyConstraint('user_id', 'repo_group_id') - ) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - op.drop_table('user_has_repo_group') - op.drop_table('repo_group_has_project') - op.drop_table('user') - op.drop_table('repo_group') - op.drop_table('repo') - # ### end Alembic commands ### diff --git a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py b/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py deleted file mode 100644 --- a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Add admin to User, name to RepoGroup - -Revision ID: a051167419fa -Revises: 2eaa930b1f5a -Create Date: 2019-02-17 13:09:42.138936 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = 'a051167419fa' -down_revision = '2eaa930b1f5a' -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.add_column('repo_group', sa.Column('name', sa.String(length=128), nullable=True)) - op.add_column('user', sa.Column('administrator', sa.Boolean(), nullable=True)) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.drop_column('user', 'administrator') - op.drop_column('repo_group', 'name') - # ### end Alembic commands ### diff --git a/workers/contributor_worker/contributor_worker/worker.py b/workers/contributor_worker/contributor_worker.py similarity index 56% rename from workers/contributor_worker/contributor_worker/worker.py rename to workers/contributor_worker/contributor_worker.py --- a/workers/contributor_worker/contributor_worker/worker.py +++ b/workers/contributor_worker/contributor_worker.py @@ -8,189 +8,50 @@ import statistics, logging, os, json, time import numpy as np import datetime -from workers.standard_methods import * #init_oauths, get_table_values, get_max_id, register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process, paginate -import warnings -warnings.filterwarnings('ignore') -class ContributorWorker: +from workers.worker_base import Worker + +class ContributorWorker(Worker): """ Worker that detects anomalies on a select few of our metrics task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self._task = task - self._child = None - self._queue = Queue() - self.db = None - self.tool_source = 'Contributor Worker' - self.tool_version = '0.0.1' # See __init__.py - self.data_source = 'Augur API' - self.finishing_task = False - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["contributors"] - } - ], - "config": [self.config] - } + def __init__(self, config={}): - self.results_counter = 0 + worker_type = "contributor_worker" - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) + given = [['git_url']] + models = ['contributors'] - dbschema = 'augur_data' - self.db = 
s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) + data_tables = ['contributors', 'contributors_aliases', 'contributor_affiliations', + 'issue_events', 'pull_request_events', 'issues', 'message', 'issue_assignees', + 'pull_request_assignees', 'pull_request_reviewers', 'pull_request_meta', 'pull_request_repo'] + operations_tables = ['worker_history', 'worker_job'] - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - # produce our own MetaData object - metadata = MetaData() - helper_metadata = MetaData() + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) - # we can reflect it ourselves from a database, using options - # such as 'only' to limit what tables we look at... - metadata.reflect(self.db, only=['contributors', 'contributors_aliases', 'contributor_affiliations', - 'issue_events', 'pull_request_events', 'issues', 'message', 'issue_assignees', - 'pull_request_assignees', 'pull_request_reviewers', 'pull_request_meta', 'pull_request_repo']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job']) - - # we can then produce a set of mappings from this MetaData. - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - # calling prepare() just sets up mapped classes and relationships. - Base.prepare() - HelperBase.prepare() - - # mapped classes are ready - self.contributors_table = Base.classes.contributors.__table__ - self.contributors_aliases_table = Base.classes.contributors_aliases.__table__ - self.contributor_affiliations_table = Base.classes.contributor_affiliations.__table__ - self.issue_events_table = Base.classes.issue_events.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.issues_table = Base.classes.issues.__table__ - self.message_table = Base.classes.message.__table__ - self.issue_assignees_table = Base.classes.issue_assignees.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5432/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - 
Adds this task to the queue, and calls method to process queue - """ - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'contributors': - self.contributors_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass + # These 3 are included in every tuple the worker inserts (data collection info) + self.tool_source = 'Contributor Worker' + self.tool_version = '1.0.0' + self.data_source = 'Augur Commit Data' def contributors_model(self, entry_info, repo_id): + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's + self.logger.info("Querying starting ids info...\n") + + self.cntrb_id_inc = self.get_max_id('contributors', 'cntrb_id') + # Get and insert all users (emails) found by the facade worker self.insert_facade_contributors(entry_info, repo_id) # Get and insert all users github considers to be contributors for this repo - query_github_contributors(self, entry_info, repo_id) + self.query_github_contributors(entry_info, repo_id) - logging.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) + self.logger.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -202,7 +63,7 @@ def contributors_model(self, entry_info, repo_id): gh_following_url, gh_gists_url, gh_starred_url, gh_subscriptions_url, gh_organizations_url, gh_repos_url, gh_events_url, gh_received_events_url, gh_type, gh_site_admin, cntrb_last_used FROM commits, contributors - WHERE repo_id = {} + WHERE repo_id = :repo_id AND contributors.cntrb_full_name = cmt_author_name UNION SELECT cmt_author_name AS commit_name, cntrb_id, cmt_author_raw_email AS commit_email, cntrb_email, @@ -213,7 +74,7 @@ def contributors_model(self, entry_info, repo_id): gh_following_url, gh_gists_url, gh_starred_url, gh_subscriptions_url, 
gh_organizations_url, gh_repos_url, gh_events_url, gh_received_events_url, gh_type, gh_site_admin, cntrb_last_used FROM commits, contributors - WHERE repo_id = {} + WHERE repo_id = :repo_id AND contributors.cntrb_email = cmt_author_raw_email UNION SELECT cmt_committer_name AS commit_name, cntrb_id, cmt_committer_raw_email AS commit_email, @@ -224,7 +85,7 @@ def contributors_model(self, entry_info, repo_id): gh_following_url, gh_gists_url, gh_starred_url, gh_subscriptions_url, gh_organizations_url, gh_repos_url, gh_events_url, gh_received_events_url, gh_type, gh_site_admin, cntrb_last_used FROM commits, contributors - WHERE repo_id = {} + WHERE repo_id = :repo_id AND contributors.cntrb_full_name = cmt_committer_name UNION SELECT cmt_committer_name AS commit_name, cntrb_id, cmt_committer_raw_email AS commit_email, @@ -235,13 +96,14 @@ def contributors_model(self, entry_info, repo_id): gh_following_url, gh_gists_url, gh_starred_url, gh_subscriptions_url, gh_organizations_url, gh_repos_url, gh_events_url, gh_received_events_url, gh_type, gh_site_admin, cntrb_last_used FROM commits, contributors - WHERE repo_id = {} + WHERE repo_id = :repo_id AND contributors.cntrb_email = cmt_committer_raw_email ORDER BY cntrb_id - """.format(repo_id,repo_id,repo_id,repo_id)) + """) - commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, params={}).to_json(orient="records")) - logging.info("We found {} distinct emails to search for in this repo (repo_id = {})".format( + commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, \ + params={'repo_id': repo_id}).to_json(orient="records")) + self.logger.info("We found {} distinct emails to search for in this repo (repo_id = {})\n".format( len(commit_cntrbs), repo_id)) # For every unique commit contributor info combination... 
@@ -282,7 +144,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(times_used_tuple)) self.results_counter += 1 - logging.info("Updated cntrb_created_at and cntrb_last_used columns for existing " + self.logger.info("Updated cntrb_created_at and cntrb_last_used columns for existing " "tuple in the contributors table with email: {}\n".format(contributor['commit_email'])) # If cntrb_full_name column is not filled, go ahead and fill it bc we have that info @@ -296,7 +158,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(name_col)) - logging.info("Inserted cntrb_full_name column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_full_name column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) # If cntrb_canonical column is not filled, go ahead and fill it w main email bc @@ -311,7 +173,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(canonical_col)) - logging.info("Inserted cntrb_canonical column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_canonical column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) @@ -346,20 +208,20 @@ def contributors_model(self, entry_info, repo_id): url = 'https://api.github.com/search/users?q={}+in:email'.format( cmt_cntrb['email']) - logging.info("Hitting endpoint: " + url + " ...\n") + self.logger.info("Hitting endpoint: " + url + " ...\n") r = requests.get(url=url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) results = r.json() # If no matches or bad response, continue with other contributors if 'total_count' not in results: - logging.info("Search query returned an empty response, moving on...\n") + self.logger.info("Search query returned an empty response, moving on...\n") continue if results['total_count'] == 0: - logging.info("Search query did not return any results, moving on...\n") + self.logger.info("Search query did not return any results, moving on...\n") continue - logging.info("When searching for a contributor with info {}, we found the following users: {}\n".format( + self.logger.info("When searching for a contributor with info {}, we found the following users: {}\n".format( cmt_cntrb, results)) # Grab first result and make sure it has the highest match score @@ -369,9 +231,9 @@ def contributors_model(self, entry_info, repo_id): match = item cntrb_url = ("https://api.github.com/users/" + match['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) contributor = r.json() # Fill in all github information @@ -406,14 +268,78 @@ def contributors_model(self, entry_info, repo_id): } result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==self.cntrb_id_inc).values(cntrb_gh_info)) - logging.info("Updated existing tuple in the contributors table with github info after " + self.logger.info("Updated existing tuple in the 
contributors table with github info after " "a successful search query on a facade commit's author : {} {}\n".format(contributor, cntrb_gh_info)) + + # Dupe check + self.logger.info('Checking dupes.\n') + dupe_cntrb_sql = s.sql.text(""" + SELECT contributors.* + FROM contributors inner join ( + SELECT MIN(cntrb_id) as cntrb_id + FROM contributors + GROUP BY cntrb_email HAVING COUNT(*) > 1 + ORDER BY cntrb_email + ) a on contributors.cntrb_id = a.cntrb_id + """) + + dupe_cntrbs = pd.read_sql(dupe_cntrb_sql, self.db, params={}) + + self.logger.info(f'There are {len(dupe_cntrbs)} duplicates.\n') + + # Turn columns from nan/nat to None + dupe_cntrbs = dupe_cntrbs.replace({pd.NaT: None}) + + for i, cntrb_existing in dupe_cntrbs.iterrows(): + + self.logger.info(f'Processing dupe: {cntrb_existing}.\n') + if i == 0: + self.logger.info('skipping first\n') + continue + + cntrb_new = cntrb_existing.copy() + del cntrb_new['cntrb_id'] + del cntrb_new['data_collection_date'] + cntrb_new = cntrb_new.to_dict() + + result = self.db.execute(self.contributors_table.insert().values(cntrb_new)) + pk = int(result.inserted_primary_key[0]) + + dupe_ids_sql = s.sql.text(""" + SELECT cntrb_id + FROM contributors + WHERE + cntrb_id <> :pk + AND cntrb_email = :email + """) + + dupe_ids = pd.read_sql(dupe_ids_sql, self.db, params={'pk': pk, \ + 'email': cntrb_new['cntrb_email']})['cntrb_id'].values.tolist() + + self.map_new_id(dupe_ids, pk) + + delete_dupe_ids_sql = s.sql.text(""" + DELETE + FROM contributors + WHERE cntrb_id <> {} + AND cntrb_email = '{}'; + """.format(pk, cntrb_new['cntrb_email'])) + + self.logger.info(f'Trying to delete dupes with sql: {delete_dupe_ids_sql}') + + try: + result = self.db.execute(delete_dupe_ids_sql) + except Exception as e: + self.logger.info(f'Deleting dupes failed with error: {e}') + + self.logger.info('Deleted duplicates.\n') + # Register this task as completed - register_task_completion(self, entry_info, repo_id, "contributors") + self.register_task_completion(entry_info, repo_id, "contributors") def insert_facade_contributors(self, entry_info, repo_id): - logging.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) + self.logger.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -449,7 +375,7 @@ def insert_facade_contributors(self, entry_info, repo_id): """) commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, params={'repo_id': repo_id}).to_json(orient="records")) - logging.info("We found {} distinct contributors needing insertion (repo_id = {})".format( + self.logger.info("We found {} distinct contributors needing insertion (repo_id = {})".format( len(commit_cntrbs), repo_id)) for cntrb in commit_cntrbs: @@ -462,10 +388,10 @@ def insert_facade_contributors(self, entry_info, repo_id): 'cntrb_full_name': cntrb['name'] } result = self.db.execute(self.contributors_table.insert().values(cntrb_tuple)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: {}\n".format(cntrb['email'])) + self.logger.info("Inserted contributor: {}\n".format(cntrb['email'])) def handle_alias(self, tuple): cntrb_email = tuple['cntrb_email'] 
# canonical @@ -473,13 +399,13 @@ def handle_alias(self, tuple): cntrb_id = tuple['cntrb_id'] # Check existing contributors table tuple - existing_tuples = retrieve_tuple(self, {'cntrb_email': tuple['commit_email']}, ['contributors']) + existing_tuples = self.retrieve_tuple({'cntrb_email': tuple['commit_email']}, ['contributors']) if len(existing_tuples) == 0: """ Insert alias tuple into the contributor table """ # Prepare tuple for insertion to contributor table (build it off of the tuple queried) - cntrb = tuple + cntrb = tuple.copy() cntrb['cntrb_created_at'] = datetime.fromtimestamp(cntrb['cntrb_created_at']/1000) \ if cntrb['cntrb_created_at'] else None @@ -494,15 +420,15 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc elif len(existing_tuples) > 1: # fix all dupe references to dupe cntrb ids before we delete them - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") - logging.info("For cntrb_email: {}".format(tuple['commit_email'])) + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") + self.logger.info("For cntrb_email: {}".format(tuple['commit_email'])) """ Insert alias tuple into the contributor table """ @@ -527,7 +453,7 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc @@ -540,77 +466,9 @@ def handle_alias(self, tuple): cntrb_id NOT IN (SELECT cntrb_id FROM contributors_aliases); """.format(commit_email)) - dupe_ids = json.loads(pd.read_sql(dupeIdsSQL, self.db, params={}).to_json(orient="records")) - - alias_update_col = {'cntrb_a_id': self.cntrb_id_inc} - update_col = {'cntrb_id': self.cntrb_id_inc} - reporter_col = {'reporter_id': self.cntrb_id_inc} - pr_assignee_col = {'contrib_id': self.cntrb_id_inc} - pr_repo_col = {'pr_cntrb_id': self.cntrb_id_inc} + dupe_ids = pd.read_sql(dupeIdsSQL, self.db, params={})['cntrb_id'].values.tolist() - # def delete_fk(table, column): - - # tables_with_fk = { - # 'contributors_aliases_table': ['cntrb_a_id', alias_update_col], - # 'issue_events_table':, - # 'pull_request_events_table', - # 'issues_table', - # 'issues_table' - # } - for id in dupe_ids: - - try: - alias_result = self.db.execute(self.contributors_aliases_table.update().where( - self.contributors_aliases_table.c.cntrb_a_id==id['cntrb_id']).values(alias_update_col)) - logging.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) - - #temp - alias_email_result = self.db.execute(self.contributors_aliases_table.update().where( - 
self.contributors_aliases_table.c.alias_email==commit_email).values(alias_update_col)) - logging.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(commit_email, self.cntrb_id_inc)) - #tempend - except Exception as e: - logging.info(f'Alias re-map already done... error: {e}') - - issue_events_result = self.db.execute(self.issue_events_table.update().where( - self.issue_events_table.c.cntrb_id==id['cntrb_id']).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) - - pr_events_result = self.db.execute(self.pull_request_events_table.update().where( - self.pull_request_events_table.c.cntrb_id==id['cntrb_id']).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) - - issues_cntrb_result = self.db.execute(self.issues_table.update().where( - self.issues_table.c.cntrb_id==id['cntrb_id']).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) - - issues_reporter_result = self.db.execute(self.issues_table.update().where( - self.issues_table.c.reporter_id==id['cntrb_id']).values(reporter_col)) - logging.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) - - issue_assignee_result = self.db.execute(self.issue_assignees_table.update().where( - self.issue_assignees_table.c.cntrb_id==id['cntrb_id']).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) - - pr_assignee_result = self.db.execute(self.pull_request_assignees_table.update().where( - self.pull_request_assignees_table.c.contrib_id==id['cntrb_id']).values(pr_assignee_col)) - logging.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) - - message_result = self.db.execute(self.message_table.update().where( - self.message_table.c.cntrb_id==id['cntrb_id']).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) - - pr_reviewers_result = self.db.execute(self.pull_request_reviewers_table.update().where( - self.pull_request_reviewers_table.c.cntrb_id==id['cntrb_id']).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_reviewers table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) - - pr_meta_result = self.db.execute(self.pull_request_meta_table.update().where( - self.pull_request_meta_table.c.cntrb_id==id['cntrb_id']).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) - - pr_repo_result = self.db.execute(self.pull_request_repo_table.update().where( - self.pull_request_repo_table.c.pr_cntrb_id==id['cntrb_id']).values(pr_repo_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_repo 
table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.map_new_id(dupe_ids, self.cntrb_id_inc) deleteSQL = """ DELETE @@ -629,17 +487,40 @@ def handle_alias(self, tuple): try: # Delete all dupes result = self.db.execute(deleteSQL) - logging.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) + self.logger.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) except Exception as e: - logging.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) + self.logger.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) else: #then there would be exactly 1 existing tuple, so that id is the one we want alias_id = existing_tuples[0]['cntrb_id'] + self.logger.info('Checking canonicals match.\n') + alias_sql = s.sql.text(""" + SELECT * + FROM contributors + WHERE cntrb_id = :alias_id + """) + canonical_id_result = pd.read_sql(alias_sql, self.db, params={'alias_id': alias_id}) + + if canonical_id_result.iloc[0]['cntrb_canonical'] != tuple['cntrb_canonical']: + canonical_col = { + 'cntrb_canonical': tuple['cntrb_canonical'], + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source + } + + result = self.db.execute(self.contributors_table.update().where( + self.contributors_table.c.cntrb_canonical==canonical_id_result.iloc[0]['cntrb_canonical'] + ).values(canonical_col)) + self.logger.info("Updated cntrb_canonical column for existing tuple in the contributors " + "table with email: {}\n".format(tuple['cntrb_email'])) + + # Now check existing alias table tuple - existing_tuples = retrieve_tuple(self, {'alias_email': commit_email}, ['contributors_aliases']) + existing_tuples = self.retrieve_tuple({'alias_email': commit_email}, ['contributors_aliases']) if len(existing_tuples) == 0: - logging.info("Finding cntrb_id for canonical email: {}".format(cntrb_email)) + self.logger.info("Finding cntrb_id for canonical email: {}".format(cntrb_email)) canonical_id_sql = s.sql.text(""" SELECT cntrb_id as canonical_id from contributors @@ -648,7 +529,7 @@ def handle_alias(self, tuple): canonical_id_result = json.loads(pd.read_sql(canonical_id_sql, self.db, params={'email': cntrb_email}).to_json( orient="records")) if len(canonical_id_result) > 1: - logging.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) + self.logger.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) alias_tuple = { 'cntrb_id': canonical_id_result[0]['canonical_id'], 'cntrb_a_id': alias_id, @@ -661,8 +542,77 @@ def handle_alias(self, tuple): } result = self.db.execute(self.contributors_aliases_table.insert().values(alias_tuple)) self.results_counter += 1 - logging.info("Inserted alias with email: {}\n".format(commit_email)) + self.logger.info("Inserted alias with email: {}\n".format(commit_email)) if len(existing_tuples) > 1: - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " "table AND NEED TO ADD DELETION LOGIC: {}\n".format(existing_tuples)) + def map_new_id(self, dupe_ids, new_id): + alias_update_col = {'cntrb_a_id': new_id} + update_col = {'cntrb_id': new_id} + reporter_col = {'reporter_id': new_id} + pr_assignee_col = {'contrib_id': new_id} + pr_repo_col = {'pr_cntrb_id': new_id} + + # def delete_fk(table, column): + + # tables_with_fk = { + 
# 'contributors_aliases_table': ['cntrb_a_id', alias_update_col], + # 'issue_events_table':, + # 'pull_request_events_table', + # 'issues_table', + # 'issues_table' + # } + + try: + cntrb_alias_result = self.db.execute(self.contributors_aliases_table.update().where( + self.contributors_aliases_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) + + alias_result = self.db.execute(self.contributors_aliases_table.update().where( + self.contributors_aliases_table.c.cntrb_a_id.in_(dupe_ids)).values(alias_update_col)) + self.logger.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + + except Exception as e: + self.logger.info(f'Alias re-map already done... error: {e}') + + issue_events_result = self.db.execute(self.issue_events_table.update().where( + self.issue_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) + self.logger.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + + pr_events_result = self.db.execute(self.pull_request_events_table.update().where( + self.pull_request_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) + self.logger.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + + issues_cntrb_result = self.db.execute(self.issues_table.update().where( + self.issues_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) + self.logger.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + + issues_reporter_result = self.db.execute(self.issues_table.update().where( + self.issues_table.c.reporter_id.in_(dupe_ids)).values(reporter_col)) + self.logger.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + + issue_assignee_result = self.db.execute(self.issue_assignees_table.update().where( + self.issue_assignees_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) + self.logger.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + + pr_assignee_result = self.db.execute(self.pull_request_assignees_table.update().where( + self.pull_request_assignees_table.c.contrib_id.in_(dupe_ids)).values(pr_assignee_col)) + self.logger.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + + message_result = self.db.execute(self.message_table.update().where( + self.message_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) + self.logger.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + + pr_reviewers_result = self.db.execute(self.pull_request_reviewers_table.update().where( + self.pull_request_reviewers_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_reviewers table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + + pr_meta_result = self.db.execute(self.pull_request_meta_table.update().where( + self.pull_request_meta_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) + self.logger.info("Updated cntrb_id 
column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + + pr_repo_result = self.db.execute(self.pull_request_repo_table.update().where( + self.pull_request_repo_table.c.pr_cntrb_id.in_(dupe_ids)).values(pr_repo_col)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_repo table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + + self.logger.info('Done mapping new id.\n') diff --git a/workers/contributor_worker/contributor_worker/__init__.py b/workers/contributor_worker/contributor_worker/__init__.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.0.1' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/contributor_worker/contributor_worker/runtime.py b/workers/contributor_worker/contributor_worker/runtime.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/runtime.py +++ /dev/null @@ -1,110 +0,0 @@ -from flask import Flask, jsonify, request -from contributor_worker.worker import ContributorWorker -import click, os, json, logging, requests -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}\n".format(str(request.json))) - app.contributor_worker.task = request.json - - #set task - return jsonify({"success": "sucess"}) - - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": app.contributor_worker._queue, - "tasks": [{ - "given": list(app.contributor_worker._queue) - }] - }) - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.contributor_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51252, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'contributor_worker', None, {}) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - 
logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.contributor_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "anomaly_days": worker_info['anomaly_days'] if 'anomaly_days' in worker_info else 2, - "training_days": worker_info['training_days'] if 'training_days' in worker_info else 365, - "confidence_interval": worker_info['confidence_interval'] if 'confidence_interval' in worker_info else .95, - "contamination": worker_info['contamination'] if 'contamination' in worker_info else 0.041, - 'metrics': worker_info['metrics'] if 'metrics' in worker_info else {"issues-new": "issues", - "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"} - } - - #create instance of the worker - app.contributor_worker = ContributorWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - print("Starting Flask App on host {} with port {} with pid: ".format(broker_host, worker_port) + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - print("Killing Flask App: {} and telling broker that this worker is disconnected.".format(str(os.getpid()))) - try: - logging.info("Sending disconnected message to broker... 
@ -> {} with info: {}\n".format('http://{}:{}/api/unstable/workers'.format( - config['broker_host'], config['broker_port']), config)) - requests.post('http://{}:{}/api/unstable/workers/remove'.format( - config['broker_host'], config['broker_port']), json=config) #hello message - except Exception as e: - logging.info("Ran into error: {}".format(e)) - logging.info("Broker's port is busy, worker will not be able to accept tasks, " - "please restart Augur if you want this worker to attempt connection again.") - diff --git a/workers/contributor_worker/runtime.py b/workers/contributor_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/contributor_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.contributor_worker.contributor_worker import ContributorWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ContributorWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/contributor_worker/setup.py b/workers/contributor_worker/setup.py --- a/workers/contributor_worker/setup.py +++ b/workers/contributor_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="contributor_worker", - version="0.0.2", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'contributor_worker_start=contributor_worker.runtime:main', + 'contributor_worker_start=workers.contributor_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/facade_worker/facade_worker/facade00mainprogram.py b/workers/facade_worker/facade_worker/facade00mainprogram.py --- a/workers/facade_worker/facade_worker/facade00mainprogram.py +++ b/workers/facade_worker/facade_worker/facade00mainprogram.py @@ -26,20 +26,8 @@ # repos. It also rebuilds analysis data, checks any changed affiliations and # aliases, and caches data for display. 
-import pymysql -import sys -import platform -import imp -import time -import datetime -import html.parser -import subprocess -import os -import getopt -import xlsxwriter -import configparser +import pymysql, sys, platform, imp, time, datetime, html.parser, subprocess, os, getopt, xlsxwriter, configparser, logging from multiprocessing import Process, Queue - from facade_worker.facade01config import Config#increment_db, update_db, migrate_database_config, database_connection, get_setting, update_status, log_activity from facade_worker.facade02utilitymethods import update_repo_log, trim_commit, store_working_author, trim_author from facade_worker.facade03analyzecommit import analyze_commit @@ -48,55 +36,49 @@ from facade_worker.facade06analyze import analysis from facade_worker.facade07rebuildcache import nuke_affiliations, fill_empty_affiliations, invalidate_caches, rebuild_unknown_affiliation_and_web_caches -from workers.standard_methods import read_config +from workers.util import read_config +from workers.worker_base import Worker + +html = html.parser.HTMLParser() -import logging +class FacadeWorker(Worker): + def __init__(self, config={}, task=None): + worker_type = "facade_worker" -# if platform.python_implementation() == 'PyPy': -# import pymysql -# else: -# import MySQLdb -# ## End Imports + # Define what this worker can be given and know how to interpret + given = [['repo_group']] + models = ['commits'] -html = html.parser.HTMLParser() + # Define the tables needed to insert, update, or delete on + data_tables = [] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Facade-specific config + self.cfg = Config(self.logger) + + # Define data collection info + # self.tool_source = 'Facade Worker' + # self.tool_version = '1.0.0' + # self.data_source = 'Git Log' -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class FacadeWorker: - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(os.getpid())) - - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.cfg = Config() - - ### The real program starts here ### + self.tool_source = '\'Facade Worker\'' + self.tool_version = '\'1.0.1\'' + self.data_source = '\'Git Log\'' + + def initialize_database_connections(self): # Set up the database - db_user = self.config['user'] - db_pass = self.config['password'] - db_name = self.config['database'] - db_host = self.config['host'] - db_port = self.config['port'] - db_user_people = self.config['user'] - db_pass_people = self.config['password'] - db_name_people = self.config['database'] - db_host_people = self.config['host'] - db_port_people = self.config['port'] + db_user = self.config['user_database'] + db_pass = self.config['password_database'] + db_name = self.config['name_database'] + db_host = self.config['host_database'] + db_port = self.config['port_database'] # Open a general-purpose 
connection - db,cursor = self.cfg.database_connection( + self.db, self.cursor = self.cfg.database_connection( db_host, db_user, db_pass, @@ -104,157 +86,68 @@ def __init__(self, config, task=None): db_port, False, False) # Open a connection for the people database - db_people,cursor_people = self.cfg.database_connection( - db_host_people, - db_user_people, - db_pass_people, - db_name_people, - db_port_people, True, False) + self.db_people,self.cursor_people = self.cfg.database_connection( + db_host, + db_user, + db_pass, + db_name, + db_port, True, False) # Check if the database is current and update it if necessary try: - current_db = int(self.cfg.get_setting('database_version')) + self.current_db = int(self.cfg.get_setting('database_version')) except: # Catch databases which existed before database versioning - current_db = -1 - - #WHAT IS THE UPSTREAM_DB??? - # if current_db < upstream_db: - - # print(("Current database version: %s\nUpstream database version %s\n" % - # (current_db, upstream_db))) - - # self.cfg.update_db(current_db); + self.current_db = -1 - self.commit_model() - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - rg_id = value['given']['repo_group_id'] - - """ Query all repos """ - # repoUrlSQL = s.sql.text(""" - # SELECT repo_id,repo_group_id,repo_git FROM repo WHERE repo_group_id = '{}' - # """.format(rg_id)) - # rs = pd.read_sql(repoUrlSQL, self.db, params={}) - try: - if value['job_type'] == "UPDATE": - self._queue.put(CollectorTask(message_type='TASK', entry_info=value)) - elif value['job_type'] == "MAINTAIN": - self._maintain_queue.put(CollectorTask(message_type='TASK', entry_info=value)) - - except Exception as e: - logging.info("error: {}".format(e)) - - self._task = CollectorTask(message_type='TASK', entry_info={"task": value, "repo_id": repo_id}) - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...") - if self._child is None: - self._child = Process(target=self.collect, args=()) - self._child.start() - def collect(self): """ Function to process each entry in the worker's task queue Determines what action to take based off the message type """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() while True: - time.sleep(0.5) if not self._queue.empty(): - message = self._queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "UPDATE" + message = self._queue.get() # Get the task off our MP queue else: - if not self._maintain_queue.empty(): - message = self._maintain_queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "MAINTAIN" - else: - break - - if message.type == 'EXIT': + break + self.logger.info("Popped off message: {}\n".format(str(message))) + + if message['job_type'] == 'STOP': break - if message.type != 'TASK': - raise ValueError(f'{message.type} is not a recognized task type') - - if message.type == 'TASK': - try: - git_url = 
message.entry_info['task']['given']['git_url'] - self.query_issues({'git_url': git_url, 'repo_id': message.entry_info['repo_id']}) - except Exception as e: - logging.info("Worker ran into an error for task: {}\n".format(message.entry_info['task'])) - logging.info("Error encountered: " + repr(e) + "\n") - logging.info("Notifying broker and logging task failure in database...\n") - message.entry_info['task']['worker_id'] = self.config['id'] - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=message.entry_info['task']) - # Add to history table - task_history = { - "repo_id": message.entry_info['repo_id'], - "worker": self.config['id'], - "job_model": message.entry_info['task']['models'][0], - "oauth_id": self.config['zombie_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error for: " + str(message.entry_info['task']) + "\n") - - # Update job process table - updated_job = { - "since_id_str": message.entry_info['repo_id'], - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==message.entry_info['task']['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + message.entry_info['task']['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - pass - - def commit_model(self): + # If task is not a valid job type + if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': + raise ValueError('{} is not a recognized task type'.format(message['job_type'])) + pass + try: + self.commits_model(message) + except Exception as e: + self.logger.error(e) + raise(e) + break + + def commits_model(self, message): # Figure out what we need to do - limited_run = read_config("Facade", name="limited_run", default=0) - delete_marked_repos = read_config("Facade", name="delete_marked_repos", default=0) - pull_repos = read_config("Facade", name="pull_repos", default=0) - clone_repos = read_config("Facade", name="clone_repos", default=1) - check_updates = read_config("Facade", name="check_updates", default=0) - force_updates = read_config("Facade", name="force_updates", default=0) - run_analysis = read_config("Facade", name="run_analysis", default=0) - force_analysis = read_config("Facade", name="force_analysis", default=0) - nuke_stored_affiliations = read_config("Facade", name="nuke_stored_affiliations", default=0) - fix_affiliations = read_config("Facade", name="fix_affiliations", default=1) - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - rebuild_caches = read_config("Facade", name="rebuild_caches", default=1) #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], + limited_run = self.augur_config.get_value("Facade", "limited_run") + delete_marked_repos = self.augur_config.get_value("Facade", "delete_marked_repos") + pull_repos = self.augur_config.get_value("Facade", "pull_repos") + clone_repos = self.augur_config.get_value("Facade", "clone_repos") + check_updates = self.augur_config.get_value("Facade", "check_updates") + force_updates = self.augur_config.get_value("Facade", "force_updates") + run_analysis = 
self.augur_config.get_value("Facade", "run_analysis") + force_analysis = self.augur_config.get_value("Facade", "force_analysis") + nuke_stored_affiliations = self.augur_config.get_value("Facade", "nuke_stored_affiliations") + fix_affiliations = self.augur_config.get_value("Facade", "fix_affiliations") + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + rebuild_caches = self.augur_config.get_value("Facade", "rebuild_caches") #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], # '%Y-%m-%d %I:%M:%S.%f') - datetime.datetime.now()).total_seconds()) // 3600 > int(self.cfg.get_setting( # 'update_frequency')) else 0 - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - create_xlsx_summary_files = read_config("Facade", name="create_xlsx_summary_files", default=0) - multithreaded = read_config("Facade", name="multithreaded", default=1) + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + create_xlsx_summary_files = self.augur_config.get_value("Facade", "create_xlsx_summary_files") + multithreaded = self.augur_config.get_value("Facade", "multithreaded") opts,args = getopt.getopt(sys.argv[1:],'hdpcuUaAmnfIrx') for opt in opts: @@ -355,9 +248,9 @@ def commit_model(self): if len(repo_base_directory) == 0: self.cfg.log_activity('Error','No base directory. It is unsafe to continue.') - update_status('Failed: No base directory') + self.cfg.update_status('Failed: No base directory') sys.exit(1) - + # Begin working start_time = time.time() diff --git a/workers/facade_worker/facade_worker/facade01config.py b/workers/facade_worker/facade_worker/facade01config.py --- a/workers/facade_worker/facade_worker/facade01config.py +++ b/workers/facade_worker/facade_worker/facade01config.py @@ -39,15 +39,15 @@ import json import logging -from workers.standard_methods import read_config - +from workers.util import read_config class Config: - def __init__(self): + def __init__(self, logger): self.upstream_db = 7 self.cursor = None self.cursor_people = None + self.logger = logger self.db = None self.db_people = None @@ -60,9 +60,14 @@ def __init__(self): " in your \'Workers\' -> \'facade_worker\' object in your config " "to the directory in which you want to clone repos. Exiting...") sys.exit(1) - self.tool_source = '\'FacadeAugur\'' - self.tool_version = '\'0.0.1\'' - self.data_source = '\'git_repository\'' + + # self.tool_source = 'Facade Worker' + # self.tool_version = '1.0.0' + # self.data_source = 'Git Log' + + self.tool_source = '\'Facade Worker\'' + self.tool_version = '\'1.0.1\'' + self.data_source = '\'Git Log\'' # Figure out how much we're going to log logging.basicConfig(filename='worker_{}.log'.format(worker_options['port']), filemode='w', level=logging.INFO) @@ -199,7 +204,7 @@ def log_activity(self, level, status): # "Debug", then just print it and don't save it in the database. 
log_options = ('Error','Quiet','Info','Verbose','Debug') - logging.info("* %s\n" % status) + self.logger.info("* %s\n" % status) if self.log_level == 'Debug' and level == 'Debug': return @@ -209,7 +214,7 @@ def log_activity(self, level, status): self.cursor.execute(query, (level, status)) self.db.commit() except Exception as e: - logging.info('Error encountered: {}\n'.format(e)) + self.logger.info('Error encountered: {}\n'.format(e)) # Set up the database db_user = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur') diff --git a/workers/facade_worker/facade_worker/facade03analyzecommit.py b/workers/facade_worker/facade_worker/facade03analyzecommit.py --- a/workers/facade_worker/facade_worker/facade03analyzecommit.py +++ b/workers/facade_worker/facade_worker/facade03analyzecommit.py @@ -38,7 +38,7 @@ import configparser import traceback -from workers.standard_methods import read_config +from workers.util import read_config def analyze_commit(cfg, repo_id, repo_loc, commit, multithreaded): diff --git a/workers/facade_worker/facade_worker/facade07rebuildcache.py b/workers/facade_worker/facade_worker/facade07rebuildcache.py --- a/workers/facade_worker/facade_worker/facade07rebuildcache.py +++ b/workers/facade_worker/facade_worker/facade07rebuildcache.py @@ -156,7 +156,6 @@ def discover_null_affiliations(attribution,email): cfg.log_activity('Debug','Found domain match for %s' % email) - # try: for match in matches: update = ("UPDATE commits " "SET cmt_%s_affiliation = %%s " @@ -164,7 +163,6 @@ def discover_null_affiliations(attribution,email): "AND cmt_%s_affiliation IS NULL " "AND cmt_%s_date::date >= %%s::date" % (attribution, attribution, attribution, attribution)) - #"AND cmt_%s_date >= TO_TIMESTAMP(%%s, 'YYYY-MM-DD')" % cfg.log_activity('Info', 'attr: {} \nmatch:{}\nsql: {}'.format(attribution, match, update)) @@ -175,15 +173,6 @@ def discover_null_affiliations(attribution,email): cfg.log_activity('Info', 'Error encountered: {}'.format(e)) cfg.log_activity('Info', 'Affiliation insertion failed for %s ' % email) - # except Exception as e: - # cfg.log_activity('Info', '1st Error encountered: {}'.format(e)) - # cfg.log_activity('Info', 'Attribution matching failed for %s ' % email) - # except Exception as e: - # logging.info('2nd Error encountered: {}'.format(e)) - # cfg.log_activity('Info', 'Attribution matching failed and exception logging failed') - # else: - # cfg.log_activity('Info', 'Attribution matching failed and exception logging failed and the exception to the exception failed.') - def discover_alias(email): # Match aliases with their canonical email diff --git a/workers/facade_worker/facade_worker/runtime.py b/workers/facade_worker/facade_worker/runtime.py --- a/workers/facade_worker/facade_worker/runtime.py +++ b/workers/facade_worker/facade_worker/runtime.py @@ -1,102 +1,23 @@ from flask import Flask, jsonify, request, Response import click, os, json, requests, logging -from facade_worker.facade00mainprogram import FacadeWorker -from workers.standard_methods import read_config +from workers.facade_worker.facade_worker.facade00mainprogram import FacadeWorker +from workers.util import create_server, WorkerGunicornApplication -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint 
that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(request.json)) - app.facade_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.facade_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51258, help='Port') -def main(augur_url, host, port): +def main(): """ Declares singular worker and creates the server and flask app that it will be running on """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'facade_worker', None, None) - worker_port = worker_info['port'] if 'port' in worker_info else port - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - config = { - "id": "com.augurlabs.core.facade_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } + app = Flask(__name__) + app.worker = FacadeWorker() - #create instance of the worker - app.facade_worker = FacadeWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") + create_server(app) + WorkerGunicornApplication(app).run() - app.run(debug=app.debug, host=host, port=worker_port) + if app.worker._child is not None: + app.worker._child.terminate() try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) except: pass - logging.info("Killing Flask App: " + str(os.getpid())) + 
os.kill(os.getpid(), 9) - diff --git a/workers/github_worker/github_worker/worker.py b/workers/github_worker/github_worker.py similarity index 56% rename from workers/github_worker/github_worker/worker.py rename to workers/github_worker/github_worker.py --- a/workers/github_worker/github_worker/worker.py +++ b/workers/github_worker/github_worker.py @@ -2,217 +2,61 @@ from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData import requests, time, logging, json, os from datetime import datetime -from sqlalchemy.ext.declarative import declarative_base -from workers.standard_methods import * +from workers.worker_base import Worker -class GitHubWorker: +class GitHubWorker(Worker): """ Worker that collects data from the Github API and stores it in our database task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - - self._task = task # task currently being worked on (dict) - self._child = None # process of currently running task (multiprocessing process) - self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes) - self.db = None # sql alchemy db session + def __init__(self, config={}): - # These 3 are included in every tuple the worker inserts (data collection info) - self.tool_source = 'GitHub API Worker' - self.tool_version = '0.0.3' # See __init__.py - self.data_source = 'GitHub API' - - self.results_counter = 0 # count of tuples inserted in the database (to store stats for each task in op tables) - self.finishing_task = True # if we are finishing a previous task, pagination works differenty - - self.specs = { - "id": self.config['id'], # what the broker knows this worker as - "location": self.config['location'], # host + port worker is running on (so broker can send tasks here) - "qualifications": [ - { - "given": [["github_url"]], # type of repo this worker can be given as a task - "models":["issues"] # models this worker can fill for a repo as a task - } - ], - "config": [self.config] - } - - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - # Create an sqlalchemy engine for both database schemas - logging.info("Making database connections... 
{}".format(DB_STR)) - db_schema = 'augur_data' - self.db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(db_schema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + worker_type = 'github_worker' - metadata = MetaData() - helper_metadata = MetaData() + given = [['github_url']] + models = ['issues'] - # Reflect only the tables we will use for each schema's metadata object - metadata.reflect(self.db, only=['contributors', 'issues', 'issue_labels', 'message', + data_tables = ['contributors', 'issues', 'issue_labels', 'message', 'issue_message_ref', 'issue_events','issue_assignees','contributors_aliases', 'pull_request_assignees', 'pull_request_events', 'pull_request_reviewers', 'pull_request_meta', - 'pull_request_repo']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - # So we can access all our tables when inserting, updating, etc - self.contributors_table = Base.classes.contributors.__table__ - self.issues_table = Base.classes.issues.__table__ - self.issue_labels_table = Base.classes.issue_labels.__table__ - self.issue_events_table = Base.classes.issue_events.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.message_table = Base.classes.message.__table__ - self.issues_message_ref_table = Base.classes.issue_message_ref.__table__ - self.issue_assignees_table = Base.classes.issue_assignees.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.contributors_aliases_table = Base.classes.contributors_aliases.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ + 'pull_request_repo'] + operations_tables = ['worker_history', 'worker_job'] - # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's - logging.info("Querying starting ids info...\n") - - self.issue_id_inc = get_max_id(self, 'issues', 'issue_id') - - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - - self.msg_id_inc = get_max_id(self, 'message', 'msg_id') - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) + # These 3 are included in every tuple the worker inserts (data collection info) + self.tool_source = 'GitHub API Worker' + self.tool_version = '1.0.0' + self.data_source = 'GitHub API' - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5433/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ 
Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - # If the task has one of our "valid" job types - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - # Setting that causes paginating through ALL pages, not just unknown ones - # This setting is set by the housekeeper and is attached to the task before it gets sent here - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - - self._task = value - self.run() + self.finishing_task = True # if we are finishing a previous task, pagination works differenty + self.platform_id = 25150 # GitHub - def cancel(self): - """ Delete/cancel current task - """ - self._task = None + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - # Spawn a subprocess to handle message reading and performing the tasks - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['github_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'issues': - self.issues_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass def issues_model(self, entry_info, repo_id): """ Data collection function Query the GitHub API for issues """ + + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's + self.logger.info("Querying starting ids info...\n") + + self.issue_id_inc = self.get_max_id('issues', 'issue_id') + + self.msg_id_inc = self.get_max_id('message', 'msg_id') github_url = entry_info['given']['github_url'] - logging.info("Beginning filling the issues model for repo: " + github_url + "\n") - record_model_process(self, repo_id, 'issues') + self.logger.info("Beginning filling the issues model for repo: " + github_url + "\n") # Contributors are part of this model, and finding all for the repo saves us # from having to add them as we discover committers in the issue process - query_github_contributors(self, entry_info, repo_id) + 
self.query_github_contributors(entry_info, repo_id) # Extract the owner/repo for the endpoint path = urlparse(github_url) @@ -238,14 +82,14 @@ def issues_model(self, entry_info, repo_id): duplicate_col_map = {'gh_issue_id': 'id'} #list to hold issues needing insertion - issues = paginate(self, issues_url, duplicate_col_map, update_col_map, table, table_pkey, + issues = self.paginate(issues_url, duplicate_col_map, update_col_map, table, table_pkey, 'WHERE repo_id = {}'.format(repo_id)) - + self.logger.info(issues) # Discover and remove duplicates before we start inserting - logging.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") + self.logger.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") for issue_dict in issues: - logging.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") + self.logger.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") # Add the FK repo_id to the dict being inserted issue_dict['repo_id'] = repo_id @@ -253,17 +97,17 @@ def issues_model(self, entry_info, repo_id): # Figure out if this issue is a PR # still unsure about this key value pair/what it means pr_id = None - if "pull_request" in issue_dict: - logging.info("Issue is a PR\n") + if 'pull_request' in issue_dict: + self.logger.info("Issue is a PR\n") # Right now we are just storing our issue id as the PR id if it is one pr_id = self.issue_id_inc else: - logging.info("Issue is not a PR\n") + self.logger.info("Issue is not a PR\n") # Begin on the actual issue... issue = { "repo_id": issue_dict['repo_id'], - "reporter_id": find_id_from_login(self, issue_dict['user']['login']), + "reporter_id": self.find_id_from_login(issue_dict['user']['login']), "pull_request": pr_id, "pull_request_id": pr_id, "created_at": issue_dict['created_at'], @@ -292,20 +136,20 @@ def issues_model(self, entry_info, repo_id): if issue_dict['flag'] == 'need_update': result = self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( + self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( issue_dict['id'])) self.issue_id_inc = issue_dict['pkey'] elif issue_dict['flag'] == 'need_insertion': try: result = self.db.execute(self.issues_table.insert().values(issue)) - logging.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) self.results_counter += 1 self.issue_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + + self.logger.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + " and title of: {} and gh_issue_num of: {}\n".format(issue_dict['title'],issue_dict['number'])) except Exception as e: - logging.info("When inserting an issue, ran into the following error: {}\n".format(e)) - logging.info(issue) + self.logger.info("When inserting an issue, ran into the following error: {}\n".format(e)) + self.logger.info(issue) continue # Check if the assignee key's value is already recorded in the assignees key's value @@ -316,13 +160,13 @@ def issues_model(self, entry_info, repo_id): # Handles case if there are no assignees if collected_assignees[0] is not None: - logging.info("Count of assignees to insert for this issue: " + 
str(len(collected_assignees)) + "\n") + self.logger.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") for assignee_dict in collected_assignees: if type(assignee_dict) != dict: continue assignee = { "issue_id": self.issue_id_inc, - "cntrb_id": find_id_from_login(self, assignee_dict['login']), + "cntrb_id": self.find_id_from_login(assignee_dict['login']), "tool_source": self.tool_source, "tool_version": self.tool_version, "data_source": self.data_source, @@ -331,13 +175,13 @@ def issues_model(self, entry_info, repo_id): } # Commit insertion to the assignee table result = self.db.execute(self.issue_assignees_table.insert().values(assignee)) - logging.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + + self.logger.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + " with login/cntrb_id: " + assignee_dict['login'] + " " + str(assignee['cntrb_id']) + "\n") else: - logging.info("Issue does not have any assignees\n") + self.logger.info("Issue does not have any assignees\n") # Insert the issue labels to the issue_labels table for label_dict in issue_dict['labels']: @@ -357,10 +201,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_labels_table.insert().values(label)) - logging.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue label with text: " + label_dict['name'] + "\n") + self.logger.info("Inserted issue label with text: " + label_dict['name'] + "\n") #### Messages/comments and events insertion @@ -375,19 +219,19 @@ def issues_model(self, entry_info, repo_id): duplicate_col_map = {'msg_timestamp': 'created_at'} #list to hold contributors needing insertion or update - issue_comments = paginate(self, comments_url, duplicate_col_map, update_col_map, table, table_pkey, + issue_comments = self.paginate(comments_url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="WHERE msg_id IN (SELECT msg_id FROM issue_message_ref WHERE issue_id = {})".format( self.issue_id_inc)) - logging.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) + self.logger.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) for comment in issue_comments: try: - commenter_cntrb_id = find_id_from_login(self, comment['user']['login']) + commenter_cntrb_id = self.find_id_from_login(comment['user']['login']) except: commenter_cntrb_id = None issue_comment = { - "pltfrm_id": 25150, + "pltfrm_id": self.platform_id, "msg_text": comment['body'], "msg_timestamp": comment['created_at'], "cntrb_id": commenter_cntrb_id, @@ -397,13 +241,13 @@ def issues_model(self, entry_info, repo_id): } try: result = self.db.execute(self.message_table.insert().values(issue_comment)) - logging.info("Primary key inserted into the message table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the message table: {}".format(result.inserted_primary_key)) self.results_counter += 1 self.msg_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue comment with id: 
{}\n".format(self.msg_id_inc)) + self.logger.info("Inserted issue comment with id: {}\n".format(self.msg_id_inc)) except Exception as e: - logging.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) + self.logger.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) ### ISSUE MESSAGE REF TABLE ### @@ -417,8 +261,8 @@ def issues_model(self, entry_info, repo_id): "issue_msg_ref_src_node_id": comment['node_id'] } - result = self.db.execute(self.issues_message_ref_table.insert().values(issue_message_ref)) - logging.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) + result = self.db.execute(self.issue_message_ref_table.insert().values(issue_message_ref)) + self.logger.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) self.results_counter += 1 # Base of the url for event endpoints @@ -434,7 +278,7 @@ def issues_model(self, entry_info, repo_id): pseudo_key_gh = 'url' pseudo_key_augur = 'node_url' table = 'issue_events' - event_table_values = get_table_values(self, [pseudo_key_augur], [table], "WHERE issue_id = {}".format(self.issue_id_inc)) + event_table_values = self.get_table_values([pseudo_key_augur], [table], "WHERE issue_id = {}".format(self.issue_id_inc)) # Paginate backwards through all the events but get first page in order # to determine if there are multiple pages and if the 1st page covers all @@ -442,29 +286,29 @@ def issues_model(self, entry_info, repo_id): multiple_pages = False while True: - logging.info("Hitting endpoint: " + events_url.format(i) + " ...\n") + self.logger.info("Hitting endpoint: " + events_url.format(i) + " ...\n") r = requests.get(url=events_url.format(i), headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) # Find last page so we can decrement from there if 'last' in r.links and not multiple_pages and not self.finishing_task: param = r.links['last']['url'][-6:] i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") multiple_pages = True elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") + self.logger.info("Only 1 page of request\n") elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ... " + self.logger.info("Finishing a previous task, paginating forwards ... " "excess rate limit requests will be made\n") j = r.json() # Checking contents of requests with what we already have in the db - new_events = check_duplicates(j, event_table_values, pseudo_key_gh) + new_events = self.check_duplicates(j, event_table_values, pseudo_key_gh) if len(new_events) == 0 and multiple_pages and 'last' in r.links: if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown events, breaking from pagination.\n") + self.logger.info("No more pages with unknown events, breaking from pagination.\n") break elif len(new_events) != 0: to_add = [obj for obj in new_events if obj not in issue_events] @@ -474,29 +318,29 @@ def issues_model(self, entry_info, repo_id): # Since we already wouldve checked the first page... 
break if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") + self.logger.info("No more pages to check, breaking from pagination.\n") break - logging.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") + self.logger.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") # If the issue is closed, then we search for the closing event and store the user's id cntrb_id = None if 'closed_at' in issue_dict: for event in issue_events: if str(event['event']) != "closed": - logging.info("not closed, continuing") + self.logger.info("not closed, continuing") continue if not event['actor']: continue - cntrb_id = find_id_from_login(self, event['actor']['login']) + cntrb_id = self.find_id_from_login(event['actor']['login']) if cntrb_id is not None: break # Need to hit this single contributor endpoint to get extra created at data... cntrb_url = ("https://api.github.com/users/" + event['actor']['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) contributor = r.json() company = None @@ -543,20 +387,17 @@ def issues_model(self, entry_info, repo_id): # Commit insertion to table result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format( + self.logger.info("Primary key inserted into the contributors table: {}".format( result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - # Increment our global track of the cntrb id for the possibility of it being used as a FK - self.cntrb_id_inc = int(result.inserted_primary_key[0]) + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") for event in issue_events: if event['actor'] is not None: - event['cntrb_id'] = find_id_from_login(self, event['actor']['login']) + event['cntrb_id'] = self.find_id_from_login(event['actor']['login']) if event['cntrb_id'] is None: - logging.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") + self.logger.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") continue # event['cntrb_id'] = None else: @@ -578,10 +419,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_events_table.insert().values(issue_event)) - logging.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) + self.logger.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) if cntrb_id is not None: update_closing_cntrb = { @@ -589,11 +430,11 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( + self.logger.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( issue_dict['id'])) self.issue_id_inc += 1 #Register this task as completed - 
register_task_completion(self, entry_info, repo_id, "issues") + self.register_task_completion(entry_info, repo_id, "issues") diff --git a/workers/github_worker/github_worker/__init__.py b/workers/github_worker/github_worker/__init__.py deleted file mode 100644 --- a/workers/github_worker/github_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/github_worker/github_worker/runtime.py b/workers/github_worker/github_worker/runtime.py deleted file mode 100644 --- a/workers/github_worker/github_worker/runtime.py +++ /dev/null @@ -1,107 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from github_worker.worker import GitHubWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.github_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.github_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51236, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'github_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: # for multiple instances of workers - try: # trying each port for an already-alive worker until a free port is found - print("New github worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": 
"com.augurlabs.core.github_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - app.github_worker = GitHubWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.github_worker._child is not None: - app.github_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/github_worker/runtime.py b/workers/github_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/github_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.github_worker.github_worker import GitHubWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/github_worker/setup.py b/workers/github_worker/setup.py --- a/workers/github_worker/setup.py +++ b/workers/github_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="github_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'github_worker_start=github_worker.runtime:main', + 'github_worker_start=workers.github_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/repo_info_worker/repo_info_worker/__init__.py b/workers/gitlab_issues_worker/__init__.py similarity index 50% rename from workers/repo_info_worker/repo_info_worker/__init__.py rename to workers/gitlab_issues_worker/__init__.py --- a/workers/repo_info_worker/repo_info_worker/__init__.py +++ b/workers/gitlab_issues_worker/__init__.py @@ -1,4 +1,4 @@ -"""gh_repo_info_worker - Augur Worker that collects GitHub Repo Info data""" +"""gitlab_issues_worker - Augur Worker that collects Gitlab Issue Info""" __version__ = '0.0.0' __author__ = 'Augur Team <[email protected]>' diff --git a/workers/gitlab_issues_worker/gitlab_issues_worker.py b/workers/gitlab_issues_worker/gitlab_issues_worker.py new file mode 100644 --- 
/dev/null +++ b/workers/gitlab_issues_worker/gitlab_issues_worker.py @@ -0,0 +1,193 @@ +import logging, os, sys, time, requests, json +from datetime import datetime +from multiprocessing import Process, Queue +import pandas as pd +import sqlalchemy as s +from workers.worker_base import Worker + + +class GitLabIssuesWorker(Worker): + def __init__(self, config={}): + + # Define what this worker can be given and know how to interpret + + # given is usually either [['github_url']] or [['git_url']] (depending if your + # worker is exclusive to repos that are on the GitHub platform) + worker_type = "gitlab_issues_worker" + given = [['git_url']] + + # The name the housekeeper/broker use to distinguish the data model this worker can fill + # You will also need to name the method that does the collection for this model + # in the format *model name*_model() such as fake_data_model() for example + models = ['gitlab_issues'] + + # Define the tables needed to insert, update, or delete on + # The Worker class will set each table you define here as an attribute + # so you can reference all of them like self.message_table or self.repo_table + data_tables = ['contributors', 'issues', 'issue_labels', 'message', 'repo', + 'issue_message_ref', 'issue_events','issue_assignees','contributors_aliases', + 'pull_request_assignees', 'pull_request_events', 'pull_request_reviewers', 'pull_request_meta', + 'pull_request_repo'] + # For most workers you will only need the worker_history and worker_job tables + # from the operations schema, these tables are to log worker task histories + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Request headers updation + + gitlab_api_key = self.augur_config.get_value("Database", "gitlab_api_key") + self.config.update({ + "gitlab_api_key": gitlab_api_key + }) + self.headers = {"PRIVATE-TOKEN" : self.config['gitlab_api_key']} + + + # Define data collection info + self.tool_source = 'Gitlab API Worker' + self.tool_version = '0.0.0' + self.data_source = 'GitLab API' + + + def gitlab_issues_model(self, task, repo_id): + """ This is just an example of a data collection method. All data collection + methods for all workers currently accept this format of parameters. If you + want to change these parameters, you can re-define the collect() method to + overwrite the Worker class' version of it (which is the method that calls + this method). + + :param task: the task generated by the housekeeper and sent to the broker which + was then sent to this worker. Takes the example dict format of: + { + 'job_type': 'MAINTAIN', + 'models': ['fake_data'], + 'display_name': 'fake_data model for url: https://github.com/vmware/vivace', + 'given': { + 'git_url': 'https://github.com/vmware/vivace' + } + } + :param repo_id: the collect() method queries the repo_id given the git/github url + and passes it along to make things easier. 
An int such as: 27869 + """ + + # Collection and insertion of data happens here + + # Collecting issue info from Gitlab API + self.issue_id_inc = self.get_max_id('issues', 'issue_id') + self.msg_id_inc = self.get_max_id('message', 'msg_id') + self.logger.info('Beginning the process of GitLab Issue Collection...'.format(str(os.getpid()))) + gitlab_base = 'https://gitlab.com/api/v4' + intermediate_url = '{}/projects/{}/issues?per_page=100&state=opened&'.format(gitlab_base, 18754962) + gitlab_issues_url = intermediate_url + "page={}" + + + # Get issues that we already have stored + # Set pseudo key (something other than PK) to + # check dupicates with + table = 'issues' + table_pkey = 'issue_id' + update_col_map = {'issue_state': 'state'} + duplicate_col_map = {'gh_issue_id': 'id'} + + #list to hold issues needing insertion + issues = self.paginate(gitlab_issues_url, duplicate_col_map, update_col_map, table, table_pkey, + 'WHERE repo_id = {}'.format(repo_id), platform="gitlab") + + self.logger.info(issues) + self.logger.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") + for issue_dict in issues: + self.logger.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") + pr_id = None + if "pull_request" in issue_dict: + self.logger.info("This is an MR\n") + # Right now we are just storing our issue id as the MR id if it is one + pr_id = self.issue_id_inc + else: + self.logger.info("Issue is not an MR\n") + + # Insert data into models + issue = { + "repo_id": issue_dict['project_id'], + "reporter_id": self.find_id_from_login(issue_dict['author']['username'], platform='gitlab'), + "pull_request": pr_id, + "pull_request_id": pr_id, + "created_at": issue_dict['created_at'], + "issue_title": issue_dict['title'], + "issue_body": issue_dict['description'] if 'description' in issue_dict else None, + "comment_count": issue_dict['user_notes_count'], + "updated_at": issue_dict['updated_at'], + "closed_at": issue_dict['closed_at'], + "repository_url": issue_dict['_links']['project'], + "issue_url": issue_dict['_links']['self'], + "labels_url": issue_dict['labels'], + "comments_url": issue_dict['_links']['notes'], + "events_url": None, + "html_url": issue_dict['_links']['self'], + "issue_state": issue_dict['state'], + "issue_node_id": None, + "gh_issue_id": issue_dict['id'], + "gh_issue_number": issue_dict['iid'], + "gh_user_id": issue_dict['author']['id'], + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + # Commit insertion to the issues table + if issue_dict['flag'] == 'need_update': + self.logger.info("UPDATE FLAG") + result = self.db.execute(self.issues_table.update().where( + self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) + self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( + issue_dict['id'])) + self.issue_id_inc = issue_dict['pkey'] + elif issue_dict['flag'] == 'need_insertion': + self.logger.info("INSERT FLAG") + try: + result = self.db.execute(self.issues_table.insert().values(issue)) + self.logger.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) + self.results_counter += 1 + self.issue_id_inc = int(result.inserted_primary_key[0]) + self.logger.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + + " and title of: {} and gh_issue_num of: {}\n".format(issue_dict['title'], issue_dict['iid'])) + except Exception as e: + self.logger.info("When inserting an issue, ran into 
the following error: {}\n".format(e)) + self.logger.info(issue) + # continue + + # issue_assigness + self.logger.info("assignees", issue_dict['assignees']) + collected_assignees = issue_dict['assignees'] + if issue_dict['assignee'] not in collected_assignees: + collected_assignees.append(issue_dict['assignee']) + if collected_assignees[0] is not None: + self.logger.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") + for assignee_dict in collected_assignees: + if type(assignee_dict) != dict: + continue + assignee = { + "issue_id": self.issue_id_inc, + "cntrb_id": self.find_id_from_login(assignee_dict['username'], platform='gitlab'), + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source, + "issue_assignee_src_id": assignee_dict['id'], + "issue_assignee_src_node": None + } + self.logger.info("assignee info", assignee) + # Commit insertion to the assignee table + result = self.db.execute(self.issue_assignees_table.insert().values(assignee)) + self.logger.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) + self.results_counter += 1 + + self.logger.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + + " with login/cntrb_id: " + assignee_dict['username'] + " " + str(assignee['cntrb_id']) + "\n") + else: + self.logger.info("Issue does not have any assignees\n") + + # Register this task as completed. + # This is a method of the worker class that is required to be called upon completion + # of any data collection model, this lets the broker know that this worker is ready + # for another task + self.register_task_completion(task, repo_id, 'gitlab_issues') + diff --git a/workers/gitlab_issues_worker/runtime.py b/workers/gitlab_issues_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/gitlab_issues_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.gitlab_issues_worker.gitlab_issues_worker import GitLabIssuesWorker +from workers.util import WorkerGunicornApplication, create_server + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitLabIssuesWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) \ No newline at end of file diff --git a/workers/gitlab_issues_worker/setup.py b/workers/gitlab_issues_worker/setup.py new file mode 100644 --- /dev/null +++ b/workers/gitlab_issues_worker/setup.py @@ -0,0 +1,41 @@ +import io +import os +import re + +from setuptools import find_packages +from setuptools import setup + +def read(filename): + filename = os.path.join(os.path.dirname(__file__), filename) + text_type = type(u"") + with io.open(filename, mode="r", encoding='utf-8') as fd: + return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) + +setup( + name="gitlab_issues_worker", + version="0.0.0", + url="https://github.com/chaoss/augur", + license='MIT', + author="Augur Team", + author_email="", + description="Gitlab Worker", + packages=find_packages(exclude=('tests',)), + install_requires=[ + 'flask', + 'requests', + 'psycopg2-binary', + 'click' + ], + entry_points={ + 
'console_scripts': [ + 'gitlab_issues_worker_start=workers.gitlab_issues_worker.runtime:main', + ], + }, + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + ] +) diff --git a/workers/insight_worker/insight_worker/__init__.py b/workers/insight_worker/__init__.py similarity index 100% rename from workers/insight_worker/insight_worker/__init__.py rename to workers/insight_worker/__init__.py diff --git a/workers/insight_worker/insight_worker/worker.py b/workers/insight_worker/insight_worker.py similarity index 79% rename from workers/insight_worker/insight_worker/worker.py rename to workers/insight_worker/insight_worker.py --- a/workers/insight_worker/insight_worker/worker.py +++ b/workers/insight_worker/insight_worker.py @@ -10,179 +10,55 @@ import scipy.stats import datetime from sklearn.ensemble import IsolationForest -from workers.standard_methods import * #init_oauths, get_table_values, get_max_id, register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process, paginate +from workers.worker_base import Worker import warnings warnings.filterwarnings('ignore') -class InsightWorker: +class InsightWorker(Worker): """ Worker that detects anomalies on a select few of our metrics task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self._task = task - self._child = None - self._queue = Queue() - self.db = None + def __init__(self, config={}): + + worker_type = "insight_worker" + + given = [['git_url']] + models = ['insights'] + + data_tables = ['chaoss_metric_status', 'repo_insights', 'repo_insights_records'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({ + 'api_host': self.augur_config.get_value('Server', 'host'), + 'api_port': self.augur_config.get_value('Server', 'port') + }) + + # These 3 are included in every tuple the worker inserts (data collection info) self.tool_source = 'Insight Worker' - self.tool_version = '0.0.3' # See __init__.py + self.tool_version = '1.0.0' self.data_source = 'Augur API' + self.refresh = True self.send_insights = True - self.finishing_task = False self.anomaly_days = self.config['anomaly_days'] self.training_days = self.config['training_days'] self.contamination = self.config['contamination'] self.confidence = self.config['confidence_interval'] / 100 self.metrics = self.config['metrics'] - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["insights"] - } - ], - "config": [self.config] - } - - self.results_counter = 0 - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - dbschema = 'augur_data' - self.db 
= s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - # produce our own MetaData object - metadata = MetaData() - helper_metadata = MetaData() - - # we can reflect it ourselves from a database, using options - # such as 'only' to limit what tables we look at... - metadata.reflect(self.db, only=['chaoss_metric_status', 'repo_insights', 'repo_insights_records']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job']) - - # we can then produce a set of mappings from this MetaData. - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - # calling prepare() just sets up mapped classes and relationships. - Base.prepare() - HelperBase.prepare() - - # mapped classes are ready - self.chaoss_metric_status_table = Base.classes['chaoss_metric_status'].__table__ - self.repo_insights_table = Base.classes['repo_insights'].__table__ - self.repo_insights_records_table = Base.classes['repo_insights_records'].__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5432/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - 
# and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'insights': - self.insights_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - def insights_model(self, entry_info, repo_id): logging.info("Discovering insights for task with entry info: {}\n".format(entry_info)) - record_model_process(self, repo_id, 'insights') """ Collect data """ base_url = 'http://{}:{}/api/unstable/repo-groups/9999/repos/{}/'.format( - self.config['broker_host'], self.config['broker_port'], repo_id) + self.config['api_host'], self.config['api_port'], repo_id) # Dataframe to hold all endpoint results # Subtract configurable amount of time @@ -218,7 +94,7 @@ def insights_model(self, entry_info, repo_id): # If none of the endpoints returned data if df.size == 0: logging.info("None of the provided endpoints provided data for this repository. Anomaly detection is 'done'.\n") - register_task_completion(self, entry_info, repo_id, "insights") + self.register_task_completion(entry_info, repo_id, "insights") return """ Deletion of old insights """ @@ -258,7 +134,7 @@ def insights_model(self, entry_info, repo_id): result = self.db.execute(delete_points_SQL, repo_id=repo_id, min_date=min_date) # get table values to check for dupes later on - insight_table_values = get_table_values(self, ['*'], ['repo_insights_records'], where_clause="WHERE repo_id = {}".format(repo_id)) + insight_table_values = self.get_table_values(['*'], ['repo_insights_records'], where_clause="WHERE repo_id = {}".format(repo_id)) to_model_columns = df.columns[0:len(self.metrics)+1] @@ -415,7 +291,7 @@ def classify_anomalies(df,metric): logging.info("error occurred while storing datapoint: {}\n".format(repr(e))) break - register_task_completion(self, entry_info, repo_id, "insights") + self.register_task_completion(entry_info, repo_id, "insights") def confidence_interval_insights(self, entry_info): """ Anomaly detection method based on confidence intervals @@ -423,7 +299,6 @@ def confidence_interval_insights(self, entry_info): # Update table of endpoints before we query them all logging.info("Discovering insights for task with entry info: {}".format(entry_info)) - record_model_process(self, repo_id, 'insights') # Set the endpoints we want to discover insights for endpoints = [{'cm_info': "issues-new"}, {'cm_info': "code-changes"}, {'cm_info': "code-changes-lines"}, @@ -445,10 +320,10 @@ def confidence_interval_insights(self, entry_info): # If we are discovering insights for a group vs repo, the base url will change if 'repo_group_id' in entry_info and 'repo_id' not in entry_info: base_url = 'http://{}:{}/api/unstable/repo-groups/{}/'.format( - self.config['broker_host'],self.config['broker_port'], entry_info['repo_group_id']) + self.config['api_host'],self.config['api_port'], entry_info['repo_group_id']) else: base_url = 'http://{}:{}/api/unstable/repo-groups/9999/repos/{}/'.format( - self.config['broker_host'],self.config['broker_port'], repo_id) + self.config['api_host'],self.config['api_port'], repo_id) # Hit and discover insights for every endpoint we care about for endpoint in endpoints: @@ -610,50 +485,6 @@ def is_unique_key(key): self.register_task_completion(entry_info, "insights") - def register_task_completion(self, entry_info, model): - # Task to send back to broker - task_completed = { - 'worker_id': self.config['id'], - 'job_type': entry_info['job_type'], - 'repo_id': repo_id, - 'git_url': 
entry_info['git_url'] - } - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.config['zombie_id'], - "timestamp": datetime.datetime.now(), - "status": "Success", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job completion for: " + str(task_completed) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where( - self.job_table.c.job_model==model).values(updated_job)) - logging.info("Update job process for model: " + model + "\n") - - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - - # Reset results counter for next task - self.results_counter = 0 - def send_insight(self, insight, units_from_mean): try: repoSQL = s.sql.text(""" @@ -821,9 +652,9 @@ def confidence_interval(self, data, timeperiod='week', confidence=.95): def update_metrics(self): logging.info("Preparing to update metrics ...\n\n" + "Hitting endpoint: http://{}:{}/api/unstable/metrics/status ...\n".format( - self.config['broker_host'],self.config['broker_port'])) + self.config['api_host'],self.config['api_port'])) r = requests.get(url='http://{}:{}/api/unstable/metrics/status'.format( - self.config['broker_host'],self.config['broker_port'])) + self.config['api_host'],self.config['api_port'])) data = r.json() active_metrics = [metric for metric in data if metric['backend_status'] == 'implemented'] diff --git a/workers/insight_worker/insight_worker/runtime.py b/workers/insight_worker/insight_worker/runtime.py deleted file mode 100644 --- a/workers/insight_worker/insight_worker/runtime.py +++ /dev/null @@ -1,110 +0,0 @@ -from flask import Flask, jsonify, request -from insight_worker.worker import InsightWorker -import click, os, json, logging, requests -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}\n".format(str(request.json))) - app.insight_worker.task = request.json - - #set task - return jsonify({"success": "sucess"}) - - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": app.insight_worker._queue, - "tasks": [{ - "given": list(app.insight_worker._queue) - }] - }) - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - 
""" - return app.insight_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51252, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'insight_worker', None, {}) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.insight_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "anomaly_days": worker_info['anomaly_days'] if 'anomaly_days' in worker_info else 2, - "training_days": worker_info['training_days'] if 'training_days' in worker_info else 365, - "confidence_interval": worker_info['confidence_interval'] if 'confidence_interval' in worker_info else .95, - "contamination": worker_info['contamination'] if 'contamination' in worker_info else 0.041, - 'metrics': worker_info['metrics'] if 'metrics' in worker_info else {"issues-new": "issues", - "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"} - } - - #create instance of the worker - app.insight_worker = InsightWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - print("Starting Flask App on host {} with port {} with pid: ".format(broker_host, worker_port) + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - print("Killing Flask App: {} and telling broker that this worker is disconnected.".format(str(os.getpid()))) - try: - logging.info("Sending disconnected message to broker... 
@ -> {} with info: {}\n".format('http://{}:{}/api/unstable/workers'.format( - config['broker_host'], config['broker_port']), config)) - requests.post('http://{}:{}/api/unstable/workers/remove'.format( - config['broker_host'], config['broker_port']), json=config) #hello message - except Exception as e: - logging.info("Ran into error: {}".format(e)) - logging.info("Broker's port is busy, worker will not be able to accept tasks, " - "please restart Augur if you want this worker to attempt connection again.") - diff --git a/workers/insight_worker/runtime.py b/workers/insight_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/insight_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.insight_worker.insight_worker import InsightWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = InsightWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/insight_worker/setup.py b/workers/insight_worker/setup.py --- a/workers/insight_worker/setup.py +++ b/workers/insight_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="insight_worker", - version="0.0.2", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'insight_worker_start=insight_worker.runtime:main', + 'insight_worker_start=workers.insight_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/linux_badge_worker/__init__.py b/workers/linux_badge_worker/__init__.py new file mode 100644 diff --git a/workers/linux_badge_worker/linux_badge_worker.py b/workers/linux_badge_worker/linux_badge_worker.py new file mode 100644 --- /dev/null +++ b/workers/linux_badge_worker/linux_badge_worker.py @@ -0,0 +1,63 @@ +import os +from datetime import datetime +import logging +import requests +import json +from urllib.parse import quote +from multiprocessing import Process, Queue + +import pandas as pd +import sqlalchemy as s +from sqlalchemy.ext.automap import automap_base +from sqlalchemy import MetaData +from workers.worker_base import Worker + +class LinuxBadgeWorker(Worker): + """ Worker that collects repo badging data from CII + config: database credentials, broker information, and ID + """ + def __init__(self, config={}): + + worker_type = "linux_badge_worker" + + given = [['git_url']] + models = ['badges'] + + data_tables = ['repo_badging'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({"endpoint": "https://bestpractices.coreinfrastructure.org/projects.json?pq="}) + self.tool_source = 'Linux Badge Worker' + self.tool_version = '1.0.0' + self.data_source = 'CII Badging API' + + + def badges_model(self, entry_info, repo_id): + """ Data collection and storage method + Query the CII API and store the result in the DB for the badges model + """ + git_url = entry_info['given']['git_url'] + 
self.logger.info("Collecting data for {}".format(git_url)) + extension = quote(git_url[0:-4]) + + url = self.config['endpoint'] + extension + self.logger.info("Hitting CII endpoint: " + url + " ...") + data = requests.get(url=url).json() + + if data != []: + self.logger.info("Inserting badging data for " + git_url) + self.db.execute(self.repo_badging_table.insert()\ + .values(repo_id=repo_id, + data=data, + tool_source=self.tool_source, + tool_version=self.tool_version, + data_source=self.data_source)) + + self.results_counter += 1 + else: + self.logger.info("No CII data found for {}\n".format(git_url)) + + self.register_task_completion(entry_info, repo_id, "badges") diff --git a/workers/linux_badge_worker/linux_badge_worker/__init__.py b/workers/linux_badge_worker/linux_badge_worker/__init__.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""linux_badge_worker - Augur worker that collects CII badging data""" - -__tool_source__ = 'Linux Badge Worker' -__tool_version__ = '0.1.0' -__data_source__ = 'CII Badging API' - -__author__ = 'Augur Team <[email protected]>' -__all__ = [] - diff --git a/workers/linux_badge_worker/linux_badge_worker/runtime.py b/workers/linux_badge_worker/linux_badge_worker/runtime.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/runtime.py +++ /dev/null @@ -1,107 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from linux_badge_worker.worker import BadgeWorker -from workers.standard_methods import read_config - -def create_server(app): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.linux_badge_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.linux_badge_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51235, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'linux_badge_worker', None, None) - - 
worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.linux_badge_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json?pq=", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - app.linux_badge_worker = BadgeWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - - if app.linux_badge_worker._child is not None: - app.linux_badge_worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/linux_badge_worker/worker.py b/workers/linux_badge_worker/linux_badge_worker/worker.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/worker.py +++ /dev/null @@ -1,240 +0,0 @@ -import os -from datetime import datetime -import logging -import requests -import json -from urllib.parse import quote -from multiprocessing import Process, Queue - -from linux_badge_worker import __data_source__, __tool_source__, __tool_version__ -import pandas as pd -import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData -from workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class BadgeWorker: - """ Worker that collects repo badging data from CII - config: database credentials, broker information, and ID - """ - def __init__(self, config, task=None): - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.config = config - - self.db = None - self.repo_badging_table = None - - self._task = task - self._queue = Queue() - self._child = None - - self.history_id = None - self.finishing_task = False - self.working_on = None - self.results_counter = 0 - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": 
[["git_url"]], - "models":["badges"] - } - ], - "config": [self.config] - } - - self._db_str = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], - self.config['password'], - self.config['host'], - self.config['port'], - self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self._db_str, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self._db_str, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - logging.info("Database connection established...") - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['repo_badging']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - self.repo_badging_table = Base.classes.repo_badging.__table__ - logging.info("ORM setup complete...") - - # Organize different api keys/oauths available - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauth_sql = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(0)) - - for oauth in [{'oauth_id': 0, 'access_token': 0}] + json.loads(pd.read_sql(oauth_sql, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) \ - - datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - - # Send broker hello message - connect_to_broker(self) - logging.info("Connected to the broker...\n") - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "key": "", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['github_api_key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - 
self._queue.put(value) - - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - - self._task = value - self.run() - - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def badges_model(self, entry_info, repo_id): - """ Data collection and storage method - Query the CII API and store the result in the DB for the badges model - """ - git_url = entry_info['given']['git_url'] - logging.info("Collecting data for {}".format(git_url)) - extension = quote(git_url[0:-4]) - - url = self.config['endpoint'] + extension - logging.info("Hitting CII endpoint: " + url + " ...") - data = requests.get(url=url).json() - - if data != []: - logging.info("Inserting badging data for " + git_url) - self.db.execute(self.repo_badging_table.insert()\ - .values(repo_id=repo_id, - data=data, - tool_source=__tool_source__, - tool_version=__tool_version__, - data_source=__data_source__)) - - self.results_counter += 1 - else: - logging.info("No CII data found for {}\n".format(git_url)) - - register_task_completion(self, entry_info, repo_id, "badges") - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - """ Query all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'badges': - self.badges_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() diff --git a/workers/linux_badge_worker/runtime.py b/workers/linux_badge_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/linux_badge_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.linux_badge_worker.linux_badge_worker import LinuxBadgeWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = LinuxBadgeWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/setup.py b/workers/linux_badge_worker/setup.py --- 
a/workers/linux_badge_worker/setup.py +++ b/workers/linux_badge_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="linux_badge_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'linux_badge_worker_start=linux_badge_worker.runtime:main', + 'linux_badge_worker_start=workers.linux_badge_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/metric_status_worker/metric_status_worker/__init__.py b/workers/metric_status_worker/metric_status_worker/__init__.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/metric_status_worker/metric_status_worker/runtime.py b/workers/metric_status_worker/metric_status_worker/runtime.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/runtime.py +++ /dev/null @@ -1,108 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, logging, requests, json -from metric_status_worker.worker import MetricStatusWorker -import os -import json -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.metric_status_worker.task = request.json - - #set task - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "success" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.metric_status_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51263, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'metric_status_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if 
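The /AUGWOP/task and /AUGWOP/heartbeat routes above are the contract between the broker and every worker: POSTing JSON to /AUGWOP/task sets the worker's task property (which queues the job and starts collection), and GET /AUGWOP/heartbeat is what the runtimes poll when looking for a free port. A rough sketch of the broker side of that exchange, with a made-up host, port, and task payload:

import requests

# Hypothetical worker location and task payload, for illustration only.
worker_url = "http://localhost:51263"
task = {
    "job_type": "MAINTAIN",
    "models": ["badges"],
    "given": {"git_url": "https://github.com/chaoss/augur.git"},
}

# Probe the heartbeat first -- the same check the runtimes use when picking a port.
heartbeat = requests.get(worker_url + "/AUGWOP/heartbeat").json()

if heartbeat.get("status") == "alive":
    # Hand the task to the worker; its task setter queues it and kicks off collect().
    requests.post(worker_url + "/AUGWOP/task", json=task)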
r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.metric_status_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - app.metric_status_worker = MetricStatusWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=port) - if app.metric_status_worker._child is not None: - app.metric_status_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/metric_status_worker/metric_status_worker/worker.py b/workers/metric_status_worker/metric_status_worker/worker.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/worker.py +++ /dev/null @@ -1,719 +0,0 @@ -import base64 -import logging -import os -import re -import sys -import json -import time -from abc import ABC -from datetime import datetime -from multiprocessing import Process, Queue -from urllib.parse import urlparse - -import pandas as pd -import requests -import sqlalchemy as s -from github import Github -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - - -class MetricStatusWorker: - def __init__(self, config, task=None): - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.db = None - self.table = None - self.API_KEY = self.config['key'] - self.tool_source = 'Metric Status Worker' - self.tool_version = '0.0.1' - self.data_source = 'GitHub API' - self.results_counter = 0 - self.working_on = None - - - # url = 'https://api.github.com' - # response = requests.get(url, headers=self.headers) - # self.rate_limit = int(response.headers['X-RateLimit-Remaining']) - - specs = { - "id": self.config['id'], - "location": self.config['location'], - 
"qualifications": [ - { - "given": [["git_url"]], - "models":["chaoss_metric_status"] - } - ], - "config": [self.config] - } - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - logging.info("Making database connections...") - - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['chaoss_metric_status']) - # helper_metadata.reflect(self.helper_db) - - Base = automap_base(metadata=metadata) - - Base.prepare() - - self.chaoss_metric_status_table = Base.classes['chaoss_metric_status'].__table__ - - try: - requests.post('http://{}:{}/api/unstable/workers'.format( - self.config['broker_host'],self.config['broker_port']), json=specs) - except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker') - sys.exit('Cannot connect to the broker! Quitting...') - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced """ - return self._task - - @task.setter - def task(self, value): - try: - if value['job_type'] == 'UPDATE': - self._queue.put(CollectorTask('TASK', {})) - elif value['job_type'] == 'MAINTAIN': - self._maintain_queue.put(CollectorTask('TASK', {})) - - if 'focused_task' in value: - if value['focused_task'] == 1: - self.finishing_task = True - except Exception as e: - logging.error("Error: {},".format(str(e))) - - self._task = CollectorTask(message_type='TASK', entry_info={}) - self.run() - - def cancel(self): - """ Delete/cancel current task """ - self._task = None - - def run(self): - logging.info("Running...") - if self._child is None: - self._child = Process(target=self.collect, args=()) - self._child.start() - requests.post("http://{}:{}/api/unstable/add_pids".format( - self.config['broker_host'],self.config['broker_port']), json={'pids': [self._child.pid, os.getpid()]}) - - def collect(self): - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = 'UPDATE' - elif not self._maintain_queue.empty(): - message = self._maintain_queue.get() - logging.info("Popped off message: {}".format(str(message.entry_info))) - self.working_on = "MAINTAIN" - else: - break - - - if message.type == 'EXIT': - break - if message.type != 'TASK': - raise ValueError( - f'{message.type} is not a recognized task type') - - if message.type == 'TASK': - self.update_metrics(message.entry_info) - - def update_metrics(self, entry_info): - """ Data colletction function - Query the github api for metric status - """ - status = MetricsStatus(self.API_KEY) - status.create_metrics_status() - metrics = status.metrics_status - - # convert to dict - dict_metrics = [] - for metric in metrics: - metric_info = { - 'cm_group': metric['group'], - 'cm_source': metric['data_source'], - 'cm_type': metric['metric_type'], - 'cm_backend_status': metric['backend_status'], - 'cm_frontend_status': metric['frontend_status'], - 'cm_api_endpoint_repo': metric['endpoint_repo'], - 'cm_api_endpoint_rg': metric['endpoint_group'], - 'cm_defined': metric['is_defined'], - 'cm_name': metric['display_name'], - 'cm_working_group': metric['group'], - 'cm_info': metric['tag'], - 'cm_working_group_focus_area': metric['focus_area'], - 'tool_source': self.tool_source, - 'tool_version': self.tool_version, - 'data_source': 
self.data_source, - } - dict_metrics.append(metric_info) - - need_insertion = self.filter_duplicates({'cm_api_endpoint_repo': "cm_api_endpoint_repo", 'cm_backend_status':'cm_api_endpoint_rg'}, ['chaoss_metric_status'], - dict_metrics) - logging.info("Count of contributors needing insertion: " + str(len(need_insertion)) + "\n") - for metric in need_insertion: - result = self.db.execute(self.chaoss_metric_status_table.insert().values(metric)) - logging.info("Primary key inserted into the metrics table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - - self.register_task_completion() - - - # def filter_duplicates(self, og_data): - # need_insertion = [] - # colSQL = s.sql.text(""" - # SELECT * FROM chaoss_metric_status - # """) - # values = pd.read_sql(colSQL, self.db) - # for obj in og_data: - # location = values.loc[ (values['cm_name']==obj['cm_name'] ) & ( values['cm_working_group']==obj[ - # 'cm_working_group']) & ()] - # if not location.empty: - # logging.info("value of tuple exists: " + str(obj['cm_name'])) - # else: - # need_insertion.append(obj) - # - # logging.info("While filtering duplicates, we reduced the data size from " + str(len(og_data)) + - # " to " + str(len(need_insertion)) + "\n") - # - # return need_insertion - - def filter_duplicates(self, cols, tables, og_data): - need_insertion = [] - - table_str = tables[0] - del tables[0] - for table in tables: - table_str += ", " + table - for col in cols.keys(): - colSQL = s.sql.text(""" - SELECT {} FROM {} - """.format(col, table_str)) - values = pd.read_sql(colSQL, self.db, params={}) - - for obj in og_data: - if values.isin([obj[cols[col]]]).any().any(): - logging.info("value of tuple exists: " + str(obj[cols[col]]) + "\n") - elif obj not in need_insertion: - need_insertion.append(obj) - logging.info("While filtering duplicates, we reduced the data size from " + str(len(og_data)) + - " to " + str(len(need_insertion)) + "\n") - return need_insertion - - def update_exist_metrics(self, metrics): - need_update = [] - need_insert = [] - - for metric in metrics: - result = self.db.execute(self.chaoss_metric_status_table.update().where((self.chaoss_metric_status_table.c.cm_name == metric['cm_name'])&(self.chaoss_metric_status_table.c.cm_group == metric['cm_group']) & ((self.chaoss_metric_status_table.c.cm_api_endpoint_repo != metric['cm_api_endpoint_repo']) | (self.chaoss_metric_status_table.c.cm_api_endpoint_rg != metric['cm_api_endpoint_rg'])|(self.chaoss_metric_status_table.c.cm_source != metric['cm_source'])) - ).values(metric)) - - if result.rowcount: - logging.info("Update Metric {}-{}".format(metric['cm_group'], metric['cm_name'])) - - def register_task_completion(self): - task_completed = { - 'worker_id': self.config['id'], - 'job_type': self.working_on, - } - - logging.info("Telling broker we completed task: " + str(task_completed) + "\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - self.results_counter = 0 - - - - -class FrontendStatusExtractor(object): - - def __init__(self): - pass - self.api_text = open(os.path.abspath(os.path.dirname(os.path.dirname(os.getcwd()))) + - "/frontend/src/AugurAPI.ts", 'r').read() - self.attributes = re.findall( - r'(?:(GitEndpoint|Endpoint|Timeseries|addRepoMetric|addRepoGroupMetric)\()\'(.*)\', \'(.*)\'', - self.api_text) - self.timeseries = [ - attribute for attribute in self.attributes if 
attribute[0] == "Timeseries"] - self.endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "Endpoint"] - self.git_endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "GitEndpoint"] - self.repo_metrics = [ - attribute for attribute in self.attributes if attribute[0] == 'addRepoMetric'] - self.group_metric = [ - attribute for attribute in self.attributes if attribute[0] == 'addRepoMetric'] - - def determine_frontend_status(self, metric): - metric.frontend_status = 'unimplemented' - attribute = None - - if metric.metric_type == "timeseries": - attribute = next((attribute for attribute in self.timeseries if - "/api/unstable/<owner>/<repo>/timeseries/{}".format(attribute[2]) == metric.endpoint_repo), - None) - - elif metric.metric_type == "metric": - attribute = next((attribute for attribute in self.endpoints if - "/api/unstable/<owner>/<repo>/{}".format(attribute[2]) == metric.endpoint_repo), None) - if not attribute: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/repos/<repo_id>/{}".format( - attribute[2]) == metric.endpoint_repo), None) - if not attribute and metric.endpoint_group: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/{}".format(attribute[2]) == metric.endpoint_group), None) - - elif metric.metric_type == "git": - attribute = next((attribute for attribute in self.git_endpoints if - "/api/unstable/git/{}".format(attribute[2]) == metric.endpoint_repo), None) - - if attribute is not None: - metric.frontend_status = 'implemented' - metric.chart_mapping = attribute[1] - else: - metric.frontend_status = 'unimplemented' - - -class Metric(ABC): - - def __init__(self): - self.ID = None - self.tag = None - self.display_name = None - self.group = None - self.backend_status = 'unimplemented' - self.frontend_status = 'unimplemented' - self.chart_mapping = None - self.data_source = None - self.metric_type = None - self.documentation_url = None - self.is_defined = False - self.focus_area = None - self.endpoint_group = None - self.endpoint_repo = None - - -class GroupedMetric(Metric): - - def __init__(self, display_name, group, tag, focus_area): - Metric.__init__(self) - self.display_name = display_name - self.tag = tag - self.ID = re.sub(r'-$|\*', '', 'none' + '-' + self.tag) - self.group = group - self.focus_area = focus_area - - -class ImplementedMetric(Metric): - - def __init__(self, metadata, frontend_status_extractor): - Metric.__init__(self) - - self.ID = metadata['ID'] - self.tag = metadata['tag'] - self.display_name = metadata['metric_name'] - self.backend_status = 'implemented' - self.data_source = metadata['source'] - self.group = "experimental" - self.endpoint_group = None - self.endpoint_repo = None - - - if 'metric_type' in metadata: - self.metric_type = metadata['metric_type'] - else: - self.metric_type = 'metric' - - if 'endpoint' in metadata: - if 'group_endpoint' in metadata: - self.endpoint_group = metadata['group_endpoint'] - if 'repo_endpoint' in metadata: - self.endpoint_repo = metadata['repo_endpoint'] - else: - self.endpoint_repo = metadata['endpoint'] - frontend_status_extractor.determine_frontend_status(self) - - -class MetricsStatus(object): - wg_evolution = { - "repo": "chaoss/wg-evolution", - "focus_area": "focus_areas", - "name": 'evolution' - } - - wg_diversity_inclusion = { - "repo": "chaoss/wg-diversity-inclusion", - "focus_area": "focus-areas", - "name": "diversity-inclusion" - } - - 
wg_value = { - "repo": "chaoss/wg-value", - "focus_area": 'focus-areas', - "name": "value" - } - - wg_common = { - "repo": "chaoss/wg-common", - "focus_area": "focus-areas", - "name": "common" - } - - wg_risk = { - "repo": "chaoss/wg-risk", - "focus_area": "focus-areas", - "name": "risk" - } - - def __init__(self, githubapi): - self.__githubapi = githubapi - self.github = Github(self.__githubapi) - - # TODO: don't hardcode this - self.groups = { - "evolution": "Evolution", - "diversity-inclusion": "Diversity and Inclusion metrics", - "value": "Value", - "risk": "Risk", - "common": "Common", - "experimental": "Experimental", - "all": "All" - } - - self.implemented_metrics = [] - - self.evo_metrics = [] - self.di_metrics = [] - self.risk_metrics = [] - self.value_metrics = [] - self.common_metrics = [] - self.experimental_metrics = [] - - self.metrics_by_group = [] - - self.metrics_status = [] - - self.data_sources = [] - self.metric_types = [] - self.tags = {} - self.metadata = [] - - def create_metrics_status(self): - - self.build_implemented_metrics() - - self.evo_metrics = self.create_grouped_metrics( - self.wg_evolution, "evolution") - self.risk_metrics = self.create_grouped_metrics(self.wg_risk, "risk") - self.common_metrics = self.create_grouped_metrics( - self.wg_common, 'common') - self.di_metrics = self.create_grouped_metrics( - self.wg_diversity_inclusion, 'diversity-inclusion') - self.value_metrics = self.create_grouped_metrics( - self.wg_value, 'value') - - self.metrics_by_group = [self.evo_metrics, self.risk_metrics, - self.common_metrics, self.di_metrics, self.value_metrics] - - self.create_experimental_metrics() - self.metrics_by_group.append(self.experimental_metrics) - # - self.copy_implemented_metrics() - - self.find_defined_metrics() - - self.build_metrics_status() - - # self.build_metadata() - - def build_implemented_metrics(self): - frontend_status_extractor = FrontendStatusExtractor() - - r = requests.get( - url='http://{}:{}/api/unstable/batch/metadata'.format( - self.config['broker_host'],self.config['broker_port'])) - data = json.loads(r.text) - - for metric in data: - if "ID" in metric.keys(): - self.implemented_metrics.append( - ImplementedMetric(metric, frontend_status_extractor)) - - def create_grouped_metrics(self, group, group_name): - metrics = self.find_metrics_from_focus_area( - group['repo'], group['focus_area']) - - remote_metrics = [] - for metric in metrics: - remote_metrics.append(GroupedMetric(metric.display_name, group['name'], metric.tag, - metric.focus_area)) - - return remote_metrics - - def find_metrics_from_focus_area(self, repo_name, focus_area_path): - focus_areas = self.github.get_repo( - repo_name).get_dir_contents(focus_area_path) - metrics = [] - for area in focus_areas: - # get focus area name from filename - # focus_area_name = re.sub('.md','',re.sub('-', ' ',area.name)) - focus_area_name = None - focus_area_name_splited = [a.capitalize() for a in re.sub( - '.md', '', re.sub('[_]|[-]', ' ', area.name)).split()] - focus_area_name = ' '.join(focus_area_name_splited) - - # extract structure :focus_area_name/readme.md - if area.type == 'dir': - tmp = self.github.get_repo( - repo_name).get_dir_contents(area.path) - readme = [a for a in tmp if 'readme' in a.name.lower()] - if len(readme) == 0: - continue - else: - area = readme[0] - elif 'readme' in area.name.lower() or 'changelog' in area.name.lower(): - continue - - # decode content; github api return encoded content - decoded_content = base64.b64decode(area.content).decode('utf-8') - 
metric_name_tag = self.parse_table( - decoded_content) or self.parse_list(decoded_content) - - for name, tag in metric_name_tag.items(): - add_metric = Metric() - add_metric.display_name = name - add_metric.tag = tag - add_metric.focus_area = focus_area_name - - metrics.append(add_metric) - - if metric_name_tag is None: - continue - - return metrics - - def parse_table(self, md_content): - # group 0 is header, group 2 is |---|--|, and group 3 is table content - tables = re.findall( - r'^(\|?[^\n]+\|[^\n]+\|?\r?\n)((?:\|?\s*:?[-]+\s*:?)+\|?)(\n(?:\|?[^\n]+\|[^\n]+\|?\r?\n?)*)?$', md_content, - re.MULTILINE) - - if not tables: - return None - - box = [] - metrics_name_tag = {} - for table in tables: - # get metric name by 'metric_name' index in column - metric_index, length_in_row = self.get_metric_index_in_table_row( - table[0]) - table_content = [x.strip() - for x in table[2].replace('\n', '|').split('|')] - # remove two empty str - table_content.pop(0) - table_content.pop() - - raw_metrics = [table_content[a] for a in range( - metric_index, len(table_content), length_in_row)] - - for raw_metric in raw_metrics: - metric_name, metric_link = self.is_has_link( - raw_metric, md_content) - metric_name = re.sub('[\[]|[\]]', '', metric_name) - if not metric_link: - metric_link = re.sub(' ', '-', metric_name).lower() - metrics_name_tag[metric_name] = self.link_to_tag( - metric_name, str(metric_link)) - - return metrics_name_tag - - def get_metric_index_in_table_row(self, row): - header_names = [x.strip().lower() for x in row.split('|')] - # print(header_names) - index = None - if 'metric' in header_names: - index = header_names.index('metric') - elif 'name' in header_names: - index = header_names.index('name') - - return index, len(header_names) - - def parse_list(self, md_content): - matched_lists = re.findall(r'[-]\s+(.+)\n', md_content) - metric_names = {} - # print(matched_lists) - for matched in matched_lists: - # print(matched) - metirc_name = re.sub(r'.+:\s', '', matched) - metirc_name, metric_link = self.is_has_link( - metirc_name, md_content) - metirc_name = re.sub('[\[]|[\]]', '', metirc_name) - metric_names[metirc_name] = self.link_to_tag( - metirc_name, metric_link) - return metric_names - - def is_has_link(self, s, md_content): - # remove leading whitespace if exist - s = s.strip() - pattern_inline = re.compile(r'\[([^\[\]]+)\]\(([^)]+)') - match = pattern_inline.match(s) - - if match: - return match.group(1), match.group(2) - - pattern_ref = re.compile(r'\[([^\[\]]+)\]\[([^]]+)') - match2 = pattern_ref.match(s) - - if match2: - link = match2.group(2) - p = re.compile(r'\n\[' + link + r'\]:\s+(.+)\n') - res = p.search(md_content, re.DOTALL) - if res: - return match2.group(1), res.group(1) - else: - return s, None - - def link_to_tag(self, name, s): - - # generate tag if undefined metric - if not s: - return re.sub(' ', '-', name.lower()) - - pattern = re.compile(r'\/?([a-zA-Z_-]+)(\.md)?$') - m = pattern.search(s) - if m: - return re.sub('_', '-', re.sub('.md', '', m.group(1).lower())) - else: - return re.sub(' ', '-', re.sub('\(s\)', 's', name)) - - def create_experimental_metrics(self): - tags = [] - for group in self.metrics_by_group: - for metric in group: - tags.append(metric.tag) - - self.experimental_metrics = [ - metric for metric in self.implemented_metrics if metric.tag not in tags] - - def copy_implemented_metrics(self): - # takes implemented metrics and copies their data to the appropriate metric object - # I am so very sorry - # TODO: burn this into the ground - for group 
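The helpers above (parse_table, is_has_link, link_to_tag) are how the removed worker turned a working group's markdown focus-area files into metric names and tags. A compact sketch of the same idea on a single invented table cell:

import re

# Invented markdown table cell, as it might appear in a CHAOSS focus-area document.
cell = "[Code Changes](code_changes.md)"

# Inline-link pattern used by is_has_link: captures the display text and the target.
match = re.match(r"\[([^\[\]]+)\]\(([^)]+)", cell)
if match:
    name, link = match.group(1), match.group(2)
else:
    name, link = cell, None

# link_to_tag-style normalization: drop the .md suffix and turn underscores into dashes,
# falling back to a lowercased, dash-separated version of the name when there is no link.
tag = re.sub("_", "-", re.sub(".md", "", link)) if link else re.sub(" ", "-", name.lower())

print(name, "->", tag)  # Code Changes -> code-changes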
in enumerate(self.metrics_by_group): - if group[1] is not self.experimental_metrics: - for grouped_metric in group[1]: - defined_implemented_metrics = [ - metric for metric in self.implemented_metrics if grouped_metric.tag == metric.tag] - if defined_implemented_metrics != []: - for metric in defined_implemented_metrics: - metric.group = group[1][0].group - metric.focus_area = grouped_metric.focus_area - group[1].append(metric) - self.implemented_metrics.remove(metric) - grouped_metric.ID = 'n/a' - self.metrics_by_group[group[0]] = [ - metric for metric in group[1] if metric.ID != 'n/a'] - - def find_defined_metrics(self): - # return map {tag: html_url} - repo_names = [self.wg_common['repo'], self.wg_evolution['repo'], - self.wg_diversity_inclusion['repo'], self.wg_risk['repo'], self.wg_value['repo']] - - md_files = {} - - for repo_name in repo_names: - repo = self.github.get_repo(repo_name) - contents = repo.get_contents("") - - while len(contents) > 1: - file_content = contents.pop(0) - if file_content.type == "dir": - contents.extend(repo.get_contents(file_content.path)) - elif '.md' in file_content.name: - name = re.sub( - '_', '-', re.sub('.md', '', file_content.name)) - md_files[name.lower()] = file_content.html_url - - for group in self.metrics_by_group: - for metric in group: - if metric.tag in md_files.keys(): - metric.is_defined = True - metric.documentation_url = md_files[metric.tag] - - def build_metrics_status(self): - for group in self.metrics_by_group: - for metric in group: - self.metrics_status.append(metric.__dict__) - - def build_metadata(self): - self.get_metric_sources() - self.get_metric_types() - self.get_metric_tags() - - self.metadata = { - "remotes": { - "diversity_inclusion_urls": self.diversity_inclusion_urls, - "growth_maturity_decline_urls": self.growth_maturity_decline_urls, - "risk_urls": self.risk_urls, - "value_urls": self.value_urls, - "activity_repo_urls": self.activity_urls - }, - "groups": self.groups, - "data_sources": self.data_sources, - "metric_types": self.metric_types, - "tags": self.tags - } - - def get_metric_sources(self): - for data_source in [metric['data_source'] for metric in self.metrics_status]: - data_source = data_source.lower() - if data_source not in self.data_sources and data_source != "none": - self.data_sources.append(data_source) - self.data_sources.append("all") - - def get_metric_types(self): - for metric_type in [metric['metric_type'] for metric in self.metrics_status]: - metric_type = metric_type.lower() - if metric_type not in self.metric_types and metric_type != "none": - self.metric_types.append(metric_type) - self.metric_types.append("all") - - def get_metric_tags(self): - for tag in [(metric['tag'], metric['group']) for metric in self.metrics_status]: - # tag[0] = tag[0].lower() - if tag[0] not in [tag[0] for tag in self.tags] and tag[0] != "none": - self.tags[tag[0]] = tag[1] \ No newline at end of file diff --git a/workers/pull_request_worker/pull_request_worker/__init__.py b/workers/pull_request_worker/__init__.py similarity index 100% rename from workers/pull_request_worker/pull_request_worker/__init__.py rename to workers/pull_request_worker/__init__.py diff --git a/workers/pull_request_worker/pull_request_worker/worker.py b/workers/pull_request_worker/pull_request_worker.py similarity index 61% rename from workers/pull_request_worker/pull_request_worker/worker.py rename to workers/pull_request_worker/pull_request_worker.py --- a/workers/pull_request_worker/pull_request_worker/worker.py +++ 
b/workers/pull_request_worker/pull_request_worker.py @@ -1,225 +1,42 @@ import ast, json, logging, os, sys, time, traceback, requests from datetime import datetime from multiprocessing import Process, Queue -from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base -from workers.standard_methods import * from sqlalchemy.sql.expression import bindparam +from workers.worker_base import Worker -class GHPullRequestWorker: +class GitHubPullRequestWorker(Worker): """ Worker that collects Pull Request related data from the Github API and stores it in our database. :param task: most recent task the broker added to the worker's queue :param config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.working_on = None - self.config = config - LOG_FORMAT = '%(levelname)s:[%(name)s]: %(message)s' - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO, format=LOG_FORMAT) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self.db = None - self.table = None - self.API_KEY = self.config['key'] - self.tool_source = 'GitHub Pull Request Worker' - self.tool_version = '0.0.1' # See __init__.py - self.data_source = 'GitHub API' - self.results_counter = 0 - self.headers = {'Authorization': f'token {self.API_KEY}'} - self.history_id = None - self.finishing_task = True - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [['github_url']], - "models":['pull_requests', 'pull_request_commits', 'pull_request_files'] - } - ], - "config": [self.config] - } + def __init__(self, config={}): - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], - self.config['port'], self.config['database'] - ) + worker_type = "pull_request_worker" - #Database connections - logging.info("Making database connections...\n") - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) + # Define what this worker can be given and know how to interpret + given = [['github_url']] + models = ['pull_requests', 'pull_request_commits', 'pull_request_files'] - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['contributors', 'pull_requests', + # Define the tables needed to insert, update, or delete on + data_tables = ['contributors', 'pull_requests', 'pull_request_assignees', 'pull_request_events', 'pull_request_labels', 'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo', 'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits', - 'pull_request_files']) - - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.contributors_table = Base.classes.contributors.__table__ - 
self.pull_requests_table = Base.classes.pull_requests.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.pull_request_labels_table = Base.classes.pull_request_labels.__table__ - self.pull_request_message_ref_table = Base.classes.pull_request_message_ref.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_teams_table = Base.classes.pull_request_teams.__table__ - self.message_table = Base.classes.message.__table__ - self.pull_request_commits_table = Base.classes.pull_request_commits.__table__ - self.pull_request_files_table = Base.classes.pull_request_files.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - logging.info("Querying starting ids info...\n") - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - self.pr_id_inc = get_max_id(self, 'pull_requests', 'pull_request_id') - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - self.msg_id_inc = get_max_id(self, 'message', 'msg_id') - self.pr_msg_ref_id_inc = get_max_id(self, 'pull_request_message_ref', 'pr_msg_ref_id') - self.label_id_inc = get_max_id(self, 'pull_request_labels', 'pr_label_id') - self.event_id_inc = get_max_id(self, 'pull_request_events', 'pr_event_id') - self.reviewer_id_inc = get_max_id(self, 'pull_request_reviewers', 'pr_reviewer_map_id') - self.assignee_id_inc = get_max_id(self, 'pull_request_assignees', 'pr_assignee_map_id') - self.pr_meta_id_inc = get_max_id(self, 'pull_request_meta', 'pr_repo_meta_id') - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - # self.pull_requests_graphql({ - # 'job_type': 'MAINTAIN', - # 'models': ['pull_request_files'], - # 'display_name': 'pull_request_files model for url: https://github.com/zephyrproject-rtos/actions_sandbox.git', - # 'given': { - # 'github_url': 'https://github.com/zephyrproject-rtos/actions_sandbox.git' - # } - # }, 25201) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - github_url = value['given']['github_url'] - - repo_url_SQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(github_url)) - rs = pd.read_sql(repo_url_SQL, self.db, params={}) - - try: - repo_id = int(rs.iloc[0]['repo_id']) - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - if 'focused_task' in value: - if value['focused_task'] == 1: - self.finishing_task = True - - except Exception as e: - logging.error(f"error: {e}, or that repo is not in our database: 
{value}\n") - - self._task = value - self.run() + 'pull_request_files'] + operations_tables = ['worker_history', 'worker_job'] - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query all repos with repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['github_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'pull_requests': - self.pull_requests_model(message, repo_id) - elif message['models'][0] == 'pull_request_commits': - self.pull_request_commits_model(message, repo_id) - elif message['models'][0] == 'pull_request_files': - self.pull_requests_graphql(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + # Define data collection info + self.tool_source = 'GitHub Pull Request Worker' + self.tool_version = '1.0.0' + self.data_source = 'GitHub API' + def graphql_paginate(self, query, data_subjects, before_parameters=None): """ Paginate a GitHub GraphQL query backwards @@ -227,7 +44,7 @@ def graphql_paginate(self, query, data_subjects, before_parameters=None): :rtype: A Pandas DataFrame, contains all data contained in the pages """ - logging.info(f'Start paginate with params: \n{data_subjects} ' + self.logger.info(f'Start paginate with params: \n{data_subjects} ' f'\n{before_parameters}') def all_items(dictionary): @@ -249,6 +66,7 @@ def all_items(dictionary): tuples = [] def find_root_of_subject(data, key_subject): + self.logger.info(f'Finding {key_subject} root of {data}') key_nest = None for subject, nest in data.items(): if key_subject in nest: @@ -262,7 +80,7 @@ def find_root_of_subject(data, key_subject): for data_subject, nest in data_subjects.items(): - logging.info(f'Beginning paginate process for field {data_subject} ' + self.logger.info(f'Beginning paginate process for field {data_subject} ' f'for query: {query}') page_count = 0 @@ -274,13 +92,13 @@ def find_root_of_subject(data, key_subject): success = False for attempt in range(num_attempts): - logging.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' + self.logger.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' f'page number {page_count}\n') response = requests.post(base_url, json={'query': query.format( **before_parameters)}, headers=self.headers) - update_gh_rate_limit(self, response) + self.update_gh_rate_limit(response) try: data = response.json() @@ -288,9 +106,9 @@ def find_root_of_subject(data, key_subject): data = 
json.loads(json.dumps(response.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) + self.logger.info("Error!: {}".format(data['errors'])) if data['errors'][0]['type'] == 'RATE_LIMITED': - update_gh_rate_limit(self, response) + self.update_gh_rate_limit(response) num_attempts -= 1 continue @@ -302,18 +120,18 @@ def find_root_of_subject(data, key_subject): data = root['edges'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url)) break if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': num_attempts -= 1 - update_gh_rate_limit(self, response, temporarily_disable=True) + self.update_gh_rate_limit(response, temporarily_disable=True) if data['message'] == 'Bad credentials': - update_gh_rate_limit(self, response, bad_credentials=True) + self.update_gh_rate_limit(response, bad_credentials=True) if not success: - logging.info('GraphQL query failed: {}'.format(query)) + self.logger.info('GraphQL query failed: {}'.format(query)) continue before_parameters.update({ @@ -323,7 +141,7 @@ def find_root_of_subject(data, key_subject): tuples += data - logging.info(f'Paged through {page_count} pages and ' + self.logger.info(f'Paged through {page_count} pages and ' f'collected {len(tuples)} data points\n') if not nest: @@ -333,9 +151,9 @@ def find_root_of_subject(data, key_subject): before_parameters=before_parameters) - def pull_requests_graphql(self, task_info, repo_id): + def pull_request_files_model(self, task_info, repo_id): - owner, repo = get_owner_repo(task_info['given']['github_url']) + owner, repo = self.get_owner_repo(task_info['given']['github_url']) # query existing PRs and the respective url we will append the commits url to pr_number_sql = s.sql.text(""" @@ -349,7 +167,7 @@ def pull_requests_graphql(self, task_info, repo_id): for index, pull_request in enumerate(pr_numbers.itertuples()): - logging.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') + self.logger.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') query = """ {{ @@ -394,26 +212,24 @@ def pull_requests_graphql(self, task_info, repo_id): WHERE pull_request_files.pull_request_id = pull_requests.pull_request_id AND repo_id = :repo_id """) - logging.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') + self.logger.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') table_values = pd.read_sql(table_values_sql, self.db, params={'repo_id': repo_id}) # Compare queried values against table values for dupes/updates if len(pr_file_rows) > 0: table_columns = pr_file_rows[0].keys() else: - logging.info(f'No rows need insertion for repo {repo_id}\n') - register_task_completion(self, task_info, repo_id, 'pull_request_files') + self.logger.info(f'No rows need insertion for repo {repo_id}\n') + self.register_task_completion(task_info, repo_id, 'pull_request_files') + return # Compare queried values against table values for dupes/updates pr_file_rows_df = pd.DataFrame(pr_file_rows) pr_file_rows_df = pr_file_rows_df.dropna(subset=['pull_request_id']) - pr_file_rows_df['need_update'] = 0 dupe_columns = 
['pull_request_id', 'pr_file_path'] update_columns = ['pr_file_additions', 'pr_file_deletions'] - logging.info(f'{pr_file_rows_df}') - logging.info(f'{table_values}') need_insertion = pr_file_rows_df.merge(table_values, suffixes=('','_table'), how='outer', indicator=True, on=dupe_columns).loc[ lambda x : x['_merge']=='left_only'][table_columns] @@ -423,14 +239,13 @@ def pull_requests_graphql(self, task_info, repo_id): on=update_columns, suffixes=('','_table'), how='outer',indicator=True ).loc[lambda x : x['_merge']=='left_only'][table_columns] - need_updates['b_pull_request_id'] = need_updates['pull_request_id'] need_updates['b_pr_file_path'] = need_updates['pr_file_path'] pr_file_insert_rows = need_insertion.to_dict('records') pr_file_update_rows = need_updates.to_dict('records') - logging.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' + self.logger.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' f'{len(need_updates)} updates.\n') if len(pr_file_update_rows) > 0: @@ -447,7 +262,7 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) if len(pr_file_insert_rows) > 0: @@ -460,14 +275,22 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) - register_task_completion(self, task_info, repo_id, 'pull_request_files') + self.register_task_completion(task_info, repo_id, 'pull_request_files') def pull_request_commits_model(self, task_info, repo_id): """ Queries the commits related to each pull request already inserted in the db """ + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + + # query existing PRs and the respective url we will append the commits url to pr_url_sql = s.sql.text(""" SELECT DISTINCT pr_url, pull_requests.pull_request_id @@ -484,7 +307,7 @@ def pull_request_commits_model(self, task_info, repo_id): update_col_map = {} # Use helper paginate function to iterate the commits url and check for dupes - pr_commits = paginate(self, commits_url, duplicate_col_map, update_col_map, table, table_pkey, + pr_commits = self.paginate(commits_url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="where pull_request_id = {}".format(pull_request.pull_request_id)) for pr_commit in pr_commits: # post-pagination, iterate results @@ -500,9 +323,9 @@ def pull_request_commits_model(self, task_info, repo_id): 'data_source': 'GitHub API', } result = self.db.execute(self.pull_request_commits_table.insert().values(pr_commit_row)) - logging.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") - register_task_completion(self, task_info, repo_id, 'pull_request_commits') + self.register_task_completion(task_info, repo_id, 'pull_request_commits') def pull_requests_model(self, entry_info, repo_id): """Pull Request data collection function. Query GitHub API for PhubRs. 
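The duplicate/update detection in pull_request_files_model above is a pandas anti-join: an outer merge with indicator=True, keeping only the rows flagged left_only. A minimal sketch of that pattern with invented rows, reusing the worker's column names:

import pandas as pd

# Freshly collected rows (invented values).
collected = pd.DataFrame([
    {"pull_request_id": 1, "pr_file_path": "README.md", "pr_file_additions": 3},
    {"pull_request_id": 2, "pr_file_path": "setup.py", "pr_file_additions": 10},
])

# Rows already present in the pull_request_files table (invented values).
in_db = pd.DataFrame([
    {"pull_request_id": 1, "pr_file_path": "README.md", "pr_file_additions": 3},
])

dupe_columns = ["pull_request_id", "pr_file_path"]

# Outer merge with an indicator column, then keep only rows that exist solely on
# the collected side -- these are the rows that still need insertion.
need_insertion = collected.merge(
    in_db, suffixes=("", "_table"), how="outer", indicator=True, on=dupe_columns
).loc[lambda x: x["_merge"] == "left_only"][collected.columns]

print(need_insertion.to_dict("records"))  # only the setup.py row remains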
@@ -510,11 +333,18 @@ def pull_requests_model(self, entry_info, repo_id): :param entry_info: A dictionary consisiting of 'git_url' and 'repo_id' :type entry_info: dict """ + + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + github_url = entry_info['given']['github_url'] - logging.info('Beginning collection of Pull Requests...\n') - logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') - record_model_process(self, repo_id, 'pull_requests') + self.logger.info('Beginning collection of Pull Requests...\n') + self.logger.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') owner, repo = self.get_owner_repo(github_url) @@ -530,12 +360,12 @@ def pull_requests_model(self, entry_info, repo_id): duplicate_col_map = {'pr_src_id': 'id'} #list to hold pull requests needing insertion - prs = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, + prs = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey, where_clause='WHERE repo_id = {}'.format(repo_id), value_update_col_map={'pr_augur_contributor_id': float('nan')}) # Discover and remove duplicates before we start inserting - logging.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") + self.logger.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") for pr_dict in prs: @@ -553,7 +383,7 @@ def pull_requests_model(self, entry_info, repo_id): 'pr_src_state': pr_dict['state'], 'pr_src_locked': pr_dict['locked'], 'pr_src_title': pr_dict['title'], - 'pr_augur_contributor_id': find_id_from_login(self, pr_dict['user']['login']), + 'pr_augur_contributor_id': self.find_id_from_login(pr_dict['user']['login']), 'pr_body': pr_dict['body'], 'pr_created_at': pr_dict['created_at'], 'pr_updated_at': pr_dict['updated_at'], @@ -581,21 +411,21 @@ def pull_requests_model(self, entry_info, repo_id): } if pr_dict['flag'] == 'need_insertion': - logging.info(f'PR {pr_dict["id"]} needs to be inserted\n') + self.logger.info(f'PR {pr_dict["id"]} needs to be inserted\n') result = self.db.execute(self.pull_requests_table.insert().values(pr)) - logging.info(f"Added Pull Request: {result.inserted_primary_key}") + self.logger.info(f"Added Pull Request: {result.inserted_primary_key}") self.pr_id_inc = int(result.inserted_primary_key[0]) elif pr_dict['flag'] == 'need_update': result = self.db.execute(self.pull_requests_table.update().where( self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr)) - logging.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( + self.logger.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( pr_dict['id'])) self.pr_id_inc = pr_dict['pkey'] else: - logging.info("PR does not need to be inserted. Fetching its id from DB") + self.logger.info("PR does not need to be inserted. 
Fetching its id from DB") pr_id_sql = s.sql.text(""" SELECT pull_request_id FROM pull_requests WHERE pr_src_id={} @@ -609,16 +439,16 @@ def pull_requests_model(self, entry_info, repo_id): self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc) self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc) - logging.info(f"Inserted PR data for {owner}/{repo}") + self.logger.info(f"Inserted PR data for {owner}/{repo}") self.results_counter += 1 - register_task_completion(self, entry_info, repo_id, 'pull_requests') + self.register_task_completion(entry_info, repo_id, 'pull_requests') def query_labels(self, labels, pr_id): - logging.info('Querying PR Labels\n') + self.logger.info('Querying PR Labels\n') if len(labels) == 0: - logging.info('No new labels to add\n') + self.logger.info('No new labels to add\n') return table = 'pull_request_labels' @@ -629,12 +459,12 @@ def query_labels(self, labels, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - pr_labels_table_values = get_table_values(self, cols_query, [table]) + pr_labels_table_values = self.get_table_values(cols_query, [table]) - new_labels = assign_tuple_action(self, labels, pr_labels_table_values, update_col_map, duplicate_col_map, + new_labels = self.assign_tuple_action(labels, pr_labels_table_values, update_col_map, duplicate_col_map, table_pkey) - logging.info(f'Found {len(new_labels)} labels\n') + self.logger.info(f'Found {len(new_labels)} labels\n') for label_dict in new_labels: @@ -653,14 +483,13 @@ def query_labels(self, labels, pr_id): if label_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_labels_table.insert().values(label)) - logging.info(f"Added PR Label: {result.inserted_primary_key}\n") - logging.info(f"Inserted PR Labels data for PR with id {pr_id}\n") + self.logger.info(f"Added PR Label: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted PR Labels data for PR with id {pr_id}\n") self.results_counter += 1 - self.label_id_inc = int(result.inserted_primary_key[0]) def query_pr_events(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Events\n') + self.logger.info('Querying PR Events\n') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/events?per_page=100&page={}') @@ -674,14 +503,14 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): duplicate_col_map = {'issue_event_src_id': 'id'} #list to hold contributors needing insertion or update - pr_events = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey) + pr_events = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") + self.logger.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") for pr_event_dict in pr_events: if pr_event_dict['actor']: - cntrb_id = find_id_from_login(self, pr_event_dict['actor']['login']) + cntrb_id = self.find_id_from_login(pr_event_dict['actor']['login']) else: cntrb_id = 1 @@ -700,18 +529,17 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.pull_request_events_table.insert().values(pr_event)) - logging.info(f"Added PR Event: {result.inserted_primary_key}\n") + self.logger.info(f"Added PR Event: {result.inserted_primary_key}\n") self.results_counter += 1 - self.event_id_inc = int(result.inserted_primary_key[0]) - 
logging.info(f"Inserted PR Events data for PR with id {pr_id}\n") + self.logger.info(f"Inserted PR Events data for PR with id {pr_id}\n") def query_reviewers(self, reviewers, pr_id): - logging.info('Querying Reviewers') + self.logger.info('Querying Reviewers') if reviewers is None or len(reviewers) == 0: - logging.info('No reviewers to add') + self.logger.info('No reviewers to add') return table = 'pull_request_reviewers' @@ -722,15 +550,15 @@ def query_reviewers(self, reviewers, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - reviewers_table_values = get_table_values(self, cols_query, [table]) + reviewers_table_values = self.get_table_values(cols_query, [table]) - new_reviewers = assign_tuple_action(self, reviewers, reviewers_table_values, update_col_map, duplicate_col_map, + new_reviewers = self.assign_tuple_action(reviewers, reviewers_table_values, update_col_map, duplicate_col_map, table_pkey) for reviewers_dict in new_reviewers: if 'login' in reviewers_dict: - cntrb_id = find_id_from_login(self, reviewers_dict['login']) + cntrb_id = self.find_id_from_login(reviewers_dict['login']) else: cntrb_id = 1 @@ -744,18 +572,17 @@ def query_reviewers(self, reviewers, pr_id): if reviewers_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_reviewers_table.insert().values(reviewer)) - logging.info(f"Added PR Reviewer {result.inserted_primary_key}") + self.logger.info(f"Added PR Reviewer {result.inserted_primary_key}") - self.reviewer_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 - logging.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") + self.logger.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") def query_assignee(self, assignees, pr_id): - logging.info('Querying Assignees') + self.logger.info('Querying Assignees') if assignees is None or len(assignees) == 0: - logging.info('No assignees to add') + self.logger.info('No assignees to add') return table = 'pull_request_assignees' @@ -766,15 +593,15 @@ def query_assignee(self, assignees, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - assignee_table_values = get_table_values(self, cols_query, [table]) + assignee_table_values = self.get_table_values(cols_query, [table]) - assignees = assign_tuple_action(self, assignees, assignee_table_values, update_col_map, duplicate_col_map, + assignees = self.assign_tuple_action(assignees, assignee_table_values, update_col_map, duplicate_col_map, table_pkey) for assignee_dict in assignees: if 'login' in assignee_dict: - cntrb_id = find_id_from_login(self, assignee_dict['login']) + cntrb_id = self.find_id_from_login(assignee_dict['login']) else: cntrb_id = 1 @@ -788,15 +615,14 @@ def query_assignee(self, assignees, pr_id): if assignee_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_assignees_table.insert().values(assignee)) - logging.info(f'Added PR Assignee {result.inserted_primary_key}') + self.logger.info(f'Added PR Assignee {result.inserted_primary_key}') - self.assignee_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 - logging.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') def query_pr_meta(self, head, base, pr_id): - logging.info('Querying PR Meta') + 
self.logger.info('Querying PR Meta') table = 'pull_request_meta' duplicate_col_map = {'pr_sha': 'sha'} @@ -808,12 +634,12 @@ def query_pr_meta(self, head, base, pr_id): update_keys += list(value_update_col_map.keys()) cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - meta_table_values = get_table_values(self, cols_query, [table]) + meta_table_values = self.get_table_values(cols_query, [table]) pr_meta_dict = { - 'head': assign_tuple_action(self, [head], meta_table_values, update_col_map, duplicate_col_map, + 'head': self.assign_tuple_action([head], meta_table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map=value_update_col_map)[0], - 'base': assign_tuple_action(self, [base], meta_table_values, update_col_map, duplicate_col_map, + 'base': self.assign_tuple_action([base], meta_table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map=value_update_col_map)[0] } @@ -824,7 +650,7 @@ def query_pr_meta(self, head, base, pr_id): 'pr_src_meta_label': pr_meta_data['label'], 'pr_src_meta_ref': pr_meta_data['ref'], 'pr_sha': pr_meta_data['sha'], - 'cntrb_id': find_id_from_login(self, pr_meta_data['user']['login']) if pr_meta_data['user'] \ + 'cntrb_id': self.find_id_from_login(pr_meta_data['user']['login']) if pr_meta_data['user'] \ and 'login' in pr_meta_data['user'] else None, 'tool_source': self.tool_source, 'tool_version': self.tool_version, @@ -836,13 +662,12 @@ def query_pr_meta(self, head, base, pr_id): self.pull_request_meta_table.c.pr_sha==pr_meta['pr_sha'] and self.pull_request_meta_table.c.pr_head_or_base==pr_side ).values(pr_meta)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( - issue_dict['id'])) - self.issue_id_inc = issue_dict['pkey'] + # self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format(issue_dict['id'])) + self.pr_meta_id_inc = pr_meta_data['pkey'] elif pr_meta_data['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_meta_table.insert().values(pr_meta)) - logging.info(f'Added PR Head {result.inserted_primary_key}') + self.logger.info(f'Added PR Head {result.inserted_primary_key}') self.pr_meta_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 @@ -857,12 +682,12 @@ def query_pr_meta(self, head, base, pr_id): if pr_meta_data['repo']: self.query_pr_repo(pr_meta_data['repo'], pr_side, self.pr_meta_id_inc) else: - logging.info('No new PR Head data to add') + self.logger.info('No new PR Head data to add') - logging.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Comments') + self.logger.info('Querying PR Comments') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/comments?per_page=100&page={}') @@ -876,20 +701,21 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): duplicate_col_map = {'pr_message_ref_src_comment_id': 'id'} #list to hold contributors needing insertion or update - pr_messages = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey) + pr_messages = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") + self.logger.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") for 
pr_msg_dict in pr_messages: if pr_msg_dict['user'] and 'login' in pr_msg_dict['user']: - cntrb_id = find_id_from_login(self, pr_msg_dict['user']['login']) + cntrb_id = self.find_id_from_login(pr_msg_dict['user']['login']) else: cntrb_id = 1 msg = { 'rgls_id': None, - 'msg_text': pr_msg_dict['body'], + 'msg_text': pr_msg_dict['body'].replace("0x00", "____") if \ + 'body' in pr_msg_dict else None, 'msg_timestamp': pr_msg_dict['created_at'], 'msg_sender_email': None, 'msg_header': None, @@ -901,12 +727,11 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.message_table.insert().values(msg)) - logging.info(f'Added PR Comment {result.inserted_primary_key}') - self.msg_id_inc = int(result.inserted_primary_key[0]) + self.logger.info(f'Added PR Comment {result.inserted_primary_key}') pr_msg_ref = { 'pull_request_id': pr_id, - 'msg_id': self.msg_id_inc, + 'msg_id': int(result.inserted_primary_key[0]), 'pr_message_ref_src_comment_id': pr_msg_dict['id'], 'pr_message_ref_src_node_id': pr_msg_dict['node_id'], 'tool_source': self.tool_source, @@ -917,15 +742,14 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): result = self.db.execute( self.pull_request_message_ref_table.insert().values(pr_msg_ref) ) - logging.info(f'Added PR Message Ref {result.inserted_primary_key}') - self.pr_msg_ref_id_inc = int(result.inserted_primary_key[0]) + self.logger.info(f'Added PR Message Ref {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR Message data for PR with id {pr_id}') + self.logger.info(f'Finished adding PR Message data for PR with id {pr_id}') def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): - logging.info(f'Querying PR {pr_repo_type} repo') + self.logger.info(f'Querying PR {pr_repo_type} repo') table = 'pull_request_repo' duplicate_col_map = {'pr_src_repo_id': 'id'} @@ -935,13 +759,13 @@ def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - pr_repo_table_values = get_table_values(self, cols_query, [table]) + pr_repo_table_values = self.get_table_values(cols_query, [table]) - new_pr_repo = assign_tuple_action(self, [pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, + new_pr_repo = self.assign_tuple_action([pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, table_pkey)[0] if new_pr_repo['owner'] and 'login' in new_pr_repo['owner']: - cntrb_id = find_id_from_login(self, new_pr_repo['owner']['login']) + cntrb_id = self.find_id_from_login(new_pr_repo['owner']['login']) else: cntrb_id = 1 @@ -962,20 +786,8 @@ def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): if new_pr_repo['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_repo_table.insert().values(pr_repo)) - logging.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') + self.logger.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') - - def get_owner_repo(self, github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - + self.logger.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') diff --git a/workers/pull_request_worker/pull_request_worker/runtime.py 
b/workers/pull_request_worker/pull_request_worker/runtime.py deleted file mode 100644 --- a/workers/pull_request_worker/pull_request_worker/runtime.py +++ /dev/null @@ -1,109 +0,0 @@ -import json, logging, os, click -import requests -from flask import Flask, Response, jsonify, request -from pull_request_worker.worker import GHPullRequestWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': # POST a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.gh_pr_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.gh_pr_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51239, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'pull_request_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - print("New pull request worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.pull_request_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": 
"string" - } - - #create instance of the worker - - app.gh_pr_worker = GHPullRequestWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.gh_pr_worker._child is not None: - app.gh_pr_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/pull_request_worker/runtime.py b/workers/pull_request_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/pull_request_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.pull_request_worker.pull_request_worker import GitHubPullRequestWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubPullRequestWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/pull_request_worker/setup.py b/workers/pull_request_worker/setup.py --- a/workers/pull_request_worker/setup.py +++ b/workers/pull_request_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="pull_request_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'pull_request_worker_start=pull_request_worker.runtime:main', + 'pull_request_worker_start=workers.pull_request_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/release_worker/__init__.py b/workers/release_worker/__init__.py new file mode 100644 --- /dev/null +++ b/workers/release_worker/__init__.py @@ -0,0 +1,5 @@ +"""gh_release_worker - Augur Worker that collects GitHub Repo Info data""" + +__version__ = '0.0.0' +__author__ = 'Augur Team <[email protected]>' +__all__ = [] diff --git a/workers/release_worker/release_worker.py b/workers/release_worker/release_worker.py new file mode 100644 --- /dev/null +++ b/workers/release_worker/release_worker.py @@ -0,0 +1,154 @@ +import logging, os, sys, time, requests, json +from datetime import datetime +from multiprocessing import Process, Queue +from urllib.parse import urlparse +import pandas as pd +import sqlalchemy as s +from sqlalchemy import MetaData +from sqlalchemy.ext.automap import automap_base +from workers.worker_base import Worker + +#TODO - fully edit to match releases +class ReleaseWorker(Worker): + def __init__(self, config={}): + + worker_type = "release_worker" + + # Define what this worker can be given and know how to interpret + given = [['github_url']] + models = ['releases'] + + # Define the tables needed to insert, update, or delete on + data_tables = ['releases'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, 
operations_tables) + + # Define data collection info + self.tool_source = 'Release Worker' + self.tool_version = '1.0.0' + self.data_source = 'GitHub API' + + def releases_model(self, task, repo_id): + + github_url = task['given']['github_url'] + + self.logger.info("Beginning filling the releases model for repo: " + github_url + "\n") + + owner, repo = self.get_owner_repo(github_url) + + url = 'https://api.github.com/graphql' + + query = """ + { + repository(owner:"%s", name:"%s"){ + id + releases(orderBy: {field: CREATED_AT, direction: ASC}, last: %d) { + edges { + node { + name + publishedAt + createdAt + description + id + isDraft + isPrerelease + tagName + url + updatedAt + author { + name + company + } + } + } + } + } + } + """ % (owner, repo, 10) + + # Hit the graphql endpoint and retry 3 times in case of failure + num_attempts = 0 + success = False + while num_attempts < 3: + self.logger.info("Hitting endpoint: {} ...\n".format(url)) + r = requests.post(url, json={'query': query}, headers=self.headers) + self.update_gh_rate_limit(r) + + try: + data = r.json() + except: + data = json.loads(json.dumps(r.text)) + + if 'errors' in data: + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': + self.update_gh_rate_limit(r) + continue + + if 'data' in data: + success = True + data = data['data']['repository'] + break + else: + self.logger.info("Request returned a non-data dict: {}\n".format(data)) + if data['message'] == 'Not Found': + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + break + if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': + self.update_gh_rate_limit(r, temporarily_disable=True) + continue + if data['message'] == 'Bad credentials': + self.update_gh_rate_limit(r, bad_credentials=True) + continue + num_attempts += 1 + if not success: + self.register_task_failure(task, repo_id, "Failed to hit endpoint: {}".format(url)) + return + + if 'repository' in data: + if 'releases' in data['repository']: + if 'edges' in data['repository']['releases']: + for n in data['repository']['releases']['edges']: + if 'node' in n: + release = n['node'] + self.insert_release(repo_id, owner, release) + else: + self.logger.info("There's no release to insert. Current node is not available in releases: {}\n".format(n)) + else: + self.logger.info("There are no releases to insert for current repository: {}\n".format(data)) + else: + self.logger.info("Graphql response does not contain releases: {}\n".format(data)) + else: + self.logger.info("Graphql response does not contain repository: {}\n".format(data)) + + def insert_release(self, repo_id, owner, release): + author = release['author']['name']+'_'+release['author']['company'] + # Put all data together in format of the table + self.logger.info(f'Inserting release for repo with id:{repo_id}, owner:{owner}, release name:{release["name"]}\n') + release_inf = { + 'release_id': release['id'], + 'repo_id': repo_id, + 'release_name': release['name'], + 'release_description': release['description'], + 'release_author': author, + 'release_created_at': release['createdAt'], + 'release_published_at': release['publishedAt'], + 'release_updated_at': release['updatedAt'], + 'release_is_draft': release['isDraft'], + 'release_is_prerelease': release['isPrerelease'], + 'release_tag_name': release['tagName'], + 'release_url': release['url'], + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source + } + + result = self.db.execute(self.releases_table.insert().values(release_inf)) + self.logger.info(f"Primary Key inserted into releases table: {result.inserted_primary_key}\n") + self.results_counter += 1 + + self.logger.info(f"Inserted info for {owner}/{release['name']}\n") + + #Register this task as completed + self.register_task_completion(self.task, repo_id, "releases") + return + + +
diff --git a/workers/release_worker/runtime.py b/workers/release_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/release_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.release_worker.release_worker import ReleaseWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ReleaseWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9)
diff --git a/workers/metric_status_worker/setup.py b/workers/release_worker/setup.py similarity index 83% rename from workers/metric_status_worker/setup.py rename to workers/release_worker/setup.py --- a/workers/metric_status_worker/setup.py +++ b/workers/release_worker/setup.py @@ -5,22 +5,20 @@ from setuptools import find_packages from setuptools import setup - def read(filename): filename = os.path.join(os.path.dirname(__file__), filename) text_type = type(u"") with io.open(filename, mode="r", encoding='utf-8') as fd: return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) - setup( - name="metric_status_worker", - version="0.1.0", + name="release_worker", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", author_email="[email protected]", - description="Augur Worker that collects GitHub data", + description="Augur Worker that collects data about GitHub releases", packages=find_packages(exclude=('tests',)), install_requires=[ 'flask', @@ -30,7 +28,7 @@ def
read(filename): ], entry_points={ 'console_scripts': [ - 'metric_status_worker_start=metric_status_worker.runtime:main', + 'release_worker_start=workers.release_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/repo_info_worker/__init__.py b/workers/repo_info_worker/__init__.py new file mode 100644 diff --git a/workers/repo_info_worker/repo_info_worker.py b/workers/repo_info_worker/repo_info_worker.py new file mode 100644 --- /dev/null +++ b/workers/repo_info_worker/repo_info_worker.py @@ -0,0 +1,302 @@ +import logging, os, sys, time, requests, json +from datetime import datetime +from multiprocessing import Process, Queue +import pandas as pd +import sqlalchemy as s +from workers.worker_base import Worker + +# NOTE: This worker primarily inserts rows into the REPO_INFO table, which serves the primary purposes of +# 1. Displaying discrete metadata like "number of forks" and how they change over time +# 2. Validating other workers, like those related to pull requests, issues, and commits. Our totals should be at or very near the totals in the repo_info table. + +# This table also updates the REPO table in 2 cases: +# 1. Recognizing when a repository is a forked repository by updating the "forked_from" field and +# 2. Recognizing when a repository is archived, and recording the data we observed the change in status. + +class RepoInfoWorker(Worker): + def __init__(self, config={}): + + worker_type = "repo_info_worker" + + # Define what this worker can be given and know how to interpret + given = [['github_url']] + models = ['repo_info'] + + # Define the tables needed to insert, update, or delete on + data_tables = ['repo_info', 'repo'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Define data collection info + self.tool_source = 'Repo Info Worker' + self.tool_version = '1.0.0' + self.data_source = 'GitHub API' + + def repo_info_model(self, task, repo_id): + + github_url = task['given']['github_url'] + + self.logger.info("Beginning filling the repo_info model for repo: " + github_url + "\n") + + owner, repo = self.get_owner_repo(github_url) + + url = 'https://api.github.com/graphql' + + query = """ + { + repository(owner:"%s", name:"%s"){ + updatedAt + hasIssuesEnabled + issues(states:OPEN) { + totalCount + } + hasWikiEnabled + forkCount + defaultBranchRef { + name + } + watchers { + totalCount + } + id + licenseInfo { + name + url + } + stargazers { + totalCount + } + codeOfConduct { + name + url + } + issue_count: issues { + totalCount + } + issues_closed: issues(states:CLOSED) { + totalCount + } + pr_count: pullRequests { + totalCount + } + pr_open: pullRequests(states: OPEN) { + totalCount + } + pr_closed: pullRequests(states: CLOSED) { + totalCount + } + pr_merged: pullRequests(states: MERGED) { + totalCount + } + ref(qualifiedName: "master") { + target { + ... 
on Commit { + history(first: 0){ + totalCount + } + } + } + } + } + } + """ % (owner, repo) + + # Hit the graphql endpoint and retry 3 times in case of failure + num_attempts = 0 + success = False + data = None + while num_attempts < 3: + self.logger.info("Hitting endpoint: {} ...\n".format(url)) + r = requests.post(url, json={'query': query}, headers=self.headers) + self.update_gh_rate_limit(r) + + try: + data = r.json() + except: + data = json.loads(json.dumps(r.text)) + + if 'errors' in data: + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': + self.update_gh_rate_limit(r) + continue + + if 'data' in data: + success = True + data = data['data']['repository'] + break + else: + self.logger.info("Request returned a non-data dict: {}\n".format(data)) + if data['message'] == 'Not Found': + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + break + if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': + self.update_gh_rate_limit(r, temporarily_disable=True) + continue + if data['message'] == 'Bad credentials': + self.update_gh_rate_limit(r, bad_credentials=True) + continue + num_attempts += 1 + if not success: + self.logger.error('Cannot hit endpoint after 3 attempts. \"Completing\" task.\n') + self.register_task_completion(self.task, repo_id, 'repo_info') + return + + # Just checking that the data is accessible (would not be if repo no longer exists) + try: + data['updatedAt'] + except Exception as e: + self.logger.error('Cannot access repo_info data: {}\nError: {}. \"Completing\" task.'.format(data, e)) + self.register_task_completion(self.task, repo_id, 'repo_info') + return + + # Get committers count info that requires seperate endpoint + committers_count = self.query_committers_count(owner, repo) + + # Put all data together in format of the table + self.logger.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') + rep_inf = { + 'repo_id': repo_id, + 'last_updated': data['updatedAt'] if 'updatedAt' in data else None, + 'issues_enabled': data['hasIssuesEnabled'] if 'hasIssuesEnabled' in data else None, + 'open_issues': data['issues']['totalCount'] if data['issues'] else None, + 'pull_requests_enabled': None, + 'wiki_enabled': data['hasWikiEnabled'] if 'hasWikiEnabled' in data else None, + 'pages_enabled': None, + 'fork_count': data['forkCount'] if 'forkCount' in data else None, + 'default_branch': data['defaultBranchRef']['name'] if data['defaultBranchRef'] else None, + 'watchers_count': data['watchers']['totalCount'] if data['watchers'] else None, + 'UUID': None, + 'license': data['licenseInfo']['name'] if data['licenseInfo'] else None, + 'stars_count': data['stargazers']['totalCount'] if data['stargazers'] else None, + 'committers_count': committers_count, + 'issue_contributors_count': None, + 'changelog_file': None, + 'contributing_file': None, + 'license_file': data['licenseInfo']['url'] if data['licenseInfo'] else None, + 'code_of_conduct_file': data['codeOfConduct']['url'] if data['codeOfConduct'] else None, + 'security_issue_file': None, + 'security_audit_file': None, + 'status': None, + 'keywords': None, + 'commit_count': data['ref']['target']['history']['totalCount'] if data['ref'] else None, + 'issues_count': data['issue_count']['totalCount'] if data['issue_count'] else None, + 'issues_closed': data['issues_closed']['totalCount'] if data['issues_closed'] else None, + 
'pull_request_count': data['pr_count']['totalCount'] if data['pr_count'] else None, + 'pull_requests_open': data['pr_open']['totalCount'] if data['pr_open'] else None, + 'pull_requests_closed': data['pr_closed']['totalCount'] if data['pr_closed'] else None, + 'pull_requests_merged': data['pr_merged']['totalCount'] if data['pr_merged'] else None, + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source + } + + result = self.db.execute(self.repo_info_table.insert().values(rep_inf)) + self.logger.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") + self.results_counter += 1 + + # Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table. + forked = self.is_forked(owner, repo) + archived = self.is_archived(owner, repo) + archived_date_collected = None + if archived is not False: + archived_date_collected = archived + archived = 1 + else: + archived = 0 + + rep_additional_data = { + 'forked_from': forked, + 'repo_archived': archived, + 'repo_archived_date_collected': archived_date_collected + } + result = self.db.execute(self.repo_table.update().where( + self.repo_table.c.repo_id==repo_id).values(rep_additional_data)) + + self.logger.info(f"Inserted info for {owner}/{repo}\n") + + # Register this task as completed + self.register_task_completion(self.task, repo_id, "repo_info") + + def query_committers_count(self, owner, repo): + self.logger.info('Querying committers count\n') + url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100' + committers = 0 + + try: + while True: + r = requests.get(url, headers=self.headers) + self.update_gh_rate_limit(r) + committers += len(r.json()) + + if 'next' not in r.links: + break + else: + url = r.links['next']['url'] + except Exception: + self.logger.exception('An error occured while querying contributor count\n') + + return committers + + def is_forked(self, owner, repo): #/repos/:owner/:repo parent + self.logger.info('Querying parent info to verify if the repo is forked\n') + url = f'https://api.github.com/repos/{owner}/{repo}' + + r = requests.get(url, headers=self.headers) + self.update_gh_rate_limit(r) + + data = self.get_repo_data(url, r) + + if 'fork' in data: + if 'parent' in data: + return data['parent']['full_name'] + return 'Parent not available' + + return False + + def is_archived(self, owner, repo): + self.logger.info('Querying committers count\n') + url = f'https://api.github.com/repos/{owner}/{repo}' + + r = requests.get(url, headers=self.headers) + self.update_gh_rate_limit(r) + + data = self.get_repo_data(url, r) + + if 'archived' in data: + if data['archived']: + if 'updated_at' in data: + return data['updated_at'] + return 'Date not available' + return False + + return False + + def get_repo_data(self, url, response): + success = False + try: + data = response.json() + except: + data = json.loads(json.dumps(response.text)) + + if 'errors' in data: + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': + self.update_gh_rate_limit(response) + + if 'id' in data: + success = True + else: + self.logger.info("Request returned a non-data dict: {}\n".format(data)) + if data['message'] == 'Not Found': + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + if data['message'] == 'You have triggered an abuse detection 
mechanism. Please wait a few minutes before you try again.': + self.update_gh_rate_limit(response, temporarily_disable=True) + if data['message'] == 'Bad credentials': + self.update_gh_rate_limit(response, bad_credentials=True) + if not success: + self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url)) + + return data
diff --git a/workers/repo_info_worker/repo_info_worker/runtime.py b/workers/repo_info_worker/repo_info_worker/runtime.py deleted file mode 100644 --- a/workers/repo_info_worker/repo_info_worker/runtime.py +++ /dev/null @@ -1,107 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from repo_info_worker.worker import GHRepoInfoWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.gh_repo_info_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.gh_repo_info_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'repo_info_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.repo_info_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 
'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - app.gh_repo_info_worker = GHRepoInfoWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - if app.gh_repo_info_worker._child is not None: - app.gh_repo_info_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - - - diff --git a/workers/repo_info_worker/repo_info_worker/worker.py b/workers/repo_info_worker/repo_info_worker/worker.py deleted file mode 100644 --- a/workers/repo_info_worker/repo_info_worker/worker.py +++ /dev/null @@ -1,368 +0,0 @@ -import logging, os, sys, time, requests, json -from datetime import datetime -from multiprocessing import Process, Queue -from urllib.parse import urlparse -import pandas as pd -import sqlalchemy as s -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base -from workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process - -class GHRepoInfoWorker: - def __init__(self, config, task=None): - self._task = task - self._child = None - self._queue = Queue() - self.working_on = None - self.config = config - LOG_FORMAT = '%(levelname)s:[%(name)s]: %(message)s' - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO, format=LOG_FORMAT) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - # logger = logging.getLogger('RepoInfoWorker') - self.db = None - self.table = None - self.API_KEY = self.config['key'] - self.tool_source = 'GitHub Repo Info Worker' - self.tool_version = '0.0.1' - self.data_source = 'GitHub API' - self.results_counter = 0 - self.finishing_task = False - self.info_id_inc = None - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["github_url"]], - "models":["repo_info"] - } - ], - "config": [self.config] - } - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - logging.info("Making database connections...") - - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['repo_info']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - 
Base.prepare() - HelperBase.prepare() - - self.repo_info_table = Base.classes.repo_info.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - maxHistorySQL = s.sql.text(""" - SELECT max(history_id) AS history_id - FROM worker_history - """) - rs = pd.read_sql(maxHistorySQL, self.helper_db, params={}) - self.history_id = int(rs.iloc[0]["history_id"]) if rs.iloc[0]["history_id"] is not None else 25150 - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = self.history_id if self.finishing_task else self.history_id + 1 - - # Organize different keys available - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauthSQL = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(config['key'])) - for oauth in [{'oauth_id': 0, 'access_token': config['key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): - # self.headers = {'Authorization': 'token %s' % oauth['access_token']} - self.headers = {'Authorization': 'token {}'.format(oauth['access_token']), - 'Accept': 'application/vnd.github.vixen-preview+json'} - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - - # Send broker hello message - connect_to_broker(self) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - else: - self.finishing_task = False - logging.info("focused task is OFF\n") - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task """ - self._task = None - - def run(self): - logging.info("Running...") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self, repos=None): - - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if 
message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - """ Query all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['github_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - self.repo_info_model(message, repo_id) - except Exception: - raise ValueError('Worker ran into an error for task {}'.format(message)) - - def get_owner_repo(self, github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - - def repo_info_model(self, task, repo_id): - - github_url = task['given']['github_url'] - - logging.info("Beginning filling the repo_info model for repo: " + github_url + "\n") - record_model_process(self, repo_id, 'repo_info') - - owner, repo = self.get_owner_repo(github_url) - - url = 'https://api.github.com/graphql' - - query = """ - { - repository(owner:"%s", name:"%s"){ - updatedAt - hasIssuesEnabled - issues(states:OPEN) { - totalCount - } - hasWikiEnabled - forkCount - defaultBranchRef { - name - } - watchers { - totalCount - } - id - licenseInfo { - name - url - } - stargazers { - totalCount - } - codeOfConduct { - name - url - } - issue_count: issues { - totalCount - } - issues_closed: issues(states:CLOSED) { - totalCount - } - pr_count: pullRequests { - totalCount - } - pr_open: pullRequests(states: OPEN) { - totalCount - } - pr_closed: pullRequests(states: CLOSED) { - totalCount - } - pr_merged: pullRequests(states: MERGED) { - totalCount - } - ref(qualifiedName: "master") { - target { - ... on Commit { - history(first: 0){ - totalCount - } - } - } - } - } - } - """ % (owner, repo) - - num_attempts = 3 - success = False - for attempt in range(num_attempts): - logging.info("Hitting endpoint: {} ...\n".format(url)) - r = requests.post(url, json={'query': query}, headers=self.headers) - update_gh_rate_limit(self, r) - - try: - j = r.json() - except: - j = json.loads(json.dumps(r.text)) - - if 'errors' in j: - logging.info("Error!: {}".format(j['errors'])) - register_task_failure(self, task, repo_id, j['errors'][0]['message']) - return - - if 'data' in j: - success = True - j = j['data']['repository'] - break - else: - logging.info("Request returned a non-data dict: {}\n".format(j)) - if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) - break - if j['message'] == 'You have triggered an abuse detection mechanism. 
Please wait a few minutes before you try again.': - num_attempts -= 1 - update_gh_rate_limit(self, r, temporarily_disable=True) - if j['message'] == 'Bad credentials': - update_gh_rate_limit(self, r, bad_credentials=True) - if not success: - register_task_failure(self, task, repo_id, "Failed to hit endpoint: {}".format(url)) - return - - committers_count = self.query_committers_count(owner, repo) - - logging.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') - rep_inf = { - 'repo_id': repo_id, - 'last_updated': j['updatedAt'] if 'updatedAt' in j else None, - 'issues_enabled': j['hasIssuesEnabled'] if 'hasIssuesEnabled' in j else None, - 'open_issues': j['issues']['totalCount'] if j['issues'] else None, - 'pull_requests_enabled': None, - 'wiki_enabled': j['hasWikiEnabled'] if 'hasWikiEnabled' in j else None, - 'pages_enabled': None, - 'fork_count': j['forkCount'] if 'forkCount' in j else None, - 'default_branch': j['defaultBranchRef']['name'] if j['defaultBranchRef'] else None, - 'watchers_count': j['watchers']['totalCount'] if j['watchers'] else None, - 'UUID': None, - 'license': j['licenseInfo']['name'] if j['licenseInfo'] else None, - 'stars_count': j['stargazers']['totalCount'] if j['stargazers'] else None, - 'committers_count': committers_count, - 'issue_contributors_count': None, - 'changelog_file': None, - 'contributing_file': None, - 'license_file': j['licenseInfo']['url'] if j['licenseInfo'] else None, - 'code_of_conduct_file': j['codeOfConduct']['url'] if j['codeOfConduct'] else None, - 'security_issue_file': None, - 'security_audit_file': None, - 'status': None, - 'keywords': None, - 'commit_count': j['ref']['target']['history']['totalCount'] if j['ref'] else None, - 'issues_count': j['issue_count']['totalCount'] if j['issue_count'] else None, - 'issues_closed': j['issues_closed']['totalCount'] if j['issues_closed'] else None, - 'pull_request_count': j['pr_count']['totalCount'] if j['pr_count'] else None, - 'pull_requests_open': j['pr_open']['totalCount'] if j['pr_open'] else None, - 'pull_requests_closed': j['pr_closed']['totalCount'] if j['pr_closed'] else None, - 'pull_requests_merged': j['pr_merged']['totalCount'] if j['pr_merged'] else None, - 'tool_source': self.tool_source, - 'tool_version': self.tool_version, - 'data_source': self.data_source, - 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') - } - - result = self.db.execute(self.repo_info_table.insert().values(rep_inf)) - logging.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") - self.results_counter += 1 - - logging.info(f"Inserted info for {owner}/{repo}\n") - - #Register this task as completed - register_task_completion(self, task, repo_id, "repo_info") - - def query_committers_count(self, owner, repo): - logging.info('Querying committers count\n') - url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100' - committers = 0 - - try: - while True: - r = requests.get(url, headers=self.headers) - update_gh_rate_limit(self, r) - committers += len(r.json()) - - if 'next' not in r.links: - break - else: - url = r.links['next']['url'] - except Exception: - logging.exception('An error occured while querying contributor count\n') - - return committers - diff --git a/workers/repo_info_worker/runtime.py b/workers/repo_info_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/repo_info_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, 
requests, logging +from workers.repo_info_worker.repo_info_worker import RepoInfoWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = RepoInfoWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/repo_info_worker/setup.py b/workers/repo_info_worker/setup.py --- a/workers/repo_info_worker/setup.py +++ b/workers/repo_info_worker/setup.py @@ -13,22 +13,21 @@ def read(filename): setup( name="repo_info_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", author_email="[email protected]", description="Augur Worker that collects general data about a repo on GitHub", - packages=find_packages(exclude=('tests',)), + packages=find_packages(), install_requires=[ 'flask', 'requests', - 'psycopg2-binary', - 'click' + 'psycopg2-binary' ], entry_points={ 'console_scripts': [ - 'repo_info_worker_start=repo_info_worker.runtime:main', + 'repo_info_worker_start=workers.repo_info_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/standard_methods.py b/workers/standard_methods.py deleted file mode 100644 --- a/workers/standard_methods.py +++ /dev/null @@ -1,712 +0,0 @@ -""" Helper methods constant across all workers """ -import requests, datetime, time, traceback, json, os, sys, math -import sqlalchemy as s -import pandas as pd -import os -import sys, logging -from urllib.parse import urlparse - -def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): - """ map objects => { *our db col* : *gh json key*} """ - need_insertion_count = 0 - need_update_count = 0 - for i, obj in enumerate(new_data): - if type(obj) != dict: - logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) - continue - - obj['flag'] = 'none' # default of no action needed - existing_tuple = None - for db_dupe_key in list(duplicate_col_map.keys()): - - if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): - if table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'): - existing_tuple = table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] - continue - - logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) - obj['flag'] = 'need_insertion' - need_insertion_count += 1 - break - - if obj['flag'] == 'need_insertion': - logging.info('Already determined that current tuple needs insertion, skipping checking updates. 
' - 'Moving to next tuple.\n') - continue - - # If we need to check the values of the existing tuple to determine if an update is needed - for augur_col, value_check in value_update_col_map.items(): - not_nan_check = not (pd.isna(value_check) and pd.isna(existing_tuple[augur_col])) if value_check is not None else True - if existing_tuple[augur_col] != value_check and not_nan_check: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - if obj['flag'] == 'need_update': - logging.info('Already determined that current tuple needs update, skipping checking further updates. ' - 'Moving to next tuple.\n') - continue - - # Now check the existing tuple's values against the response values to determine if an update is needed - for col in update_col_map.keys(): - if update_col_map[col] not in obj: - continue - if obj[update_col_map[col]] == existing_tuple[col]: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + - "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count)) - return new_data - -def check_duplicates(new_data, table_values, key): - need_insertion = [] - for obj in new_data: - if type(obj) == dict: - if not table_values.isin([obj[key]]).any().any(): - need_insertion.append(obj) - # else: - # logging.info("Tuple with github's {} key value already".format(key) + - # "exists in our db: {}\n".format(str(obj[key]))) - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + - "was reduced to {} tuples.\n".format(str(len(need_insertion)))) - return need_insertion - -def connect_to_broker(self): - connected = False - for i in range(5): - try: - logging.info("attempt {}\n".format(i)) - if i > 0: - time.sleep(10) - requests.post('http://{}:{}/api/unstable/workers'.format( - self.config['broker_host'],self.config['broker_port']), json=self.specs) - logging.info("Connection to the broker was successful\n") - connected = True - break - except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker. Trying again...\n') - if not connected: - sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n') - -def dump_queue(queue): - """ - Empties all pending items in a queue and returns them in a list. 
- """ - result = [] - queue.put("STOP") - for i in iter(queue.get, 'STOP'): - result.append(i) - # time.sleep(.1) - return result - -def find_id_from_login(self, login): - idSQL = s.sql.text(""" - SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' - """.format(login)) - rs = pd.read_sql(idSQL, self.db, params={}) - data_list = [list(row) for row in rs.itertuples(index=False)] - try: - return data_list[0][0] - except: - logging.info("contributor needs to be added...") - - cntrb_url = ("https://api.github.com/users/" + login) - logging.info("Hitting endpoint: {} ...\n".format(cntrb_url)) - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'] if 'login' in contributor else None, - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, - "cntrb_canonical": None, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - return find_id_from_login(self, login) - -def get_owner_repo(github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - -def get_max_id(self, table, column, default=25150, operations_table=False): - maxIdSQL = s.sql.text(""" - SELECT max({0}.{1}) AS {1} - FROM {0} - """.format(table, column)) - db = self.db if not operations_table else self.helper_db - rs = pd.read_sql(maxIdSQL, db, params={}) - if rs.iloc[0][column] is not None: - max_id = int(rs.iloc[0][column]) + 1 - logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) - else: - max_id = default - logging.info("Could not find max id for {} column in the {} table... 
using default set to: \ - {}\n".format(column, table, max_id)) - return max_id - -def get_table_values(self, cols, tables, where_clause=""): - table_str = tables[0] - del tables[0] - - col_str = cols[0] - del cols[0] - - for table in tables: - table_str += ", " + table - for col in cols: - col_str += ", " + col - - tableValuesSQL = s.sql.text(""" - SELECT {} FROM {} {} - """.format(col_str, table_str, where_clause)) - logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL)) - values = pd.read_sql(tableValuesSQL, self.db, params={}) - return values - -def init_oauths(self): - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauthSQL = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(self.config['key'])) - for oauth in [{'oauth_id': 0, 'access_token': self.config['key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}\n".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - -def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}): - # Paginate backwards through all the tuples but get first page in order - # to determine if there are multiple pages and if the 1st page covers all - update_keys = list(update_col_map.keys()) if update_col_map else [] - update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] - cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - table_values = get_table_values(self, cols_query, [table], where_clause) - - i = 1 - multiple_pages = False - tuples = [] - while True: - num_attempts = 0 - success = False - while num_attempts < 3: - logging.info("Hitting endpoint: " + url.format(i) + " ...\n") - r = requests.get(url=url.format(i), headers=self.headers) - update_gh_rate_limit(self, r) - logging.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) - - try: - j = r.json() - except: - j = json.loads(json.dumps(r.text)) - - if type(j) != dict and type(j) != str: - success = True - break - elif type(j) == dict: - logging.info("Request returned a dict: {}\n".format(j)) - if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) - break - if j['message'] == 'You have 
triggered an abuse detection mechanism. Please wait a few minutes before you try again.': - num_attempts -= 1 - update_gh_rate_limit(self, r, temporarily_disable=True) - if j['message'] == 'Bad credentials': - update_gh_rate_limit(self, r, bad_credentials=True) - elif type(j) == str: - logging.info("J was string: {}\n".format(j)) - if '<!DOCTYPE html>' in j: - logging.info("HTML was returned, trying again...\n") - elif len(j) == 0: - logging.info("Empty string, trying again...\n") - else: - try: - j = json.loads(j) - success = True - break - except: - pass - num_attempts += 1 - if not success: - break - - # Find last page so we can decrement from there - if 'last' in r.links and not multiple_pages and not self.finishing_task: - param = r.links['last']['url'][-6:] - i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") - multiple_pages = True - elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") - elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ..." - " excess rate limit requests will be made\n") - - if len(j) == 0: - logging.info("Response was empty, breaking from pagination.\n") - break - - # Checking contents of requests with what we already have in the db - j = assign_tuple_action(self, j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) - if not j: - logging.info("Assigning tuple action failed, moving to next page.\n") - i = i + 1 if self.finishing_task else i - 1 - continue - try: - to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none'] - except Exception as e: - logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) - i = i + 1 if self.finishing_task else i - 1 - continue - if len(to_add) == 0 and multiple_pages and 'last' in r.links: - logging.info("{}".format(r.links['last'])) - if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown tuples, breaking from pagination.\n") - break - tuples += to_add - - i = i + 1 if self.finishing_task else i - 1 - - # Since we already wouldve checked the first page... 
break - if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") - break - - return tuples - -def query_github_contributors(self, entry_info, repo_id): - - """ Data collection function - Query the GitHub API for contributors - """ - logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n") - - github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] - - # Extract owner/repo from the url for the endpoint - path = urlparse(github_url) - split = path[2].split('/') - - owner = split[1] - name = split[2] - - # Handles git url case by removing the extension - if ".git" in name: - name = name[:-4] - - # Set the base of the url and place to hold contributors to insert - contributors_url = ("https://api.github.com/repos/" + owner + "/" + - name + "/contributors?per_page=100&page={}") - - # Get contributors that we already have stored - # Set our duplicate and update column map keys (something other than PK) to - # check dupicates/needed column updates with - table = 'contributors' - table_pkey = 'cntrb_id' - update_col_map = {'cntrb_email': 'email'} - duplicate_col_map = {'cntrb_login': 'login'} - - #list to hold contributors needing insertion or update - contributors = paginate(self, contributors_url, duplicate_col_map, update_col_map, table, table_pkey) - - logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") - - for repo_contributor in contributors: - try: - # Need to hit this single contributor endpoint to get extra data including... - # `created at` - # i think that's it - cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - canonical_email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'], - "cntrb_created_at": contributor['created_at'], - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - # "cntrb_type": , dont have a use for this as of now ... 
let it default to null - "cntrb_canonical": canonical_email, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - - # Commit insertion to table - if repo_contributor['flag'] == 'need_update': - result = self.db.execute(self.contributors_table.update().where( - self.history_table.c.cntrb_email==email).values(cntrb)) - logging.info("Updated tuple in the contributors table with existing email: {}".format(email)) - self.cntrb_id_inc = repo_contributor['pkey'] - elif repo_contributor['flag'] == 'need_insertion': - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) - self.results_counter += 1 - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - # Increment our global track of the cntrb id for the possibility of it being used as a FK - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - except Exception as e: - logging.info("Caught exception: {}".format(e)) - logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) - continue - -def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): - """ - Read a variable in specified section of the config file, unless provided an environment variable - - :param section: location of given variable - :param name: name of variable - """ - config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path) - _config_file_name = 'augur.config.json' - _config_bad = False - _already_exported = {} - _runtime_location = 'runtime/' - _default_config = {} - _config_file = None - - try: - _config_file = open(config_file_path, 'r+') - except: - print('Couldn\'t open {}'.format(_config_file_name)) - - # Load the config file - try: - config_text = _config_file.read() - _config = json.loads(config_text) - except json.decoder.JSONDecodeError as e: - if not _config_bad: - _using_config_file = False - print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: {}'.format(config_file_path, str(e))) - _config = _default_config - - value = None - if environment_variable is not None: - value = os.getenv(environment_variable) - if value is None: - try: - if name is not None: - value = _config[section][name] - else: - value = _config[section] - except Exception as e: - value = default - if not section in _config: - _config[section] = {} - - return value - - -def record_model_process(self, repo_id, model): - - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Stopped", - "total_results": self.results_counter - } - if self.finishing_task: - result = self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - self.history_id += 1 - else: - result = self.helper_db.execute(self.history_table.insert().values(task_history)) - logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) - self.history_id = int(result.inserted_primary_key[0]) - -def register_task_completion(self, task, repo_id, model): - # Task to send back to broker - task_completed = { - 'worker_id': self.config['id'], - 'job_type': "MAINTAIN", - 'repo_id': repo_id, - 'job_model': model - } - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] if 'git_url' in task['given'] else "INVALID_GIVEN" - if key == 'INVALID_GIVEN': - register_task_failure(self, task, repo_id, "INVALID_GIVEN: not github nor git url") - return - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Success", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job completion for: " + str(task_completed) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where( - self.job_table.c.job_model==model).values(updated_job)) - logging.info("Updated job process for model: " + model + "\n") - - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - - # Reset results counter for next task - self.results_counter = 0 - -def register_task_failure(self, task, repo_id, e): - - logging.info("Worker ran into an error for task: {}\n".format(task)) - logging.info("Printing traceback...\n") - tb = traceback.format_exc() - logging.info(tb) - - logging.info(f'This task inserted {self.results_counter} tuples before failure.\n') - logging.info("Notifying broker and logging task failure in database...\n") - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - url = task['given'][key] - - """ Query 
all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(url)) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - task['worker_id'] = self.config['id'] - try: - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=task) - except requests.exceptions.ConnectionError: - logging.error('Could not send task failure message to the broker\n') - except Exception: - logging.exception('An error occured while informing broker about task failure\n') - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": task['models'][0], - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error in the history table for: " + str(task) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==task['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + task['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - -def retrieve_tuple(self, key_values, tables): - table_str = tables[0] - del tables[0] - - key_values_items = list(key_values.items()) - for col, value in [key_values_items[0]]: - where_str = col + " = '" + value + "'" - del key_values_items[0] - - for col, value in key_values_items: - where_str += ' AND ' + col + " = '" + value + "'" - for table in tables: - table_str += ", " + table - - retrieveTupleSQL = s.sql.text(""" - SELECT * FROM {} WHERE {} - """.format(table_str, where_str)) - values = json.loads(pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")) - return values - -def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): - # Try to get rate limit from request headers, sometimes it does not work (GH's issue) - # In that case we just decrement from last recieved header count - if bad_credentials and len(self.oauths) > 1: - logging.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) - del self.oauths[0] - - if temporarily_disable: - logging.info("Github thinks we are abusing their api. 
Preventing use of this key until it resets...\n")
-        self.oauths[0]['rate_limit'] = 0
-    else:
-        try:
-            self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
-            logging.info("Recieved rate limit from headers\n")
-        except:
-            self.oauths[0]['rate_limit'] -= 1
-            logging.info("Headers did not work, had to decrement\n")
-    logging.info("Updated rate limit, you have: " +
-        str(self.oauths[0]['rate_limit']) + " requests remaining.\n")
-    if self.oauths[0]['rate_limit'] <= 0:
-        try:
-            reset_time = response.headers['X-RateLimit-Reset']
-        except Exception as e:
-            logging.info("Could not get reset time from headers because of error: {}".format(e))
-            logging.info('Headers: {}'.format(response.headers))
-            reset_time = 3600
-        time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now()
-        logging.info("Rate limit exceeded, checking for other available keys to use.\n")
-
-        # We will be finding oauth with the highest rate limit left out of our list of oauths
-        new_oauth = self.oauths[0]
-        # Endpoint to hit solely to retrieve rate limit information from headers of the response
-        url = "https://api.github.com/users/gabe-heim"
-
-        other_oauths = self.oauths[0:] if len(self.oauths) > 1 else []
-        for oauth in other_oauths:
-            logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth))
-            self.headers = {'Authorization': 'token %s' % oauth['access_token']}
-            response = requests.get(url=url, headers=self.headers)
-            oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
-            oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds()
-
-            # Update oauth to switch to if a higher limit is found
-            if oauth['rate_limit'] > new_oauth['rate_limit']:
-                logging.info("Higher rate limit found in oauth: {}\n".format(oauth))
-                new_oauth = oauth
-            elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']:
-                logging.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth))
-                new_oauth = oauth
-
-        if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0:
-            logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth))
-            time.sleep(new_oauth['seconds_to_reset'])
-
-        # Make new oauth the 0th element in self.oauths so we know which one is in use
-        index = self.oauths.index(new_oauth)
-        self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0]
-        logging.info("Using oauth: {}\n".format(self.oauths[0]))
-
-        # Change headers to be using the new oauth's key
-        self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
diff --git a/workers/template_worker/__init__.py b/workers/template_worker/__init__.py
new file mode 100644
diff --git a/workers/template_worker/runtime.py b/workers/template_worker/runtime.py
new file mode 100644
--- /dev/null
+++ b/workers/template_worker/runtime.py
@@ -0,0 +1,23 @@
+from flask import Flask, jsonify, request, Response
+import click, os, json, requests, logging
+from workers.template_worker.template_worker import TemplateWorker
+from workers.util import create_server, WorkerGunicornApplication
+
+def main():
+    """
+    Creates the Flask app and data collection worker, then starts the Gunicorn server
+    """
+    app = Flask(__name__)
+    app.worker = TemplateWorker()
+
+    create_server(app)
+    WorkerGunicornApplication(app).run()
+
+    if app.worker._child is not None:
+        app.worker._child.terminate()
+    try:
+        requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']})
+    except:
+        pass
+
+    os.kill(os.getpid(), 9)
diff --git a/workers/template_worker/setup.py b/workers/template_worker/setup.py
new file mode 100644
--- /dev/null
+++ b/workers/template_worker/setup.py
@@ -0,0 +1,40 @@
+import io
+import os
+import re
+
+from setuptools import find_packages
+from setuptools import setup
+
+def read(filename):
+    filename = os.path.join(os.path.dirname(__file__), filename)
+    text_type = type(u"")
+    with io.open(filename, mode="r", encoding='utf-8') as fd:
+        return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
+
+setup(
+    name="template_worker",
+    version="0.0.0",
+    url="https://github.com/chaoss/augur",
+    license='MIT',
+    author="Augur Team",
+    author_email="[email protected]",
+    description="Template worker to be used as an example",
+    packages=find_packages(),
+    install_requires=[
+        'flask',
+        'requests',
+        'psycopg2-binary'
+    ],
+    entry_points={
+        'console_scripts': [
+            'template_worker_start=workers.template_worker.runtime:main',
+        ],
+    },
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'License :: OSI Approved :: MIT License',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.7',
+    ]
+)
diff --git a/workers/template_worker/template_worker.py b/workers/template_worker/template_worker.py
new file mode 100644
--- /dev/null
+++ b/workers/template_worker/template_worker.py
@@ -0,0 +1,82 @@
+import logging, os, sys, time, requests, json
+from datetime import datetime
+from multiprocessing import Process, Queue
+import pandas as pd
+import sqlalchemy as s
+from workers.worker_base import Worker
+
+class TemplateWorker(Worker):
+    def __init__(self, config={}):
+
+        # Define the worker's type, which will be used for self identification.
+        # Should be unique among all workers and is the same key used to define
+        # this worker's settings in the configuration file.
+        worker_type = "template_worker"
+
+        # Define what this worker can be given and know how to interpret
+        # given is usually either [['github_url']] or [['git_url']] (depending if your
+        # worker is exclusive to repos that are on the GitHub platform)
+        given = [[]]
+
+        # The name the housekeeper/broker use to distinguish the data model this worker can fill
+        # You will also need to name the method that does the collection for this model
+        # in the format *model name*_model() such as fake_data_model() for example
+        models = ['fake_data']
+
+        # Define the tables needed to insert, update, or delete on
+        # The Worker class will set each table you define here as an attribute
+        # so you can reference all of them like self.message_table or self.repo_table
+        data_tables = ['message', 'repo']
+        # For most workers you will only need the worker_history and worker_job tables
+        # from the operations schema, these tables are to log worker task histories
+        operations_tables = ['worker_history', 'worker_job']
+
+        # Run the general worker initialization
+        super().__init__(worker_type, config, given, models, data_tables, operations_tables)
+
+        # Do any additional configuration after the general initialization has been run
+        self.config.update(config)
+
+        # If you need to do some preliminary interactions with the database, these MUST go
+        # in the model method. The database connection is instantiated only inside of each
+        # data collection process
+
+        # Define data collection info
+        self.tool_source = 'Fake Template Worker'
+        self.tool_version = '0.0.0'
+        self.data_source = 'Non-existent API'
+
+    def fake_data_model(self, task, repo_id):
+        """ This is just an example of a data collection method. All data collection
+        methods for all workers currently accept this format of parameters. If you
+        want to change these parameters, you can re-define the collect() method to
+        overwrite the Worker class' version of it (which is the method that calls
+        this method).
+
+        :param task: the task generated by the housekeeper and sent to the broker which
+            was then sent to this worker. Takes the example dict format of:
+            {
+                'job_type': 'MAINTAIN',
+                'models': ['fake_data'],
+                'display_name': 'fake_data model for url: https://github.com/vmware/vivace',
+                'given': {
+                    'git_url': 'https://github.com/vmware/vivace'
+                }
+            }
+        :param repo_id: the collect() method queries the repo_id given the git/github url
+            and passes it along to make things easier. An int such as: 27869
+
+        """
+
+        # Any initial database instructions, like finding the last tuple inserted or generate the next ID value
+
+        # Collection and insertion of data happens here
+
+        # ...
+
+        # Register this task as completed.
+        # This is a method of the worker class that is required to be called upon completion
+        # of any data collection model, this lets the broker know that this worker is ready
+        # for another task
+        self.register_task_completion(task, repo_id, 'fake_data')
+
diff --git a/workers/util.py b/workers/util.py
new file mode 100644
--- /dev/null
+++ b/workers/util.py
@@ -0,0 +1,111 @@
+import os, json, requests, logging
+from flask import Flask, Response, jsonify, request
+import gunicorn.app.base
+
+def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0):
+    """
+    Read a variable in specified section of the config file, unless provided an environment variable
+
+    :param section: location of given variable
+    :param name: name of variable
+    """
+    config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path)
+    _config_file_name = 'augur.config.json'
+    _config_bad = False
+    _already_exported = {}
+    _runtime_location = 'runtime/'
+    _default_config = {}
+    _config_file = None
+
+    try:
+        _config_file = open(config_file_path, 'r+')
+    except:
+        print('Couldn\'t open {}'.format(_config_file_name))
+
+    # Load the config file
+    try:
+        config_text = _config_file.read()
+        _config = json.loads(config_text)
+    except json.decoder.JSONDecodeError as e:
+        if not _config_bad:
+            _using_config_file = False
+            print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it.
Error: {}'.format(config_file_path, str(e))) + _config = _default_config + + value = None + if environment_variable is not None: + value = os.getenv(environment_variable) + if value is None: + try: + if name is not None: + value = _config[section][name] + else: + value = _config[section] + except Exception as e: + value = default + if not section in _config: + _config[section] = {} + + return value + +def create_server(app, worker=None): + """ Consists of AUGWOP endpoints for the broker to communicate to this worker + Can post a new task to be added to the workers queue + Can retrieve current status of the worker + Can retrieve the workers config object + """ + + @app.route("/AUGWOP/task", methods=['POST', 'GET']) + def augwop_task(): + """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker + """ + if request.method == 'POST': #will post a task to be added to the queue + logging.info("Sending to work on task: {}".format(str(request.json))) + app.worker.task = request.json + return Response(response=request.json, + status=200, + mimetype="application/json") + if request.method == 'GET': #will retrieve the current tasks/status of the worker + return jsonify({ + "status": "not implemented" + }) + return Response(response=request.json, + status=200, + mimetype="application/json") + + @app.route("/AUGWOP/heartbeat", methods=['GET']) + def heartbeat(): + if request.method == 'GET': + return jsonify({ + "status": "alive" + }) + + @app.route("/AUGWOP/config") + def augwop_config(): + """ Retrieve worker's config + """ + return app.worker.config + +class WorkerGunicornApplication(gunicorn.app.base.BaseApplication): + + def __init__(self, app): + self.options = { + 'bind': '%s:%s' % (app.worker.config["host"], app.worker.config["port"]), + 'workers': 1, + 'errorlog': app.worker.config['server_logfile'], + 'accesslog': app.worker.config['server_logfile'], + 'loglevel': app.worker.config['log_level'], + 'capture_output': app.worker.config['capture_output'] + } + + self.application = app + super().__init__() + + def load_config(self): + config = {key: value for key, value in self.options.items() + if key in self.cfg.settings and value is not None} + for key, value in config.items(): + self.cfg.set(key.lower(), value) + + def load(self): + return self.application diff --git a/workers/value_worker/__init__.py b/workers/value_worker/__init__.py new file mode 100644 diff --git a/workers/value_worker/runtime.py b/workers/value_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/value_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.value_worker.value_worker import ValueWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ValueWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) \ No newline at end of file diff --git a/workers/value_worker/setup.py b/workers/value_worker/setup.py --- a/workers/value_worker/setup.py +++ b/workers/value_worker/setup.py @@ -5,33 +5,23 @@ from setuptools import find_packages from 
setuptools import setup - -def read(filename): - filename = os.path.join(os.path.dirname(__file__), filename) - text_type = type(u"") - with io.open(filename, mode="r", encoding='utf-8') as fd: - return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) - - setup( name="value_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', - author="Augurlabs", author_email="[email protected]", - description="Augur Worker that gathers value data", - long_description=read("README.md"), - packages=find_packages(exclude=('tests',)), - - install_requires=['flask', 'requests', 'psycopg2-binary', 'click'], - + install_requires=[ + 'flask', + 'requests', + 'psycopg2-binary', + ], entry_points={ 'console_scripts': [ - 'value_worker_start=value_worker.runtime:main', + 'value_worker_start=workers.value_worker.runtime:main', ], }, diff --git a/workers/value_worker/value_worker.py b/workers/value_worker/value_worker.py new file mode 100644 --- /dev/null +++ b/workers/value_worker/value_worker.py @@ -0,0 +1,94 @@ +import os, subprocess +from datetime import datetime +import logging +import requests +import json +from urllib.parse import quote +from multiprocessing import Process, Queue + +import pandas as pd +import sqlalchemy as s +from sqlalchemy.ext.automap import automap_base +from sqlalchemy import MetaData +from workers.worker_base import Worker + +class ValueWorker(Worker): + def __init__(self, config={}): + + worker_type = "value_worker" + + # Define what this worker can be given and know how to interpret + given = [['git_url']] + models = ['value'] + + # Define the tables needed to insert, update, or delete on + data_tables = ['repo_labor'] + operations_tables = ['worker_history', 'worker_job'] + + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({ + 'repo_directory': self.augur_config.get_value('Workers', 'facade_worker')['repo_directory'] + }) + + self.tool_source = 'Value Worker' + self.tool_version = '1.0.0' + self.data_source = 'SCC' + + def value_model(self, entry_info, repo_id): + """ Data collection and storage method + """ + self.logger.info(entry_info) + self.logger.info(repo_id) + + repo_path_sql = s.sql.text(""" + SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path + FROM repo + WHERE repo_id = :repo_id + """) + + relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1] + absolute_repo_path = self.config['repo_directory'] + relative_repo_path + + try: + self.generate_value_data(repo_id, absolute_repo_path) + except Exception as e: + self.logger.error(e) + + self.register_task_completion(entry_info, repo_id, "value") + + def generate_value_data(self, repo_id, path): + """Runs scc on repo and stores data in database + + :param repo_id: Repository ID + :param path: Absolute path of the Repostiory + """ + self.logger.info('Running `scc`....') + self.logger.info(f'Repo ID: {repo_id}, Path: {path}') + + output = subprocess.check_output([self.config['scc_bin'], '-f', 'json', path]) + records = json.loads(output.decode('utf8')) + + for record in records: + for file in record['Files']: + repo_labor = { + 'repo_id': repo_id, + 'rl_analysis_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'), + 'programming_language': file['Language'], + 'file_path': file['Location'], + 'file_name': file['Filename'], + 'total_lines': file['Lines'], + 'code_lines': file['Code'], + 
'comment_lines': file['Comment'], + 'blank_lines': file['Blank'], + 'code_complexity': file['Complexity'], + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source, + 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') + } + + result = self.db.execute(self.repo_labor_table.insert().values(repo_labor)) + self.logger.info(f"Added Repo Labor Data: {result.inserted_primary_key}") diff --git a/workers/value_worker/value_worker/__init__.py b/workers/value_worker/value_worker/__init__.py deleted file mode 100644 --- a/workers/value_worker/value_worker/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""value_worker - Augur Worker that collects value data""" - -__tool_source__ = 'Value Worker' -__tool_version__ = '0.1.0' -__data_source__ = 'SCC' - -__author__ = 'Augur Team <[email protected]>' -__all__ = [] - diff --git a/workers/value_worker/value_worker/runtime.py b/workers/value_worker/value_worker/runtime.py deleted file mode 100644 --- a/workers/value_worker/value_worker/runtime.py +++ /dev/null @@ -1,122 +0,0 @@ -import json -import logging -import os -import subprocess -import sys - -import click -import requests -from flask import Flask, Response, jsonify, request - -from value_worker.worker import ValueWorker - -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - # POST a task to be added to the queue - if request.method == 'POST': - logging.info("Sending to work on task: {}".format(str(request.json))) - app.value_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.value_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51239, help='Port') [email protected]('--scc-bin', default=f'{os.environ["HOME"]}/go/bin/scc', help='scc binary') -def main(augur_url, host, port, scc_bin): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'value_worker', None, - { - "port": 37300, - "scc_bin": "/home/sean/go/bin/scc" - }) - - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = 
requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.value_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - 'scc_bin': worker_info['scc_bin'], - 'repo_directory': read_config('Workers', 'facade_worker', None, None)['repo_directory'], - } - - # Create the worker that will be running on this server with specified config - app.value_worker = ValueWorker(config) - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - - app.run(debug=app.debug, host=host, port=worker_port) - if app.value_worker._child is not None: - app.value_worker._child.terminate() - try: - requests.post(f'http://{server["host"]}:{server["port"]}/api/unstable/workers/remove', json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/value_worker/value_worker/worker.py b/workers/value_worker/value_worker/worker.py deleted file mode 100644 --- a/workers/value_worker/value_worker/worker.py +++ /dev/null @@ -1,267 +0,0 @@ -import os, subprocess -from datetime import datetime -import logging -import requests -import json -from urllib.parse import quote -from multiprocessing import Process, Queue - -from value_worker import __data_source__, __tool_source__, __tool_version__ -import pandas as pd -import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData -from workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class ValueWorker: - def __init__(self, config, task=None): - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.config = config - - self.db = None - self.value_table = None - - self._task = task - self._queue = Queue() - self._child = None - - self.history_id = None - self.finishing_task = False - self.working_on = None - self.results_counter = 0 - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["value"] - } - ], - "config": [self.config] - } - - self._db_str = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], - self.config['password'], - self.config['host'], - self.config['port'], - 
self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self._db_str, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self._db_str, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - logging.info("Database connection established...") - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['repo_labor']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.repo_labor_table = Base.classes.repo_labor.__table__ - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - logging.info("ORM setup complete...") - - # Organize different api keys/oauths available - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauth_sql = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(0)) - - for oauth in [{'oauth_id': 0, 'access_token': 0}] + json.loads(pd.read_sql(oauth_sql, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) \ - - datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - - # Send broker hello message - connect_to_broker(self) - logging.info("Connected to the broker...\n") - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "key": "", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['github_api_key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - else: - 
self.finishing_task = False - logging.info("Focused task is OFF\n") - - self._task = value - self.run() - - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def value_model(self, entry_info, repo_id): - """ Data collection and storage method - """ - logging.info(entry_info) - logging.info(repo_id) - - repo_path_sql = s.sql.text(""" - SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path - FROM repo - WHERE repo_id = :repo_id - """) - - relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1] - absolute_repo_path = self.config['repo_directory'] + relative_repo_path - - try: - self.generate_value_data(repo_id, absolute_repo_path) - except Exception as e: - logging.error(e) - - register_task_completion(self, entry_info, repo_id, "value") - - def generate_value_data(self, repo_id, path): - """Runs scc on repo and stores data in database - - :param repo_id: Repository ID - :param path: Absolute path of the Repostiory - """ - logging.info('Running `scc`....') - logging.info(f'Repo ID: {repo_id}, Path: {path}') - - output = subprocess.check_output([self.config['scc_bin'], '-f', 'json', path]) - records = json.loads(output.decode('utf8')) - - for record in records: - for file in record['Files']: - repo_labor = { - 'repo_id': repo_id, - 'rl_analysis_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'), - 'programming_language': file['Language'], - 'file_path': file['Location'], - 'file_name': file['Filename'], - 'total_lines': file['Lines'], - 'code_lines': file['Code'], - 'comment_lines': file['Comment'], - 'blank_lines': file['Blank'], - 'code_complexity': file['Complexity'], - 'tool_source': __tool_source__, - 'tool_version': __tool_version__, - 'data_source': __data_source__, - 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') - } - - result = self.db.execute(self.repo_labor_table.insert().values(repo_labor)) - logging.info(f"Added Repo Labor Data: {result.inserted_primary_key}") - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - """ Query all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'value': - self.value_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() diff --git a/workers/worker_base.py b/workers/worker_base.py new file mode 100644 --- /dev/null +++ b/workers/worker_base.py @@ -0,0 +1,1347 @@ +""" Helper methods constant across all workers """ +import requests, datetime, time, traceback, json, os, sys, math, logging +from 
logging import FileHandler, Formatter, StreamHandler +from multiprocessing import Process, Queue +import sqlalchemy as s +import pandas as pd +from pathlib import Path +from urllib.parse import urlparse, quote +from sqlalchemy import MetaData +from sqlalchemy.ext.automap import automap_base +from augur.config import AugurConfig +from augur.logging import AugurLogging + +class Worker(): + + ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + + def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[], platform="github"): + + self.worker_type = worker_type + self._task = None # task currently being worked on (dict) + self._child = None # process of currently running task (multiprocessing process) + self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes) + self.data_tables = data_tables + self.operations_tables = operations_tables + self._root_augur_dir = Worker.ROOT_AUGUR_DIR + self.platform = platform + + # count of tuples inserted in the database (to store stats for each task in op tables) + self.results_counter = 0 + + # if we are finishing a previous task, certain operations work differently + self.finishing_task = False + # Update config with options that are general and not specific to any worker + self.augur_config = AugurConfig(self._root_augur_dir) + + self.config = { + 'worker_type': self.worker_type, + 'host': self.augur_config.get_value("Server", "host"), + 'gh_api_key': self.augur_config.get_value('Database', 'key'), + 'offline_mode': False + } + self.config.update(self.augur_config.get_section("Logging")) + + try: + worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']] + self.config.update(worker_defaults) + except KeyError as e: + logging.warn('Could not get default configuration for {}'.format(self.config['worker_type'])) + + worker_info = self.augur_config.get_value('Workers', self.config['worker_type']) + self.config.update(worker_info) + + worker_port = self.config['port'] + while True: + try: + r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format( + self.config['host'], worker_port)).json() + if 'status' in r: + if r['status'] == 'alive': + worker_port += 1 + except: + break + + self.config.update({ + "port": worker_port, + "id": "workers.{}.{}".format(self.worker_type, worker_port), + "capture_output": False, + 'location': 'http://{}:{}'.format(self.config["host"], worker_port), + 'port_broker': self.augur_config.get_value('Server', 'port'), + 'host_broker': self.augur_config.get_value('Server', 'host'), + 'host_database': self.augur_config.get_value('Database', 'host'), + 'port_database': self.augur_config.get_value('Database', 'port'), + 'user_database': self.augur_config.get_value('Database', 'user'), + 'name_database': self.augur_config.get_value('Database', 'name'), + 'password_database': self.augur_config.get_value('Database', 'password') + }) + self.config.update(config) + + # Initialize logging in the main process + self.initialize_logging() + + # Clear log contents from previous runs + open(self.config["server_logfile"], "w").close() + open(self.config["collection_logfile"], "w").close() + + # Get configured collection logger + self.logger = logging.getLogger(self.config["id"]) + self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) + + self.given = given + self.models = models + self.specs = { + 'id': self.config['id'], # what the broker knows this worker as + 
'location': self.config['location'], # host + port worker is running on (so broker can send tasks here) + 'qualifications': [ + { + 'given': self.given, # type of repo this worker can be given as a task + 'models': self.models # models this worker can fill for a repo as a task + } + ], + 'config': self.config + } + + # Send broker hello message + if self.config["offline_mode"] is False: + self.connect_to_broker() + + try: + self.tool_source + self.tool_version + self.data_source + except: + self.tool_source = 'Augur Worker Testing' + self.tool_version = '0.0.0' + self.data_source = 'Augur Worker Testing' + + def __repr__(self): + return f"{self.config['id']}" + + def initialize_logging(self): + self.config["log_level"] = self.config["log_level"].upper() + if self.config["debug"]: + self.config["log_level"] = "DEBUG" + + if self.config["verbose"]: + format_string = AugurLogging.verbose_format_string + else: + format_string = AugurLogging.simple_format_string + + formatter = Formatter(fmt=format_string) + error_formatter = Formatter(fmt=AugurLogging.error_format_string) + + worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/" + Path(worker_dir).mkdir(exist_ok=True) + logfile_dir = worker_dir + f"/{self.worker_type}/" + Path(logfile_dir).mkdir(exist_ok=True) + + server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"]) + collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"]) + collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"]) + self.config.update({ + "logfile_dir": logfile_dir, + "server_logfile": server_logfile, + "collection_logfile": collection_logfile, + "collection_errorfile": collection_errorfile + }) + + collection_file_handler = FileHandler(filename=self.config["collection_logfile"], mode="a") + collection_file_handler.setFormatter(formatter) + collection_file_handler.setLevel(self.config["log_level"]) + + collection_errorfile_handler = FileHandler(filename=self.config["collection_errorfile"], mode="a") + collection_errorfile_handler.setFormatter(error_formatter) + collection_errorfile_handler.setLevel(logging.WARNING) + + logger = logging.getLogger(self.config["id"]) + logger.handlers = [] + logger.addHandler(collection_file_handler) + logger.addHandler(collection_errorfile_handler) + logger.setLevel(self.config["log_level"]) + logger.propagate = False + + if self.config["debug"]: + self.config["log_level"] = "DEBUG" + console_handler = StreamHandler() + console_handler.setFormatter(formatter) + console_handler.setLevel(self.config["log_level"]) + logger.addHandler(console_handler) + + if self.config["quiet"]: + logger.disabled = True + + self.logger = logger + + def initialize_database_connections(self): + DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( + self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database'] + ) + + # Create an sqlalchemy engine for both database schemas + self.logger.info("Making database connections") + + db_schema = 'augur_data' + self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, + connect_args={'options': '-csearch_path={}'.format(db_schema)}) + + helper_schema = 'augur_operations' + self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, + connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + + metadata = MetaData() + helper_metadata = 
MetaData() + + # Reflect only the tables we will use for each schema's metadata object + metadata.reflect(self.db, only=self.data_tables) + helper_metadata.reflect(self.helper_db, only=self.operations_tables) + + Base = automap_base(metadata=metadata) + HelperBase = automap_base(metadata=helper_metadata) + + Base.prepare() + HelperBase.prepare() + + # So we can access all our tables when inserting, updating, etc + for table in self.data_tables: + setattr(self, '{}_table'.format(table), Base.classes[table].__table__) + + try: + self.logger.info(HelperBase.classes.keys()) + except: + pass + for table in self.operations_tables: + try: + setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__) + except Exception as e: + self.logger.error("Error setting attribute for table: {} : {}".format(table, e)) + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + + # Organize different api keys/oauths available + if 'gh_api_key' in self.config or 'gitlab_api_key' in self.config: + self.init_oauths(self.platform) + else: + self.oauths = [{'oauth_id': 0}] + + @property + def task(self): + """ Property that is returned when the worker's current task is referenced + """ + return self._task + + @task.setter + def task(self, value): + """ entry point for the broker to add a task to the queue + Adds this task to the queue, and calls method to process queue + """ + # If the task has one of our "valid" job types + if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": + self._queue.put(value) + + # Setting that causes paginating through ALL pages, not just unknown ones + # This setting is set by the housekeeper and is attached to the task before it gets sent here + if 'focused_task' in value: + if value['focused_task'] == 1: + self.logger.debug("Focused task is ON\n") + self.finishing_task = True + + self._task = value + self.run() + + def cancel(self): + """ Delete/cancel current task + """ + self._task = None + + def run(self): + """ Kicks off the processing of the queue if it is not already being processed + Gets run whenever a new task is added + """ + # Spawn a subprocess to handle message reading and performing the tasks + self._child = Process(target=self.collect, args=()) + self._child.start() + + def collect(self): + """ Function to process each entry in the worker's task queue + Determines what action to take based off the message type + """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() + while True: + if not self._queue.empty(): + message = self._queue.get() # Get the task off our MP queue + else: + break + self.logger.info("Popped off message: {}\n".format(str(message))) + + if message['job_type'] == 'STOP': + break + + # If task is not a valid job type + if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': + raise ValueError('{} is not a recognized task type'.format(message['job_type'])) + pass + + # Query repo_id corresponding to repo url of given task + repoUrlSQL = s.sql.text(""" + SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' + """.format(message['given'][self.given[0][0]])) + repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) + self.logger.info("repo_id for which data collection is being initiated: 
{}".format(str(repo_id))) + # Call method corresponding to model sent in task + try: + model_method = getattr(self, '{}_model'.format(message['models'][0])) + self.record_model_process(repo_id, 'repo_info') + except Exception as e: + self.logger.error('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) + + 'must have name of {}_model'.format(message['models'][0])) + self.register_task_failure(message, repo_id, e) + break + + # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught + # and worker can move onto the next task without stopping + try: + self.logger.info("Calling model method {}_models".format(message['models'][0])) + model_method(message, repo_id) + except Exception as e: # this could be a custom exception, might make things easier + self.register_task_failure(message, repo_id, e) + break + + self.logger.debug('Closing database connections\n') + self.db.dispose() + self.helper_db.dispose() + self.logger.info("Collection process finished") + + def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): + """ Include an extra key-value pair on each element of new_data that represents + the action that should be taken with this element (i.e. 'need_insertion') + + :param new_data: List of dictionaries, data to be assigned an action to + :param table_values: Pandas DataFrame, existing data in the database to check + what action should be taken on the new_data depending on the presence of + each element in this DataFrame + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. + Example: {'id': 'gh_issue_id'} + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. Example: {'id': 'gh_issue_id'} + :param table_pkey: String, the field name of the primary key of the table in + the database that we are checking the table_values for. + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. 
+ Example: {'cntrb_id': None} + :return: List of dictionaries, contains all the same elements of new_data, except + each element now has an extra key-value pair with the key being 'flag', and + the value being 'need_insertion', 'need_update', or 'none' + """ + need_insertion_count = 0 + need_update_count = 0 + for i, obj in enumerate(new_data): + if type(obj) != dict: + self.logger.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) + continue + + obj['flag'] = 'none' # default of no action needed + existing_tuple = None + for db_dupe_key in list(duplicate_col_map.keys()): + + if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): + if table_values[table_values[db_dupe_key].isin( + [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'): + + existing_tuple = table_values[table_values[db_dupe_key].isin( + [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] + continue + + self.logger.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) + obj['flag'] = 'need_insertion' + need_insertion_count += 1 + break + + if obj['flag'] == 'need_insertion': + self.logger.info('Already determined that current tuple needs insertion, skipping checking updates. ' + 'Moving to next tuple.\n') + continue + + if not existing_tuple: + self.logger.info('An existing tuple was not found for this data ' + + 'point and we have reached the check-updates portion of assigning ' + + 'tuple action, so we will now move to next data point\n') + continue + + # If we need to check the values of the existing tuple to determine if an update is needed + for augur_col, value_check in value_update_col_map.items(): + not_nan_check = not (math.isnan(value_check) and math.isnan(existing_tuple[augur_col])) if value_check is not None else True + if existing_tuple[augur_col] != value_check and not_nan_check: + continue + self.logger.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) + obj['flag'] = 'need_update' + obj['pkey'] = existing_tuple[table_pkey] + need_update_count += 1 + + if obj['flag'] == 'need_update': + self.logger.info('Already determined that current tuple needs update, skipping checking further updates. 
' + 'Moving to next tuple.\n') + continue + + # Now check the existing tuple's values against the response values to determine if an update is needed + for col in update_col_map.keys(): + if update_col_map[col] not in obj: + continue + if obj[update_col_map[col]] == existing_tuple[col]: + continue + self.logger.info("Found a tuple that needs an update for column: {}\n".format(col)) + obj['flag'] = 'need_update' + obj['pkey'] = existing_tuple[table_pkey] + need_update_count += 1 + + self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + + "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count)) + return new_data + + def check_duplicates(self, new_data, table_values, key): + """ Filters what items of the new_data json (list of dictionaries) that are not + present in the table_values df + + :param new_data: List of dictionaries, new data to filter duplicates out of + :param table_values: Pandas DataFrame, existing data to check what data is already + present in the database + :param key: String, key of each dict in new_data whose value we are checking + duplicates with + :return: List of dictionaries, contains elements of new_data that are not already + present in the database + """ + need_insertion = [] + for obj in new_data: + if type(obj) != dict: + continue + if not table_values.isin([obj[key]]).any().any(): + need_insertion.append(obj) + self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + + "was reduced to {} tuples.\n".format(str(len(need_insertion)))) + return need_insertion + + def connect_to_broker(self): + connected = False + for i in range(5): + try: + self.logger.debug("Connecting to broker, attempt {}\n".format(i)) + if i > 0: + time.sleep(10) + requests.post('http://{}:{}/api/unstable/workers'.format( + self.config['host_broker'],self.config['port_broker']), json=self.specs) + self.logger.info("Connection to the broker was successful\n") + connected = True + break + except requests.exceptions.ConnectionError: + self.logger.error('Cannot connect to the broker. Trying again...\n') + if not connected: + sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n') + + def dump_queue(queue): + """ + Empties all pending items in a queue and returns them in a list. + """ + result = [] + queue.put("STOP") + for i in iter(queue.get, 'STOP'): + result.append(i) + # time.sleep(.1) + return result + + def find_id_from_login(self, login, platform='github'): + """ + Retrieves our contributor table primary key value for the contributor with + the given GitHub login credentials, if this contributor is not there, then + they get inserted. 
+ + :param login: String, the GitHub login username to find the primary key id for + :return: Integer, the id of the row in our database with the matching GitHub login + """ + idSQL = s.sql.text(""" + SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' \ + AND LOWER(data_source) = '{} api' + """.format(login, platform)) + + self.logger.info(idSQL) + + rs = pd.read_sql(idSQL, self.db, params={}) + data_list = [list(row) for row in rs.itertuples(index=False)] + try: + return data_list[0][0] + except: + self.logger.info('contributor needs to be added...') + + if platform == 'github': + cntrb_url = ("https://api.github.com/users/" + login) + elif platform == 'gitlab': + cntrb_url = ("https://gitlab.com/api/v4/users?username=" + login ) + self.logger.info("Hitting endpoint: {} ...\n".format(cntrb_url)) + r = requests.get(url=cntrb_url, headers=self.headers) + self.update_rate_limit(r) + contributor = r.json() + + + company = None + location = None + email = None + if 'company' in contributor: + company = contributor['company'] + if 'location' in contributor: + location = contributor['location'] + if 'email' in contributor: + email = contributor['email'] + + + if platform == 'github': + cntrb = { + "cntrb_login": contributor['login'] if 'login' in contributor else None, + "cntrb_email": contributor['email'] if 'email' in contributor else None, + "cntrb_company": contributor['company'] if 'company' in contributor else None, + "cntrb_location": contributor['location'] if 'location' in contributor else None, + "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, + "cntrb_canonical": None, + "gh_user_id": contributor['id'] if 'id' in contributor else None, + "gh_login": contributor['login'] if 'login' in contributor else None, + "gh_url": contributor['url'] if 'url' in contributor else None, + "gh_html_url": contributor['html_url'] if 'html_url' in contributor else None, + "gh_node_id": contributor['node_id'] if 'node_id' in contributor else None, + "gh_avatar_url": contributor['avatar_url'] if 'avatar_url' in contributor else None, + "gh_gravatar_id": contributor['gravatar_id'] if 'gravatar_id' in contributor else None, + "gh_followers_url": contributor['followers_url'] if 'followers_url' in contributor else None, + "gh_following_url": contributor['following_url'] if 'following_url' in contributor else None, + "gh_gists_url": contributor['gists_url'] if 'gists_url' in contributor else None, + "gh_starred_url": contributor['starred_url'] if 'starred_url' in contributor else None, + "gh_subscriptions_url": contributor['subscriptions_url'] if 'subscriptions_url' in contributor else None, + "gh_organizations_url": contributor['organizations_url'] if 'organizations_url' in contributor else None, + "gh_repos_url": contributor['repos_url'] if 'repos_url' in contributor else None, + "gh_events_url": contributor['events_url'] if 'events_url' in contributor else None, + "gh_received_events_url": contributor['received_events_url'] if 'received_events_url' in contributor else None, + "gh_type": contributor['type'] if 'type' in contributor else None, + "gh_site_admin": contributor['site_admin'] if 'site_admin' in contributor else None, + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + + elif platform == 'gitlab': + cntrb = { + "cntrb_login": contributor[0]['username'] if 'username' in contributor[0] else None, + "cntrb_email": email, + "cntrb_company": company, + "cntrb_location": location, + 
"cntrb_created_at": contributor[0]['created_at'] if 'created_at' in contributor[0] else None, + "cntrb_canonical": None, + "gh_user_id": contributor[0]['id'], + "gh_login": contributor[0]['username'], + "gh_url": contributor[0]['web_url'], + "gh_html_url": None, + "gh_node_id": None, + "gh_avatar_url": contributor[0]['avatar_url'], + "gh_gravatar_id": None, + "gh_followers_url": None, + "gh_following_url": None, + "gh_gists_url": None, + "gh_starred_url": None, + "gh_subscriptions_url": None, + "gh_organizations_url": None, + "gh_repos_url": None, + "gh_events_url": None, + "gh_received_events_url": None, + "gh_type": None, + "gh_site_admin": None, + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + result = self.db.execute(self.contributors_table.insert().values(cntrb)) + self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) + self.results_counter += 1 + self.cntrb_id_inc = int(result.inserted_primary_key[0]) + + self.logger.info("Inserted contributor: " + cntrb['cntrb_login'] + "\n") + + return self.find_id_from_login(login, platform) + + def get_owner_repo(self, git_url): + """ Gets the owner and repository names of a repository from a git url + + :param git_url: String, the git url of a repository + :return: Tuple, includes the owner and repository names in that order + """ + split = git_url.split('/') + + owner = split[-2] + repo = split[-1] + + if '.git' == repo[-4:]: + repo = repo[:-4] + + return owner, repo + + def get_max_id(self, table, column, default=25150, operations_table=False): + """ Gets the max value (usually used for id/pk's) of any Integer column + of any table + + :param table: String, the table that consists of the column you want to + query a max value for + :param column: String, the column that you want to query the max value for + :param default: Integer, if there are no values in the + specified column, the value of this parameter will be returned + :param operations_table: Boolean, if True, this signifies that the table/column + that is wanted to be queried is in the augur_operations schema rather than + the augur_data schema. Default False + :return: Integer, the max value of the specified column/table + """ + maxIdSQL = s.sql.text(""" + SELECT max({0}.{1}) AS {1} + FROM {0} + """.format(table, column)) + db = self.db if not operations_table else self.helper_db + rs = pd.read_sql(maxIdSQL, db, params={}) + if rs.iloc[0][column] is not None: + max_id = int(rs.iloc[0][column]) + 1 + self.logger.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) + else: + max_id = default + self.logger.warning('Could not find max id for {} column in the {} table... 
' + + 'using default set to: {}\n'.format(column, table, max_id)) + return max_id + + def get_table_values(self, cols, tables, where_clause=""): + """ Can query all values of any column(s) from any table(s) + with an optional where clause + + :param cols: List of Strings, column(s) that user wants to query + :param tables: List of Strings, table(s) that user wants to query + :param where_clause: String, optional where clause to filter the values + queried + :return: Pandas DataFrame, contains all values queried in the columns, tables, and + optional where clause provided + """ + table_str = tables[0] + del tables[0] + + col_str = cols[0] + del cols[0] + + for table in tables: + table_str += ", " + table + for col in cols: + col_str += ", " + col + + table_values_sql = s.sql.text(""" + SELECT {} FROM {} {} + """.format(col_str, table_str, where_clause)) + self.logger.info('Getting table values with the following PSQL query: \n{}\n'.format( + table_values_sql)) + values = pd.read_sql(table_values_sql, self.db, params={}) + return values + + def init_oauths(self, platform="github"): + self.oauths = [] + self.headers = None + + # Make a list of api key in the config combined w keys stored in the database + # Select endpoint to hit solely to retrieve rate limit information from headers of the response + # Adjust header keys needed to fetch rate limit information from the API responses + if platform == "github": + url = "https://api.github.com/users/gabe-heim" + oauthSQL = s.sql.text(""" + SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'github' + """.format(self.config['gh_api_key'])) + key_name = "gh_api_key" + rate_limit_header_key = "X-RateLimit-Remaining" + rate_limit_reset_header_key = "X-RateLimit-Reset" + elif platform == "gitlab": + url = "https://gitlab.com/api/v4/version" + oauthSQL = s.sql.text(""" + SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'gitlab' + """.format(self.config['gitlab_api_key'])) + key_name = "gitlab_api_key" + rate_limit_header_key = "ratelimit-remaining" + rate_limit_reset_header_key = "ratelimit-reset" + + for oauth in [{'oauth_id': 0, 'access_token': self.config[key_name]}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): + if platform == "github": + self.headers = {'Authorization': 'token %s' % oauth['access_token']} + elif platform == "gitlab": + self.headers = {'Authorization': 'Bearer %s' % oauth['access_token']} + self.logger.info("Getting rate limit info for oauth: {}\n".format(oauth)) + response = requests.get(url=url, headers=self.headers) + self.oauths.append({ + 'oauth_id': oauth['oauth_id'], + 'access_token': oauth['access_token'], + 'rate_limit': int(response.headers[rate_limit_header_key]), + 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers[rate_limit_reset_header_key])) - datetime.datetime.now()).total_seconds() + }) + self.logger.debug("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) + + if len(self.oauths) == 0: + self.logger.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") + + # First key to be used will be the one specified in the config (first element in + # self.oauths array will always be the key in use) + if platform == "github": + self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + elif platform == "gitlab": + self.headers = {'Authorization': 'Bearer %s' % self.oauths[0]['access_token']} + + 
self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + self.logger.info("OAuth initialized") + + def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}, platform="github"): + """ Paginate either backwards or forwards (depending on the value of the worker's + finishing_task attribute) through all the GitHub or GitLab api endpoint pages. + + :param url: String, the url of the API endpoint we are paginating through, expects + a curly brace string formatter within the string to format the Integer + representing the page number that is wanted to be returned + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. Example: {'id': 'gh_issue_id'} + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. + Example: {'id': 'gh_issue_id'} + :param table: String, the name of the table that holds the values to check for + duplicates/updates against + :param table_pkey: String, the field name of the primary key of the table in + the database that we are getting the values for to cross-reference to check + for duplicates. + :param where_clause: String, optional where clause to filter the values + that are queried when preparing the values that will be cross-referenced + for duplicates/updates + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. + Example: {'cntrb_id': None} + :return: List of dictionaries, all data points from the pages of the specified API endpoint + each with a 'flag' key-value pair representing the required action to take with that + data point (i.e. 
'need_insertion', 'need_update', 'none') + """ + + update_keys = list(update_col_map.keys()) if update_col_map else [] + update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] + cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] + table_values = self.get_table_values(cols_query, [table], where_clause) + + i = 1 + multiple_pages = False + tuples = [] + while True: + num_attempts = 0 + success = False + while num_attempts < 3: + self.logger.info(f'Hitting endpoint: {url.format(i)}...\n') + r = requests.get(url=url.format(i), headers=self.headers) + + self.update_rate_limit(r, platform=platform) + if 'last' not in r.links: + last_page = None + else: + if platform == "github": + last_page = r.links['last']['url'][-6:].split('=')[1] + elif platform == "gitlab": + last_page = r.links['last']['url'].split('&')[2].split("=")[1] + self.logger.info("Analyzing page {} of {}\n".format(i, int(last_page) + 1 if last_page is not None else '*last page not known*')) + + try: + j = r.json() + except: + j = json.loads(json.dumps(r.text)) + + if type(j) != dict and type(j) != str: + success = True + break + elif type(j) == dict: + self.logger.info("Request returned a dict: {}\n".format(j)) + if j['message'] == 'Not Found': + self.logger.warning("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + break + if j['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': + num_attempts -= 1 + self.logger.info("rate limit update code goes here") + self.update_rate_limit(r, temporarily_disable=True,platform=platform) + if j['message'] == 'Bad credentials': + self.logger.info("rate limit update code goes here") + self.update_rate_limit(r, bad_credentials=True, platform=platform) + elif type(j) == str: + self.logger.info(f'J was string: {j}\n') + if '<!DOCTYPE html>' in j: + self.logger.info('HTML was returned, trying again...\n') + elif len(j) == 0: + self.logger.warning('Empty string, trying again...\n') + else: + try: + j = json.loads(j) + success = True + break + except: + pass + num_attempts += 1 + if not success: + break + + # Find last page so we can decrement from there + if 'last' in r.links and not multiple_pages and not self.finishing_task: + if platform == "github": + param = r.links['last']['url'][-6:] + i = int(param.split('=')[1]) + 1 + elif platform == "gitlab": + i = int(r.links['last']['url'].split('&')[2].split("=")[1]) + 1 + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + multiple_pages = True + elif not multiple_pages and not self.finishing_task: + self.logger.info("Only 1 page of request\n") + elif self.finishing_task: + self.logger.info("Finishing a previous task, paginating forwards ..." + " excess rate limit requests will be made\n") + + if len(j) == 0: + self.logger.info("Response was empty, breaking from pagination.\n") + break + + # Checking contents of requests with what we already have in the db + j = self.assign_tuple_action(j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) + if not j: + self.logger.error("Assigning tuple action failed, moving to next page.\n") + i = i + 1 if self.finishing_task else i - 1 + continue + try: + to_add = [obj for obj in j if obj not in tuples and (obj['flag'] != 'none')] + except Exception as e: + self.logger.error("Failure accessing data of page: {}. 
Moving to next page.\n".format(e)) + i = i + 1 if self.finishing_task else i - 1 + continue + if len(to_add) == 0 and multiple_pages and 'last' in r.links: + self.logger.info("{}".format(r.links['last'])) + if platform == "github": + page_number = int(r.links['last']['url'][-6:].split('=')[1]) + elif platform == "gitlab": + page_number = int(r.links['last']['url'].split('&')[2].split("=")[1]) + if i - 1 != page_number: + self.logger.info("No more pages with unknown tuples, breaking from pagination.\n") + break + + tuples += to_add + + i = i + 1 if self.finishing_task else i - 1 + + # Since we already wouldve checked the first page... break + if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: + self.logger.info("No more pages to check, breaking from pagination.\n") + break + + return tuples + + def query_github_contributors(self, entry_info, repo_id): + + """ Data collection function + Query the GitHub API for contributors + """ + self.logger.info(f'Querying contributors with given entry info: {entry_info}\n') + + github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] + + # Extract owner/repo from the url for the endpoint + owner, name = self.get_owner_repo(github_url) + + # Set the base of the url and place to hold contributors to insert + contributors_url = (f'https://api.github.com/repos/{owner}/{name}/' + + 'contributors?per_page=100&page={}') + + # Get contributors that we already have stored + # Set our duplicate and update column map keys (something other than PK) to + # check dupicates/needed column updates with + table = 'contributors' + table_pkey = 'cntrb_id' + update_col_map = {'cntrb_email': 'email'} + duplicate_col_map = {'cntrb_login': 'login'} + + #list to hold contributors needing insertion or update + contributors = self.paginate(contributors_url, duplicate_col_map, update_col_map, table, table_pkey) + + self.logger.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") + + for repo_contributor in contributors: + try: + # Need to hit this single contributor endpoint to get extra data including... + # `created at` + # i think that's it + cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") + r = requests.get(url=cntrb_url, headers=self.headers) + self.update_gh_rate_limit(r) + contributor = r.json() + + company = None + location = None + email = None + if 'company' in contributor: + company = contributor['company'] + if 'location' in contributor: + location = contributor['location'] + if 'email' in contributor: + email = contributor['email'] + canonical_email = contributor['email'] + + cntrb = { + "cntrb_login": contributor['login'], + "cntrb_created_at": contributor['created_at'], + "cntrb_email": email, + "cntrb_company": company, + "cntrb_location": location, + # "cntrb_type": , dont have a use for this as of now ... 
let it default to null + "cntrb_canonical": canonical_email, + "gh_user_id": contributor['id'], + "gh_login": contributor['login'], + "gh_url": contributor['url'], + "gh_html_url": contributor['html_url'], + "gh_node_id": contributor['node_id'], + "gh_avatar_url": contributor['avatar_url'], + "gh_gravatar_id": contributor['gravatar_id'], + "gh_followers_url": contributor['followers_url'], + "gh_following_url": contributor['following_url'], + "gh_gists_url": contributor['gists_url'], + "gh_starred_url": contributor['starred_url'], + "gh_subscriptions_url": contributor['subscriptions_url'], + "gh_organizations_url": contributor['organizations_url'], + "gh_repos_url": contributor['repos_url'], + "gh_events_url": contributor['events_url'], + "gh_received_events_url": contributor['received_events_url'], + "gh_type": contributor['type'], + "gh_site_admin": contributor['site_admin'], + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + + # Commit insertion to table + if repo_contributor['flag'] == 'need_update': + result = self.db.execute(self.contributors_table.update().where( + self.worker_history_table.c.cntrb_email==email).values(cntrb)) + self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email)) + self.cntrb_id_inc = repo_contributor['pkey'] + elif repo_contributor['flag'] == 'need_insertion': + result = self.db.execute(self.contributors_table.insert().values(cntrb)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.results_counter += 1 + + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") + + # Increment our global track of the cntrb id for the possibility of it being used as a FK + self.cntrb_id_inc = int(result.inserted_primary_key[0]) + + except Exception as e: + self.logger.error("Caught exception: {}".format(e)) + self.logger.error("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) + continue + + def query_gitlab_contribtutors(self, entry_info, repo_id): + + gitlab_url = entry_info['given']['gitlab_url'] if 'gitlab_url' in entry_info['given'] else entry_info['given']['git_url'] + + self.logger.info("Querying contributors with given entry info: " + str(entry_info) + "\n") + + path = urlparse(gitlab_url) + split = path[2].split('/') + + owner = split[1] + name = split[2] + + # Handles git url case by removing the extension + if ".git" in name: + name = name[:-4] + + url_encoded_format = quote(owner + '/' + name, safe='') + + table = 'contributors' + table_pkey = 'cntrb_id' + update_col_map = {'cntrb_email': 'email'} + duplicate_col_map = {'cntrb_login': 'email'} + + # list to hold contributors needing insertion or update + contributors = self.paginate("https://gitlab.com/api/v4/projects/" + url_encoded_format + "/repository/contributors?per_page=100&page={}", duplicate_col_map, update_col_map, table, table_pkey, platform='gitlab') + + for repo_contributor in contributors: + try: + cntrb_compressed_url = ("https://gitlab.com/api/v4/users?search=" + repo_contributor['email']) + self.logger.info("Hitting endpoint: " + cntrb_compressed_url + " ...\n") + r = requests.get(url=cntrb_compressed_url, headers=self.headers) + contributor_compressed = r.json() + + email = repo_contributor['email'] + if len(contributor_compressed) == 0 or "id" not in contributor_compressed[0]: + continue + + self.logger.info("Fetching for user: " + str(contributor_compressed[0]["id"])) 
+ + cntrb_url = ("https://gitlab.com/api/v4/users/" + str(contributor_compressed[0]["id"])) + self.logger.info("Hitting end point to get complete contributor info now: " + cntrb_url + "...\n") + r = requests.get(url=cntrb_url, headers=self.headers) + contributor = r.json() + + cntrb = { + "cntrb_login": contributor.get('username', None), + "cntrb_created_at": contributor.get('created_at', None), + "cntrb_email": email, + "cntrb_company": contributor.get('organization', None), + "cntrb_location": contributor.get('location', None), + # "cntrb_type": , dont have a use for this as of now ... let it default to null + "cntrb_canonical": contributor.get('public_email', None), + "gh_user_id": contributor.get('id', None), + "gh_login": contributor.get('username', None), + "gh_url": contributor.get('web_url', None), + "gh_html_url": contributor.get('web_url', None), + "gh_node_id": None, + "gh_avatar_url": contributor.get('avatar_url', None), + "gh_gravatar_id": None, + "gh_followers_url": None, + "gh_following_url": None, + "gh_gists_url": None, + "gh_starred_url": None, + "gh_subscriptions_url": None, + "gh_organizations_url": None, + "gh_repos_url": None, + "gh_events_url": None, + "gh_received_events_url": None, + "gh_type": None, + "gh_site_admin": None, + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + + # Commit insertion to table + if repo_contributor['flag'] == 'need_update': + result = self.db.execute(self.contributors_table.update().where( + self.worker_history_table.c.cntrb_email == email).values(cntrb)) + self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email)) + self.cntrb_id_inc = repo_contributor['pkey'] + elif repo_contributor['flag'] == 'need_insertion': + result = self.db.execute(self.contributors_table.insert().values(cntrb)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.results_counter += 1 + + self.logger.info("Inserted contributor: " + contributor['username'] + "\n") + + # Increment our global track of the cntrb id for the possibility of it being used as a FK + self.cntrb_id_inc = int(result.inserted_primary_key[0]) + + except Exception as e: + self.logger.info("Caught exception: {}".format(e)) + self.logger.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) + continue + + def record_model_process(self, repo_id, model): + + task_history = { + "repo_id": repo_id, + "worker": self.config['id'], + "job_model": model, + "oauth_id": self.oauths[0]['oauth_id'], + "timestamp": datetime.datetime.now(), + "status": "Stopped", + "total_results": self.results_counter + } + if self.finishing_task: + result = self.helper_db.execute(self.worker_history_table.update().where( + self.worker_history_table.c.history_id==self.history_id).values(task_history)) + self.history_id += 1 + else: + result = self.helper_db.execute(self.worker_history_table.insert().values(task_history)) + self.logger.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) + self.history_id = int(result.inserted_primary_key[0]) + + def register_task_completion(self, task, repo_id, model): + # Task to send back to broker + task_completed = { + 'worker_id': self.config['id'], + 'job_type': "MAINTAIN", + 'repo_id': repo_id, + 'job_model': model + } + key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \ + 'gitlab_url' if 
'gitlab_url' in task['given'] else 'INVALID_GIVEN' + task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] \ + if 'git_url' in task['given'] else task['given']['gitlab_url'] if 'gitlab_url' in task['given'] else 'INVALID_GIVEN' + if key == 'INVALID_GIVEN': + self.register_task_failure(task, repo_id, "INVALID_GIVEN: Not a github/gitlab/git url.") + return + + # Add to history table + task_history = { + "repo_id": repo_id, + "worker": self.config['id'], + "job_model": model, + "oauth_id": self.oauths[0]['oauth_id'], + "timestamp": datetime.datetime.now(), + "status": "Success", + "total_results": self.results_counter + } + self.helper_db.execute(self.worker_history_table.update().where( + self.worker_history_table.c.history_id==self.history_id).values(task_history)) + + self.logger.info("Recorded job completion for: " + str(task_completed) + "\n") + + # Update job process table + updated_job = { + "since_id_str": repo_id, + "last_count": self.results_counter, + "last_run": datetime.datetime.now(), + "analysis_state": 0 + } + self.helper_db.execute(self.worker_job_table.update().where( + self.worker_job_table.c.job_model==model).values(updated_job)) + self.logger.info("Updated job process for model: " + model + "\n") + + if self.config["offline_mode"] is False: + + # Notify broker of completion + self.logger.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + + "This task inserted: " + str(self.results_counter) + " tuples.\n") + + requests.post('http://{}:{}/api/unstable/completed_task'.format( + self.config['host_broker'],self.config['port_broker']), json=task_completed) + + # Reset results counter for next task + self.results_counter = 0 + + def register_task_failure(self, task, repo_id, e): + + self.logger.error("Worker ran into an error for task: {}\n".format(task)) + self.logger.error("Printing traceback...\n") + tb = traceback.format_exc() + self.logger.error(tb) + + self.logger.info(f'This task inserted {self.results_counter} tuples before failure.\n') + self.logger.info("Notifying broker and logging task failure in database...\n") + key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \ + 'gitlab_url' if 'gitlab_url' in task['given'] else 'INVALID_GIVEN' + url = task['given'][key] + + """ Query all repos with repo url of given task """ + repoUrlSQL = s.sql.text(""" + SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' + """.format(url)) + repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) + + task['worker_id'] = self.config['id'] + try: + requests.post("http://{}:{}/api/unstable/task_error".format( + self.config['host_broker'],self.config['port_broker']), json=task) + except requests.exceptions.ConnectionError: + self.logger.error('Could not send task failure message to the broker\n') + self.logger.error(e) + except Exception: + self.logger.error('An error occured while informing broker about task failure\n') + self.logger.error(e) + + # Add to history table + task_history = { + "repo_id": repo_id, + "worker": self.config['id'], + "job_model": task['models'][0], + "oauth_id": self.oauths[0]['oauth_id'], + "timestamp": datetime.datetime.now(), + "status": "Error", + "total_results": self.results_counter + } + self.helper_db.execute(self.worker_history_table.update().where(self.worker_history_table.c.history_id==self.history_id).values(task_history)) + + self.logger.error("Recorded job error in the history table for: " + 
str(task) + "\n") + + # Update job process table + updated_job = { + "since_id_str": repo_id, + "last_count": self.results_counter, + "last_run": datetime.datetime.now(), + "analysis_state": 0 + } + self.helper_db.execute(self.worker_job_table.update().where(self.worker_job_table.c.job_model==task['models'][0]).values(updated_job)) + self.logger.info("Updated job process for model: " + task['models'][0] + "\n") + + # Reset results counter for next task + self.results_counter = 0 + + def retrieve_tuple(self, key_values, tables): + table_str = tables[0] + del tables[0] + + key_values_items = list(key_values.items()) + for col, value in [key_values_items[0]]: + where_str = col + " = '" + value + "'" + del key_values_items[0] + + for col, value in key_values_items: + where_str += ' AND ' + col + " = '" + value + "'" + for table in tables: + table_str += ", " + table + + retrieveTupleSQL = s.sql.text(""" + SELECT * FROM {} WHERE {} + """.format(table_str, where_str)) + values = json.loads(pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")) + return values + + def update_gitlab_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): + # Try to get rate limit from request headers, sometimes it does not work (GH's issue) + # In that case we just decrement from last recieved header count + if bad_credentials and len(self.oauths) > 1: + self.logger.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) + del self.oauths[0] + + if temporarily_disable: + self.logger.info("Gitlab rate limit reached. Temp. disabling...\n") + self.oauths[0]['rate_limit'] = 0 + else: + try: + self.oauths[0]['rate_limit'] = int(response.headers['RateLimit-Remaining']) + self.logger.info("Recieved rate limit from headers\n") + except: + self.oauths[0]['rate_limit'] -= 1 + self.logger.info("Headers did not work, had to decrement\n") + self.logger.info("Updated rate limit, you have: " + + str(self.oauths[0]['rate_limit']) + " requests remaining.\n") + if self.oauths[0]['rate_limit'] <= 0: + try: + reset_time = response.headers['RateLimit-Reset'] + except Exception as e: + self.logger.info("Could not get reset time from headers because of error: {}".format(e)) + reset_time = 3600 + time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() + self.logger.info("Rate limit exceeded, checking for other available keys to use.\n") + + # We will be finding oauth with the highest rate limit left out of our list of oauths + new_oauth = self.oauths[0] + # Endpoint to hit solely to retrieve rate limit information from headers of the response + url = "https://gitlab.com/api/v4/version" + + other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] + for oauth in other_oauths: + self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) + self.headers = {"PRIVATE-TOKEN" : oauth['access_token']} + response = requests.get(url=url, headers=self.headers) + oauth['rate_limit'] = int(response.headers['RateLimit-Remaining']) + oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() + + # Update oauth to switch to if a higher limit is found + if oauth['rate_limit'] > new_oauth['rate_limit']: + self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth)) + new_oauth = oauth + elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: + self.logger.info("Lower wait 
time found in oauth with same rate limit: {}\n".format(oauth)) + new_oauth = oauth + + if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: + self.logger.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) + time.sleep(new_oauth['seconds_to_reset']) + + # Make new oauth the 0th element in self.oauths so we know which one is in use + index = self.oauths.index(new_oauth) + self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] + self.logger.info("Using oauth: {}\n".format(self.oauths[0])) + + # Change headers to be using the new oauth's key + self.headers = {"PRIVATE-TOKEN" : self.oauths[0]['access_token']} + + + def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): + # Try to get rate limit from request headers, sometimes it does not work (GH's issue) + # In that case we just decrement from last recieved header count + if bad_credentials and len(self.oauths) > 1: + self.logger.warning("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) + del self.oauths[0] + + if temporarily_disable: + self.logger.debug("Github thinks we are abusing their api. Preventing use of this key until it resets...\n") + self.oauths[0]['rate_limit'] = 0 + else: + try: + self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) + self.logger.info("Recieved rate limit from headers\n") + except: + self.oauths[0]['rate_limit'] -= 1 + self.logger.info("Headers did not work, had to decrement\n") + self.logger.info("Updated rate limit, you have: " + + str(self.oauths[0]['rate_limit']) + " requests remaining.\n") + if self.oauths[0]['rate_limit'] <= 0: + try: + reset_time = response.headers['X-RateLimit-Reset'] + except Exception as e: + self.logger.error("Could not get reset time from headers because of error: {}".format(e)) + reset_time = 3600 + time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() + self.logger.info("Rate limit exceeded, checking for other available keys to use.\n") + + # We will be finding oauth with the highest rate limit left out of our list of oauths + new_oauth = self.oauths[0] + # Endpoint to hit solely to retrieve rate limit information from headers of the response + url = "https://api.github.com/users/gabe-heim" + + other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] + for oauth in other_oauths: + self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) + self.headers = {'Authorization': 'token %s' % oauth['access_token']} + + attempts = 3 + success = False + while attempts > 0 and not success: + response = requests.get(url=url, headers=self.headers) + try: + oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) + oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() + success = True + except Exception as e: + self.logger.info(f'oath method ran into error getting info from headers: {e}\n') + self.logger.info(f'{self.headers}\n{url}\n') + attempts -= 1 + if not success: + continue + + # Update oauth to switch to if a higher limit is found + if oauth['rate_limit'] > new_oauth['rate_limit']: + self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth)) + new_oauth = oauth + elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: + self.logger.info("Lower wait time found in 
oauth with same rate limit: {}\n".format(oauth)) + new_oauth = oauth + + if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: + self.logger.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) + time.sleep(new_oauth['seconds_to_reset']) + + # Make new oauth the 0th element in self.oauths so we know which one is in use + index = self.oauths.index(new_oauth) + self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] + self.logger.info("Using oauth: {}\n".format(self.oauths[0])) + + # Change headers to be using the new oauth's key + self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + + def update_rate_limit(self, response, bad_credentials=False, temporarily_disable=False, platform="gitlab"): + if platform == 'gitlab': + return self.update_gitlab_rate_limit(response, bad_credentials=bad_credentials, + temporarily_disable=temporarily_disable) + elif platform == 'github': + return self.update_gh_rate_limit(response, bad_credentials=bad_credentials, + temporarily_disable=temporarily_disable) \ No newline at end of file
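For readers skimming the patch, here is a standalone sketch of the duplicate/update flagging rule that `assign_tuple_action`'s docstring describes above. It is illustrative only and not part of the diff: the DataFrame rows, the example records, and the simplified logic (no `value_update_col_map`, no logging) are assumptions made for the demonstration.

```python
# Simplified re-implementation of the decision rule documented in
# assign_tuple_action (illustrative sketch, not the worker's actual code).
import pandas as pd

# Pretend these rows already exist in the contributors table.
existing = pd.DataFrame([
    {"cntrb_id": 1, "cntrb_login": "alice", "cntrb_email": "alice@old.example.com"},
])

# Newly fetched API data.
new_data = [
    {"login": "alice", "email": "alice@new.example.com"},  # duplicate row, but email changed
    {"login": "bob", "email": "bob@example.com"},           # not present yet
]

duplicate_col_map = {"cntrb_login": "login"}  # db column -> source key
update_col_map = {"cntrb_email": "email"}     # db column -> source key

for obj in new_data:
    match = existing[existing["cntrb_login"] == obj[duplicate_col_map["cntrb_login"]]]
    if match.empty:
        obj["flag"] = "need_insertion"      # no existing row -> insert
    elif match.iloc[0]["cntrb_email"] != obj[update_col_map["cntrb_email"]]:
        obj["flag"] = "need_update"         # row exists but a tracked column differs
        obj["pkey"] = int(match.iloc[0]["cntrb_id"])
    else:
        obj["flag"] = "none"                # exact duplicate -> nothing to do

print(new_data)
# alice is flagged 'need_update' (with pkey 1), bob is flagged 'need_insertion'
```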
diff --git a/test/api/test_experimental_routes.py b/test/api/test_experimental_routes.py deleted file mode 100644 --- a/test/api/test_experimental_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_insight_routes.py b/test/api/test_insight_routes.py deleted file mode 100644 --- a/test/api/test_insight_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_message_routes.py b/test/api/test_message_routes.py deleted file mode 100644 --- a/test/api/test_message_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_platform_routes.py b/test/api/test_platform_routes.py deleted file mode 100644 --- a/test/api/test_platform_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_util_routes.py b/test/api/test_util_routes.py deleted file mode 100644 --- a/test/api/test_util_routes.py +++ /dev/null @@ -1,31 +0,0 @@ -import requests -import pytest - [email protected](scope="session") -def metrics(): - pass - -def test_common(endpoint="http://localhost:5000/api/unstable/repos"): - response = requests.get(endpoint) - data = response.json() - assert response.status_code == 200 - assert len(data) >= 1 - -def test_downloaded_repos(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repos') - -def test_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_repos_in_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_get_repo_for_dosocs(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/dosocs/repos') - -def test_aggregate_summary_by_repo(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/repos/25430/aggregate-summary') - -def test_aggregate_summary_by_group(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/aggregate-summary') - diff --git a/test/metrics/test_experimental_metrics.py b/test/metrics/test_experimental_metrics.py deleted file mode 100644 --- a/test/metrics/test_experimental_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_insight_metrics.py b/test/metrics/test_insight_metrics.py deleted file mode 100644 --- a/test/metrics/test_insight_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_message_metrics.py b/test/metrics/test_message_metrics.py deleted file mode 100644 --- a/test/metrics/test_message_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_platform_metrics.py 
b/test/metrics/test_platform_metrics.py deleted file mode 100644 --- a/test/metrics/test_platform_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_util_metrics.py b/test/metrics/test_util_metrics.py deleted file mode 100644 --- a/test/metrics/test_util_metrics.py +++ /dev/null @@ -1,14 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - -# def test_get_repos_for_dosocs(metrics): -# assert metrics.get_repos_for_dosocs().isin( -# ['/home/sean/git-repos/25430/github.com/rails/rails-dom-testing']).any().any() - diff --git a/augur/plugins/__init__.py b/tests/__init__.py similarity index 100% rename from augur/plugins/__init__.py rename to tests/__init__.py diff --git a/tests/test_application.py b/tests/test_application.py new file mode 100644 --- /dev/null +++ b/tests/test_application.py @@ -0,0 +1,20 @@ +import pytest +import augur.application +import sqlalchemy as s +import json + +from augur.application import Application + +def test_init_augur_regular(): + augur_app = Application(disable_logs=True) + assert augur_app is not None + +def test_connect_to_database(monkeypatch): + def mock_fail_connection(self): + raise(s.exc.OperationalError("fake", "error", "message")) + + monkeypatch.setattr(s.engine.Engine, "connect", mock_fail_connection) + monkeypatch.setenv("AUGUR_LOG_QUIET", "1") + + with pytest.raises(s.exc.OperationalError): + augur_app = Application(disable_logs=True) diff --git a/test/metrics/test_commit_metrics.py b/tests/test_metrics/test_commit_metrics.py similarity index 90% rename from test/metrics/test_commit_metrics.py rename to tests/test_metrics/test_commit_metrics.py --- a/test/metrics/test_commit_metrics.py +++ b/tests/test_metrics/test_commit_metrics.py @@ -2,12 +2,6 @@ import pytest [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_annual_commit_count_ranked_by_repo_in_repo_group(metrics): assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10).iloc[0].net > 0 assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10, 25430).iloc[0].net > 0 diff --git a/test/metrics/test_contributor_metrics.py b/tests/test_metrics/test_contributor_metrics.py similarity index 91% rename from test/metrics/test_contributor_metrics.py rename to tests/test_metrics/test_contributor_metrics.py --- a/test/metrics/test_contributor_metrics.py +++ b/tests/test_metrics/test_contributor_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_contributors(metrics): # repo group assert metrics.contributors(20).iloc[0]['total'] > 0 diff --git a/test/metrics/test_issue_metrics.py b/tests/test_metrics/test_issue_metrics.py similarity index 97% rename from test/metrics/test_issue_metrics.py rename to tests/test_metrics/test_issue_metrics.py --- a/test/metrics/test_issue_metrics.py +++ b/tests/test_metrics/test_issue_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_issues_new(metrics): 
#repo_id assert metrics.issues_new(10, 25430, period='year').iloc[0]['issues'] > 0 diff --git a/test/metrics/test_pull_request_metrics.py b/tests/test_metrics/test_pull_request_metrics.py similarity index 91% rename from test/metrics/test_pull_request_metrics.py rename to tests/test_metrics/test_pull_request_metrics.py --- a/test/metrics/test_pull_request_metrics.py +++ b/tests/test_metrics/test_pull_request_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_pull_requests_merge_contributor_new(metrics): # repo id assert metrics.pull_requests_merge_contributor_new(10, repo_id=25430, period='year').isin( diff --git a/test/metrics/test_repo_meta_metrics.py b/tests/test_metrics/test_repo_meta_metrics.py similarity index 96% rename from test/metrics/test_repo_meta_metrics.py rename to tests/test_metrics/test_repo_meta_metrics.py --- a/test/metrics/test_repo_meta_metrics.py +++ b/tests/test_metrics/test_repo_meta_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_code_changes(metrics): #repo_id assert metrics.code_changes(10, 25430, period='year').isin([pd.Timestamp('2019-01-01T00:00:00+00:00'), 2]).any().any() diff --git a/test/api/runner.py b/tests/test_routes/runner.py similarity index 88% rename from test/api/runner.py rename to tests/test_routes/runner.py --- a/test/api/runner.py +++ b/tests/test_routes/runner.py @@ -9,9 +9,10 @@ start = subprocess.Popen(["augur", "run", "--disable-housekeeper", "--skip-cleanup"], stdout=FNULL, stderr=subprocess.STDOUT) print("Waiting for the server to start...") time.sleep(5) -process = subprocess.run(["pytest", "-ra", "test/api/"]) + +process = subprocess.run(["pytest", "tests/test_routes/"]) time.sleep(2) + subprocess.Popen(["augur", "util", "kill"], stdout=FNULL, stderr=subprocess.STDOUT) print("Server successfully shutdown.") - sys.exit(process.returncode) diff --git a/test/api/test_commit_routes.py b/tests/test_routes/test_commit_routes.py similarity index 97% rename from test/api/test_commit_routes.py rename to tests/test_routes/test_commit_routes.py --- a/test/api/test_commit_routes.py +++ b/tests/test_routes/test_commit_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_annual_commit_count_ranked_by_new_repo_in_repo_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/annual-commit-count-ranked-by-new-repo-in-repo-group/') data = response.json() diff --git a/test/api/test_contributor_routes.py b/tests/test_routes/test_contributor_routes.py similarity index 95% rename from test/api/test_contributor_routes.py rename to tests/test_routes/test_contributor_routes.py --- a/test/api/test_contributor_routes.py +++ b/tests/test_routes/test_contributor_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_contributors_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/contributors') data = response.json() diff --git a/test/api/test_issue_routes.py b/tests/test_routes/test_issue_routes.py similarity index 99% rename from test/api/test_issue_routes.py rename to tests/test_routes/test_issue_routes.py --- a/test/api/test_issue_routes.py +++ 
b/tests/test_routes/test_issue_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_issues_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/issues-new') data = response.json() diff --git a/test/api/test_pull_request_routes.py b/tests/test_routes/test_pull_request_routes.py similarity index 94% rename from test/api/test_pull_request_routes.py rename to tests/test_routes/test_pull_request_routes.py --- a/test/api/test_pull_request_routes.py +++ b/tests/test_routes/test_pull_request_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_pull_requests_merge_contributor_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/pull-requests-merge-contributor-new') data = response.json() diff --git a/test/api/test_repo_meta_routes.py b/tests/test_routes/test_repo_meta_routes.py similarity index 98% rename from test/api/test_repo_meta_routes.py rename to tests/test_routes/test_repo_meta_routes.py --- a/test/api/test_repo_meta_routes.py +++ b/tests/test_routes/test_repo_meta_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_code_changes_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/code-changes') data = response.json() @@ -51,7 +47,6 @@ def test_sub_projects_by_repo(metrics): def test_cii_best_practices_badge_by_repo(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/repos/25430/cii-best-practices-badge') - print(response) data = response.json() assert response.status_code == 200 assert len(data) >= 1 diff --git a/tests/test_routes/test_util_routes.py b/tests/test_routes/test_util_routes.py new file mode 100644 --- /dev/null +++ b/tests/test_routes/test_util_routes.py @@ -0,0 +1,20 @@ +import requests +import pytest + +from conftest import create_full_routes + +util_routes = [\ +"repos",\ +"repo-groups",\ +"repo-groups",\ +"dosocs/repos",\ +"repo-groups/<default_repo_group_id>/aggregate-summary",\ +"repo-groups/<default_repo_group_id>/repos/<default_repo_id>/aggregate-summary",\ +] + [email protected]("endpoint", create_full_routes(util_routes)) +def test_base_test(client, endpoint): + response = client.get(endpoint) + data = response.get_json() + assert response.status_code == 200 + assert len(data) >= 1 diff --git a/tests/test_workers/test_repo_info_worker.py b/tests/test_workers/test_repo_info_worker.py new file mode 100644 --- /dev/null +++ b/tests/test_workers/test_repo_info_worker.py @@ -0,0 +1,29 @@ +import pytest +from time import sleep + +from workers.repo_info_worker.repo_info_worker import RepoInfoWorker + [email protected] +def test_task(): + return { + "given": { + "github_url": "https://github.com/chaoss/augur.git" + }, + "models": ["repo_info"], + "job_type": "MAINTAIN", + "display_name": "repo_info model for url: https://github.com/chaoss/augur.git", + "focused_task": 1 + } + [email protected] +def repo_info_worker(): + config = { + "offline_mode": True, + "quiet": True + } + + repo_info_worker = RepoInfoWorker(config=config) + return repo_info_worker + +def test_repo_info_worker(repo_info_worker, test_task): + assert repo_info_worker is not None diff --git a/test/__init__.py b/workers/contributor_worker/__init__.py similarity index 100% rename from test/__init__.py rename to 
workers/contributor_worker/__init__.py diff --git a/test/test_model.py b/workers/github_worker/__init__.py similarity index 100% rename from test/test_model.py rename to workers/github_worker/__init__.py diff --git a/workers/metric_status_worker/tests/tests_worker.py b/workers/metric_status_worker/tests/tests_worker.py deleted file mode 100644 --- a/workers/metric_status_worker/tests/tests_worker.py +++ /dev/null @@ -1,16 +0,0 @@ -import requests -import pytest - -from metric_status_worker.worker import MetricsStatus - -def test_get_metric_index_in_table_row(): - row = "metric |sTatuS|TestString" - metric_status = MetricsStatus("api.github.com") - result = metric_status.get_metric_index_in_table_row(row) - print(result) - assert result == (0, 3) - -def test_is_has_link(): - metric_status = MetricsStatus("api.github.com") - re_result = metric_status.is_has_link(" [oss](augur" , None) - assert re_result == ('oss', 'augur') diff --git a/workers/tests/test_standard_methods.py b/workers/tests/test_standard_methods.py deleted file mode 100644 --- a/workers/tests/test_standard_methods.py +++ /dev/null @@ -1,28 +0,0 @@ -# Sample Test passing with nose and pytest -import pandas as pd -import pytest -from workers.standard_methods import check_duplicates, dump_queue, read_config -from queue import Queue - - -def test_check_duplicates(): - obj = {"website":["walmart.com"]} - new_data = [obj] - table_values = pd.read_csv("augur/data/companies.csv") - assert check_duplicates(new_data, table_values, "website") == [obj] - -def test_dump_queues(): - sample_queue = Queue() - list_sample = ["[email protected]", "[email protected]", "[email protected]"] - for list_item in list_sample: - sample_queue.put(list_item) - queue_to_list = dump_queue(sample_queue) - assert queue_to_list == ["[email protected]", "[email protected]", "[email protected]"] - -def test_read_config_no_exception(): - db_name = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur',config_file_path="augur.config.json") - assert db_name == "augur" - -def test_read_config_exception(): - with pytest.raises(AttributeError): - db_name = read_config('Server', 'username')
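For context, the new worker test added above lives at tests/test_workers/test_repo_info_worker.py. A minimal way to run just that test (a sketch, assuming pytest and the worker's dependencies are installed and the repository root is the working directory) might look like:

```python
# Run only the new repo_info worker test (sketch; path comes from the diff above,
# everything else about the environment is assumed).
import pytest

pytest.main(["-x", "tests/test_workers/test_repo_info_worker.py"])
```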
repo_info worker: dev/test branch

Please help us help you by filling out the following sections as thoroughly as you can.

**Description:**

Looks like the new Fork information collection has some kind of mismatch between the method and parameters passed:

```
INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}}
INFO:root:Printing traceback...
INFO:root:Traceback (most recent call last):
  File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect
    model_method(message, repo_id)
  File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model
    forked = self.is_forked(owner, repo)
  File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked
    data = self.get_repo_data(self, url, r)
TypeError: get_repo_data() takes 3 positional arguments but 4 were given
INFO:root:This task inserted 0 tuples before failure.
INFO:root:Notifying broker and logging task failure in database...
INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 -
INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'}
INFO:root:Updated job process for model: repo_info
```

If the log does not provide enough info, let me know
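The traceback above points at `self.get_repo_data(self, url, r)`: because `get_repo_data` is an instance method, Python already passes `self` implicitly, so passing it again supplies four positional arguments instead of three. The following self-contained snippet reproduces the error pattern and the likely one-line fix; the method names come from the log, while the `Worker` class and its bodies here are stand-ins, not the project's actual code.

```python
# Illustration of the TypeError from the traceback above (names other than
# get_repo_data/is_forked are made up for the demonstration).
class Worker:
    def get_repo_data(self, url, r):
        return {"url": url, "response": r}

    def is_forked_buggy(self, url, r):
        # Passing `self` explicitly makes this a 4-argument call:
        # TypeError: get_repo_data() takes 3 positional arguments but 4 were given
        return self.get_repo_data(self, url, r)

    def is_forked_fixed(self, url, r):
        # Let Python supply `self` implicitly.
        return self.get_repo_data(url, r)


w = Worker()
print(w.is_forked_fixed("https://api.github.com/repos/davepacheco/node-verror", None))
# w.is_forked_buggy(...) would raise the TypeError shown in the issue log.
```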
2020-07-07T21:04:18Z
[]
[]
chaoss/augur
885
chaoss__augur-885
[ "737" ]
f3ab1021011aec8ac885ae1519164d988df090eb
diff --git a/augur/__init__.py b/augur/__init__.py --- a/augur/__init__.py +++ b/augur/__init__.py @@ -1,10 +1,4 @@ #SPDX-License-Identifier: MIT -import logging -import coloredlogs - -coloredlogs.install() -logger = logging.getLogger('augur') - -# Classes -from .application import Application, logger +import os +ROOT_AUGUR_DIRECTORY = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) diff --git a/augur/application.py b/augur/application.py --- a/augur/application.py +++ b/augur/application.py @@ -4,72 +4,52 @@ """ import os -import time -import multiprocessing as mp +from pathlib import Path import logging +from logging import FileHandler, Formatter import coloredlogs import json -import pkgutil from beaker.cache import CacheManager from beaker.util import parse_cache_config_options import sqlalchemy as s import psycopg2 -from augur import logger +from augur import ROOT_AUGUR_DIRECTORY from augur.metrics import Metrics -from augur.cli.configure import default_config +from augur.config import AugurConfig +from augur.logging import AugurLogging -class Application(object): +logger = logging.getLogger(__name__) + +class Application(): """Initalizes all classes from Augur using a config file or environment variables""" - def __init__(self): + def __init__(self, given_config={}, disable_logs=False, offline_mode=False): """ Reads config, creates DB session, and initializes cache """ - self.config_file_name = 'augur.config.json' - self.__shell_config = None - self.__export_file = None - self.__env_file = None - self.config = default_config - self.env_config = {} - self.root_augur_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - default_config_path = self.root_augur_dir + '/' + self.config_file_name - using_config_file = False - - - config_locations = [self.config_file_name, default_config_path, f"/opt/augur/{self.config_file_name}"] - if os.getenv('AUGUR_CONFIG_FILE') is not None: - config_file_path = os.getenv('AUGUR_CONFIG_FILE') - using_config_file = True - else: - for index, location in enumerate(config_locations): - try: - f = open(location, "r+") - config_file_path = os.path.abspath(location) - using_config_file = True - f.close() - break - except FileNotFoundError: - pass - - if using_config_file: - try: - with open(config_file_path, 'r+') as config_file_handle: - self.config = json.loads(config_file_handle.read()) - except json.decoder.JSONDecodeError as e: - logger.warning('%s could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: %s', config_file_path, str(e)) - else: - logger.warning('%s could not be parsed, using defaults.') - - self.load_env_configuration() - - logger.setLevel(self.read_config("Development", "log_level")) + self.logging = AugurLogging(disable_logs=disable_logs) + self.root_augur_dir = ROOT_AUGUR_DIRECTORY + self.config = AugurConfig(self.root_augur_dir, given_config) + + # we need these for later + self.housekeeper = None + self.manager = None + + self.gunicorn_options = { + 'bind': '%s:%s' % (self.config.get_value("Server", "host"), self.config.get_value("Server", "port")), + 'workers': int(self.config.get_value('Server', 'workers')), + 'timeout': int(self.config.get_value('Server', 'timeout')) + } + self.logging.configure_logging(self.config) + self.gunicorn_options.update(self.logging.gunicorn_logging_options) self.cache_config = { 'cache.type': 'file', 'cache.data_dir': 'runtime/cache/', 'cache.lock_dir': 'runtime/cache/' } + if not os.path.exists(self.cache_config['cache.data_dir']): os.makedirs(self.cache_config['cache.data_dir']) if not os.path.exists(self.cache_config['cache.lock_dir']): @@ -77,75 +57,56 @@ def __init__(self): cache_parsed = parse_cache_config_options(self.cache_config) self.cache = CacheManager(**cache_parsed) - self.database = self.__connect_to_database() - self.spdx_db = self.__connect_to_database(include_spdx=True) + if offline_mode is False: + logger.debug("Running in online mode") + self.database, self.operations_database, self.spdx_database = self._connect_to_database() - self.metrics = Metrics(self) + self.metrics = Metrics(self) - def __connect_to_database(self, include_spdx=False): - user = self.read_config('Database', 'user') - host = self.read_config('Database', 'host') - port = self.read_config('Database', 'port') - dbname = self.read_config('Database', 'name') + def _connect_to_database(self): + logger.debug("Testing database connections") + user = self.config.get_value('Database', 'user') + host = self.config.get_value('Database', 'host') + port = self.config.get_value('Database', 'port') + dbname = self.config.get_value('Database', 'name') database_connection_string = 'postgresql://{}:{}@{}:{}/{}'.format( - user, self.read_config('Database', 'password'), host, port, dbname + user, self.config.get_value('Database', 'password'), host, port, dbname ) csearch_path_options = 'augur_data' - if include_spdx == True: - csearch_path_options += ',spdx' engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, connect_args={'options': f'-csearch_path={csearch_path_options}'}, pool_pre_ping=True) + csearch_path_options += ',spdx' + spdx_engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, + connect_args={'options': f'-csearch_path={csearch_path_options}'}, pool_pre_ping=True) + + helper_engine = s.create_engine(database_connection_string, poolclass=s.pool.NullPool, + connect_args={'options': f'-csearch_path=augur_operations'}, pool_pre_ping=True) + try: - test_connection = engine.connect() - test_connection.close() - return engine + engine.connect().close() + helper_engine.connect().close() + spdx_engine.connect().close() + return engine, helper_engine, spdx_engine except s.exc.OperationalError as e: - logger.fatal(f"Unable to connect to the database. Terminating...") - exit() + logger.error("Unable to connect to the database. 
Terminating...") + raise(e) - def read_config(self, section, name=None): - """ - Read a variable in specified section of the config file, unless provided an environment variable + def shutdown(self): + if self.logging.stop_event is not None: + logger.debug("Stopping housekeeper logging listener...") + self.logging.stop_event.set() - :param section: location of given variable - :param name: name of variable - """ - if name is not None: - try: - value = self.config[section][name] - except KeyError as e: - value = default_config[section][name] - else: - try: - value = self.config[section] - except KeyError as e: - value = default_config[section] - - return value - - def load_env_configuration(self): - self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') - self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') - self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') - self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') - self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') - self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') - self.set_env_value(section='Development', name='log_level', environment_variable='AUGUR_LOG_LEVEL') - - def set_env_value(self, section, name, environment_variable, sub_config=None): - """ - Sets names and values of specified config section according to their environment variables. - """ - # using sub_config lets us grab values from nested config blocks - if sub_config is None: - sub_config = self.config + if self.housekeeper is not None: + logger.debug("Shutting down housekeeper updates...") + self.housekeeper.shutdown_updates() + self.housekeeper = None - env_value = os.getenv(environment_variable) + if self.manager is not None: + logger.debug("Shutting down manager...") + self.manager.shutdown() + self.manager = None - if env_value is not None: - self.env_config[environment_variable] = env_value - sub_config[section][name] = env_value diff --git a/augur/cli/__init__.py b/augur/cli/__init__.py --- a/augur/cli/__init__.py +++ b/augur/cli/__init__.py @@ -0,0 +1,34 @@ +import click +from functools import update_wrapper + +from augur.application import Application +from augur.config import AugurConfig +from augur.logging import AugurLogging, ROOT_AUGUR_DIRECTORY + +def pass_application(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application() + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def pass_config(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + ctx.obj = Application(offline_mode=True).config + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def pass_logs_dir(f): + @click.pass_context + def new_func(ctx, *args, **kwargs): + config = AugurConfig(ROOT_AUGUR_DIRECTORY) + ctx.obj = AugurLogging.get_log_directories(config, reset_logfiles=False) + return ctx.invoke(f, ctx.obj, *args, **kwargs) + return update_wrapper(new_func, f) + +def initialize_logging(f): + def new_func(*args, **kwargs): + AugurLogging(reset_logfiles=False) + return f(*args, **kwargs) + return update_wrapper(new_func, f) \ No newline at end of file diff --git a/augur/runtime.py b/augur/cli/_multicommand.py similarity index 63% rename from augur/runtime.py rename to augur/cli/_multicommand.py --- a/augur/runtime.py +++ 
b/augur/cli/_multicommand.py @@ -6,16 +6,14 @@ import os import sys import click +import importlib import augur.application CONTEXT_SETTINGS = dict(auto_envvar_prefix='AUGUR') class AugurMultiCommand(click.MultiCommand): - def __commands_folder(self): - return os.path.abspath( - os.path.join(os.path.dirname(__file__), 'cli') - ) + return os.path.abspath(os.path.dirname(__file__)) def list_commands(self, ctx): rv = [] @@ -26,13 +24,8 @@ def list_commands(self, ctx): return rv def get_command(self, ctx, name): - # try: - if sys.version_info[0] == 2: - name = name.encode('ascii', 'replace') - mod = __import__('augur.cli.' + name, - None, None, ['cli']) - - return mod.cli + module = importlib.import_module('.' + name, 'augur.cli') + return module.cli @click.command(cls=AugurMultiCommand, context_settings=CONTEXT_SETTINGS) @click.pass_context @@ -40,11 +33,4 @@ def run(ctx): """ Augur is an application for open source community health analytics """ - - app = augur.application.Application() - ctx.obj = app - return ctx.obj - - -if __name__ == '__main__': - run() + return ctx diff --git a/augur/cli/configure.py b/augur/cli/configure.py --- a/augur/cli/configure.py +++ b/augur/cli/configure.py @@ -6,190 +6,15 @@ import os import click import json +import logging -from augur import logger +from augur.config import default_config, ENVVAR_PREFIX +from augur.cli import initialize_logging +from augur.logging import ROOT_AUGUR_DIRECTORY +logger = logging.getLogger(__name__) ENVVAR_PREFIX = "AUGUR_" -default_config = { - "Database": { - "name": "augur", - "host": "localhost", - "key": "key", - "password": "augur", - "port": 5432, - "user": "augur" - }, - "Housekeeper": { - "jobs": [ - { - "all_focused": 1, - "delay": 150000, - "given": [ - "github_url" - ], - "model": "issues", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "pull_request_commits", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "github_url" - ], - "model": "repo_info", - "repo_group_id": 0 - }, - { - "delay": 150000, - "given": [ - "repo_group" - ], - "model": "commits", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "github_url" - ], - "model": "pull_requests", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "contributors", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "insights", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "badges", - "repo_group_id": 0 - }, - { - "delay": 1000000, - "given": [ - "git_url" - ], - "model": "value", - "repo_group_id": 0 - }, - { - "delay": 100000, - "given": [ - "github_url" - ], - "model": "pull_request_files", - "repo_group_id": 0 - } - ] - }, - "Workers": { - "facade_worker": { - "port": 50100, - "repo_directory": "repos/", - "switch": 1, - "workers": 1 - }, - "github_worker": { - "port": 50200, - "switch": 1, - "workers": 1 - }, - "insight_worker": { - "port": 50300, - "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"}, - "contamination": 0.041, - "switch": 0, - "workers": 1, - "training_days": 365, - "anomaly_days": 2 - }, - "linux_badge_worker": { - "port": 50400, - "switch": 1, - "workers": 1 - }, - "metric_status_worker": { - "port": 50500, - "switch": 0, - "workers": 1 - }, - "pull_request_worker": { - "port": 50600, - "switch": 1, - "workers": 1 - }, - 
"repo_info_worker": { - "port": 50700, - "switch": 1, - "workers": 1 - }, - "value_worker": { - "port": 50800, - "scc_bin": "scc", - "switch": 0, - "workers": 1 - }, - "contributor_worker": { - "port": 50900, - "switch": 1, - "workers": 1 - } - }, - "Facade": { - "check_updates": 1, - "clone_repos": 1, - "create_xlsx_summary_files": 1, - "delete_marked_repos": 0, - "fix_affiliations": 1, - "force_analysis": 1, - "force_invalidate_caches": 1, - "force_updates": 1, - "limited_run": 0, - "multithreaded": 0, - "nuke_stored_affiliations": 0, - "pull_repos": 1, - "rebuild_caches": 1, - "run_analysis": 1 - }, - "Server": { - "cache_expire": "3600", - "host": "0.0.0.0", - "port": "5000", - "workers": 4, - "timeout": 60 - }, - "Frontend": { - "host": "0.0.0.0", - "port": "5000" - }, - "Development": { - "log_level": "INFO" - } - } @click.group('configure', short_help='Generate an augur.config.json') def cli(): @@ -204,7 +29,9 @@ def cli(): @click.option('--github_api_key', help="GitHub API key for data collection from the GitHub API", envvar=ENVVAR_PREFIX + 'GITHUB_API_KEY') @click.option('--facade_repo_directory', help="Directory on the database server where Facade should clone repos", envvar=ENVVAR_PREFIX + 'FACADE_REPO_DIRECTORY') @click.option('--rc-config-file', help="File containing existing config whose values will be used as the defaults", type=click.Path(exists=True)) -def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file): [email protected]('--gitlab_api_key', help="GitLab API key for data collection from the GitLab API", envvar=ENVVAR_PREFIX + 'GITLAB_API_KEY') +@initialize_logging +def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file, gitlab_api_key): """ Generate an augur.config.json """ @@ -250,11 +77,13 @@ def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, fa config['Database']['password'] = db_password if github_api_key is not None: config['Database']['key'] = github_api_key + if gitlab_api_key is not None: + config['Database']['gitlab_api_key'] = gitlab_api_key if facade_repo_directory is not None: config['Workers']['facade_worker']['repo_directory'] = facade_repo_directory try: - with open(os.path.abspath('augur.config.json'), 'w') as f: + with open(os.path.abspath(ROOT_AUGUR_DIRECTORY + '/augur.config.json'), 'w') as f: json.dump(config, f, indent=4) logger.info('augur.config.json successfully created') except Exception as e: diff --git a/augur/cli/db.py b/augur/cli/db.py --- a/augur/cli/db.py +++ b/augur/cli/db.py @@ -1,5 +1,6 @@ from os import walk, chdir, environ, chmod, path import os +import logging from sys import exit import stat from collections import OrderedDict @@ -12,7 +13,9 @@ import pandas as pd from sqlalchemy import exc -from augur import logger +from augur.cli import pass_config, pass_application + +logger = logging.getLogger(__name__) @click.group('db', short_help='Database utilities') def cli(): @@ -20,14 +23,12 @@ def cli(): @cli.command('add-repos') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repos(ctx, filename): +@pass_application +def add_repos(augur_app, filename): """ Add repositories to Augur's database """ - app = ctx.obj - - df = app.database.execute(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups")) + df = augur_app.database.execute(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups")) repo_group_IDs = [group[0] for group in 
df.fetchall()] insertSQL = s.sql.text(""" @@ -41,33 +42,29 @@ def add_repos(ctx, filename): for row in data: logger.info(f"Inserting repo with Git URL `{row[1]}` into repo group {row[0]}") if int(row[0]) in repo_group_IDs: - result = app.database.execute(insertSQL, repo_group_id=int(row[0]), repo_git=row[1]) + result = augur_app.database.execute(insertSQL, repo_group_id=int(row[0]), repo_git=row[1]) else: - logger.warn(f"Invalid repo group id specified for {row[1]}, skipping.") + logger.warning(f"Invalid repo group id specified for {row[1]}, skipping.") @cli.command('get-repo-groups') [email protected]_context -def get_repo_groups(ctx): +@pass_application +def get_repo_groups(augur_app): """ List all repo groups and their associated IDs """ - app = ctx.obj - - df = pd.read_sql(s.sql.text("SELECT repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), app.database) + df = pd.read_sql(s.sql.text("SELECT repo_group_id, rg_name, rg_description FROM augur_data.repo_groups"), augur_app.database) print(df) return df @cli.command('add-repo-groups') @click.argument('filename', type=click.Path(exists=True)) [email protected]_context -def add_repo_groups(ctx, filename): +@pass_application +def add_repo_groups(augur_app, filename): """ Create new repo groups in Augur's database """ - app = ctx.obj - - df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), app.database) + df = pd.read_sql(s.sql.text("SELECT repo_group_id FROM augur_data.repo_groups"), augur_app.database) repo_group_IDs = df['repo_group_id'].values.tolist() insert_repo_group_sql = s.sql.text(""" @@ -80,51 +77,48 @@ def add_repo_groups(ctx, filename): logger.info(f"Inserting repo group with name {row[1]} and ID {row[0]}...") if int(row[0]) not in repo_group_IDs: repo_group_IDs.append(int(row[0])) - app.database.execute(insert_repo_group_sql, repo_group_id=int(row[0]), repo_group_name=row[1]) + augur_app.database.execute(insert_repo_group_sql, repo_group_id=int(row[0]), repo_group_name=row[1]) else: logger.info(f"Repo group with ID {row[1]} for repo group {row[1]} already exists, skipping...") @cli.command('update-repo-directory') @click.argument('repo_directory') [email protected]_context -def update_repo_directory(ctx, repo_directory): +@pass_application +def update_repo_directory(augur_app, repo_directory): """ Update Facade worker repo cloning directory """ - app = ctx.obj - updateRepoDirectorySQL = s.sql.text(""" UPDATE augur_data.settings SET VALUE = :repo_directory WHERE setting='repo_directory'; """) - app.database.execute(updateRepoDirectorySQL, repo_directory=repo_directory) + augur_app.database.execute(updateRepoDirectorySQL, repo_directory=repo_directory) logger.info(f"Updated Facade repo directory to: {repo_directory}") # get_db_version is a helper function to print_db_version and upgrade_db_version -def get_db_version(app): +def get_db_version(augur_app): db_version_sql = s.sql.text(""" SELECT * FROM augur_operations.augur_settings WHERE setting = 'augur_data_version' """) - return int(app.database.execute(db_version_sql).fetchone()[2]) + return int(augur_app.database.execute(db_version_sql).fetchone()[2]) @cli.command('print-db-version') [email protected]_context -def print_db_version(ctx): +@pass_application +def print_db_version(augur_app): """ Get the version of the configured database """ - print(get_db_version(ctx.obj)) + print(get_db_version(augur_app)) @cli.command('upgrade-db-version') [email protected]_context -def upgrade_db_version(ctx): +@pass_application +def 
upgrade_db_version(augur_app): """ Upgrade the configured database to the latest version """ - app = ctx.obj - check_pgpass_credentials(app.config) - current_db_version = get_db_version(app) + check_pgpass_credentials(augur_app.config.get_raw_config()) + current_db_version = get_db_version(augur_app) update_scripts_filenames = [] for (_, _, filenames) in walk('schema/generate'): @@ -143,23 +137,22 @@ def upgrade_db_version(ctx): if current_db_version == most_recent_version: logger.info("Your database is already up to date. ") elif current_db_version > most_recent_version: - logger.info(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") + logger.error(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") for target_version, script_location in target_version_script_map.items(): if target_version == current_db_version + 1: logger.info(f"Upgrading from {current_db_version} to {target_version}") - run_psql_command_in_database(app, '-f', f"schema/generate/{script_location}") + run_psql_command_in_database(augur_app, '-f', f"schema/generate/{script_location}") current_db_version += 1 @cli.command('check-for-upgrade') [email protected]_context -def check_for_upgrade(ctx): +@pass_application +def check_for_upgrade(augur_app): """ Upgrade the configured database to the latest version """ - app = ctx.obj - check_pgpass_credentials(app.config) - current_db_version = get_db_version(app) + check_pgpass_credentials(augur_app.config.get_raw_config()) + current_db_version = get_db_version(augur_app) update_scripts_filenames = [] for (_, _, filenames) in walk('schema/generate'): @@ -180,18 +173,17 @@ def check_for_upgrade(ctx): elif current_db_version < most_recent_version: logger.info(f"Current database version: v{current_db_version}\nPlease upgrade to the most recent version (v{most_recent_version}) with augur db upgrade-db-version.") elif current_db_version > most_recent_version: - logger.warn(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. Please contact your system administrator to resolve this error.") + logger.error(f"Unrecognized version: {current_db_version}\nThe most recent version is {most_recent_version}. 
Please contact your system administrator to resolve this error.") @cli.command('create-schema') [email protected]_context -def create_schema(ctx): +@pass_application +def create_schema(augur_app): """ Create schema in the configured database """ - app = ctx.obj - check_pgpass_credentials(app.config) - run_psql_command_in_database(app, '-f', 'schema/create_schema.sql') + check_pgpass_credentials(augur_app.config.get_raw_config()) + run_psql_command_in_database(augur_app, '-f', 'schema/create_schema.sql') def generate_key(length): return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length)) @@ -202,46 +194,40 @@ def generate_api_key(ctx): """ Generate and set a new Augur API key """ - app = ctx.obj key = generate_key(32) ctx.invoke(update_api_key, api_key=key) print(key) @cli.command('update-api-key') @click.argument("api_key") [email protected]_context -def update_api_key(ctx, api_key): +@pass_application +def update_api_key(augur_app, api_key): """ Update the API key in the database to the given key """ - app = ctx.obj - update_api_key_sql = s.sql.text(""" UPDATE augur_operations.augur_settings SET VALUE = :api_key WHERE setting='augur_api_key'; """) - app.database.execute(update_api_key_sql, api_key=api_key) - logger.info(f"Update Augur API key to: {api_key}") + augur_app.database.execute(update_api_key_sql, api_key=api_key) + logger.info(f"Updated Augur API key to: {api_key}") @cli.command('get-api-key') [email protected]_context -def get_api_key(ctx): - app = ctx.obj - +@pass_application +def get_api_key(augur_app): get_api_key_sql = s.sql.text(""" SELECT value FROM augur_operations.augur_settings WHERE setting='augur_api_key'; """) try: - print(app.database.execute(get_api_key_sql).fetchone()[0]) + print(augur_app.database.execute(get_api_key_sql).fetchone()[0]) except TypeError: - logger.warn("No Augur API key found.") + logger.error("No Augur API key found.") @cli.command('check-pgpass', short_help="Check the ~/.pgpass file for Augur's database credentials") [email protected]_context -def check_pgpass(ctx): - app = ctx.obj - check_pgpass_credentials(app.config) +@pass_config +def check_pgpass(config): + check_pgpass_credentials(config.get_raw_config()) @cli.command('init-database') @click.option('--default-db-name', default='postgres') @@ -252,12 +238,10 @@ def check_pgpass(ctx): @click.option('--target-password', default='augur') @click.option('--host', default='localhost') @click.option('--port', default='5432') [email protected]_context -def init_database(ctx, default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port): +def init_database(default_db_name, default_user, default_password, target_db_name, target_user, target_password, host, port): """ Create database with the given credentials using the given maintenance database """ - app = ctx.obj config = { 'Database': { 'name': default_db_name, @@ -276,15 +260,15 @@ def init_database(ctx, default_db_name, default_user, default_password, target_d def run_db_creation_psql_command(host, port, user, name, command): call(['psql', '-h', host, '-p', port, '-U', user, '-d', name, '-a', '-w', '-c', command]) -def run_psql_command_in_database(app, target_type, target): +def run_psql_command_in_database(augur_app, target_type, target): if target_type not in ['-f', '-c']: - logger.fatal("Invalid target type. Exiting...") + logger.error("Invalid target type. 
Exiting...") exit(1) - call(['psql', '-h', app.read_config('Database', 'host'),\ - '-d', app.read_config('Database', 'name'),\ - '-U', app.read_config('Database', 'user'),\ - '-p', str(app.read_config('Database', 'port')),\ + call(['psql', '-h', augur_app.config.get_value('Database', 'host'),\ + '-d', augur_app.config.get_value('Database', 'name'),\ + '-U', augur_app.config.get_value('Database', 'user'),\ + '-p', str(augur_app.config.get_value('Database', 'port')),\ '-a', '-w', target_type, target ]) @@ -292,14 +276,14 @@ def check_pgpass_credentials(config): pgpass_file_path = environ['HOME'] + '/.pgpass' if not path.isfile(pgpass_file_path): - logger.debug("~/.pgpass does not exist, creating.") + logger.info("~/.pgpass does not exist, creating.") open(pgpass_file_path, 'w+') chmod(pgpass_file_path, stat.S_IWRITE | stat.S_IREAD) pgpass_file_mask = oct(os.stat(pgpass_file_path).st_mode & 0o777) if pgpass_file_mask != '0o600': - logger.debug("Updating ~/.pgpass file permissions.") + logger.info("Updating ~/.pgpass file permissions.") chmod(pgpass_file_path, stat.S_IWRITE | stat.S_IREAD) with open(pgpass_file_path, 'a+') as pgpass_file: diff --git a/augur/cli/logging.py b/augur/cli/logging.py new file mode 100644 --- /dev/null +++ b/augur/cli/logging.py @@ -0,0 +1,140 @@ +import click +import os +from os import walk + +from augur.cli import pass_logs_dir + [email protected]("logging", short_help="View Augur's log files") +def cli(): + pass + [email protected]("directory") +@pass_logs_dir +def directory(logs_dir): + """ + Print the location of Augur's logs directory + """ + print(logs_dir) + [email protected]("errors") [email protected]("worker", default="all") +@pass_logs_dir +def errors(logs_dir, worker): + """ + Output error messages of the main Augur and all worker logfiles or a specific worker logfile + """ + root_log_dir = logs_dir + worker_log_dir = logs_dir + "/workers/" + if worker is None: + worker = "all" + + if worker == "all": + files = [] + directories = [] + for (_, _, filenames) in walk(root_log_dir): + for file in filenames: + if file.endswith(".err"): + print_log(file, root_log_dir) + break + + files = [] + directories = [] + for (dirpath, dirnames, filenames) in walk(worker_log_dir): + directories.extend(dirnames) + break + + for directory in directories: + specific_worker_log_dir = worker_log_dir + directory + for (_, _, filenames) in walk(specific_worker_log_dir): + files.extend(filenames) + for file in [file for file in filenames if "collection" in file and file.endswith(".err")]: + print_log(file, specific_worker_log_dir) + break + else: + files = [] + specific_worker_log_dir = worker_log_dir + "/" + worker + "/" + for (_, _, filenames) in walk(specific_worker_log_dir): + files.extend(filenames) + for file in [file for file in filenames if "collection" in file and file.endswith(".err")]: + print_log(file, specific_worker_log_dir) + break + +def print_log(file, log_dir): + f = open(log_dir + "/" + file) + result = f.readlines() + print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + [email protected]("tail") [email protected]("lines", default=20) +@pass_logs_dir +def tail(logs_dir, lines): + """ + Output the last n lines of the main Augur and worker logfiles + """ + root_log_dir = logs_dir + worker_log_dir = logs_dir + "/workers/" + if lines is None: + lines = 20 + + files = [] + directories = [] + for (_, _, filenames) in walk(root_log_dir): + for file in filenames: + result = _tail(open(root_log_dir + "/" + file), lines) + 
print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + break + + files = [] + directories = [] + for (dirpath, dirnames, filenames) in walk(worker_log_dir): + directories.extend(dirnames) + break + + for directory in directories: + specific_worker_log_dir = worker_log_dir + directory + for (_, _, filenames) in walk(specific_worker_log_dir): + files.extend(filenames) + + for file in [file for file in filenames if "collection" in file]: + result = _tail(open(specific_worker_log_dir + "/" + file), lines) + print("********** Logfile: " + file) + for log in result: + print(log.strip()) + print() + break + +def _tail(f, lines=20, _buffer=4098): + lines_found = [] + + # block counter will be multiplied by buffer + # to get the block size from the end + block_counter = -1 + + # loop until we find X lines + while len(lines_found) < lines: + try: + f.seek(block_counter * _buffer, os.SEEK_END) + except IOError: # either file is too small, or too many lines requested + f.seek(0) + lines_found = f.readlines() + break + + lines_found = f.readlines() + + # we found enough lines, get out + # Removed this line because it was redundant the while will catch + # it, I left it for history + # if len(lines_found) > lines: + # break + + # decrement the block counter to get the + # next X bytes + block_counter -= 1 + + return lines_found[-lines:] \ No newline at end of file diff --git a/augur/cli/run.py b/augur/cli/run.py --- a/augur/cli/run.py +++ b/augur/cli/run.py @@ -4,187 +4,143 @@ """ from copy import deepcopy -import os, time, atexit, subprocess, click +import os, time, atexit, subprocess, click, atexit, logging, sys import multiprocessing as mp import gunicorn.app.base -from gunicorn.six import iteritems from gunicorn.arbiter import Arbiter -from augur.housekeeper.housekeeper import Housekeeper -from augur import logger +from augur.housekeeper import Housekeeper from augur.server import Server - from augur.cli.util import kill_processes -import time +from augur.application import Application + +logger = logging.getLogger("augur") @click.command("run") @click.option("--disable-housekeeper", is_flag=True, default=False, help="Turns off the housekeeper") @click.option("--skip-cleanup", is_flag=True, default=False, help="Disables the old process cleanup that runs before Augur starts") [email protected]_context -def cli(ctx, disable_housekeeper, skip_cleanup): +def cli(disable_housekeeper, skip_cleanup): """ Start Augur's backend server """ + augur_app = Application() + logger.info("Augur application initialized") if not skip_cleanup: - logger.info("Cleaning up old Augur processes. 
Just a moment please...") - ctx.invoke(kill_processes) + logger.debug("Cleaning up old Augur processes...") + kill_processes() time.sleep(2) else: - logger.info("Skipping cleanup processes.") - - def get_process_id(name): - """Return process ids found by name or command - """ - child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False) - response = child.communicate()[0] - return [int(pid) for pid in response.split()] + logger.debug("Skipping process cleanup") - app = ctx.obj + master = initialize_components(augur_app, disable_housekeeper) + logger.info('Starting Gunicorn server in the background...') + if not disable_housekeeper: + logger.info('Housekeeper update process logs will now take over.') + else: + logger.info("Gunicorn server logs will be written to gunicorn.log") + logger.info("Augur is still running...don't close this process!") + Arbiter(master).run() - mp.set_start_method('forkserver', force=True) +def initialize_components(augur_app, disable_housekeeper): master = None - manager = None broker = None housekeeper = None - - logger.info("Booting broker and its manager...") - manager = mp.Manager() - broker = manager.dict() - - controller = app.read_config('Workers') - worker_pids = [] worker_processes = [] + mp.set_start_method('forkserver', force=True) if not disable_housekeeper: - if not controller: - return + logger.info("Booting manager") + manager = mp.Manager() + + logger.info("Booting broker") + broker = manager.dict() + + housekeeper = Housekeeper(broker=broker, augur_app=augur_app) + + controller = augur_app.config.get_section('Workers') + for worker in controller.keys(): - if not controller[worker]['switch']: - continue - logger.info("Your config has the option set to automatically boot {} instances of the {}".format(controller[worker]['workers'], worker)) - pids = get_process_id("/bin/sh -c cd workers/{} && {}_start".format(worker, worker)) - worker_pids += pids - if len(pids) > 0: - worker_pids.append(pids[0] + 1) - pids.append(pids[0] + 1) - logger.info("Found and preparing to kill previous {} worker pids: {}".format(worker,pids)) - for pid in pids: - try: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - - @atexit.register - def exit(): - try: - for pid in worker_pids: - os.kill(pid, 9) - except: - logger.info("Worker process {} already killed".format(pid)) - for process in worker_processes: - logger.info("Shutting down worker process with pid: {} ...".format(process.pid)) - process.terminate() + if controller[worker]['switch']: + for i in range(controller[worker]['workers']): + logger.info("Booting {} #{}".format(worker, i + 1)) + worker_process = mp.Process(target=worker_start, name=f"{worker}_{i}", kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) + worker_processes.append(worker_process) + worker_process.start() - if master is not None: - master.halt() - logger.info("Shutting down housekeeper updates...") - if housekeeper is not None: - housekeeper.shutdown_updates() - - # if hasattr(manager, "shutdown"): - # wait for the spawner and the worker threads to go down - # - if manager is not None: - manager.shutdown() - # check if it is still alive and kill it if necessary - # if manager._process.is_alive(): - manager._process.terminate() - - # Prevent multiprocessing's atexit from conflicting with gunicorn - logger.info("Killing main augur process with PID: {}".format(os.getpid())) - os.kill(os.getpid(), 9) - os._exit(0) + 
augur_app.manager = manager + augur_app.broker = broker + augur_app.housekeeper = housekeeper - if not disable_housekeeper: - logger.info("Booting housekeeper...") - jobs = deepcopy(app.read_config('Housekeeper', 'jobs')) - try: - housekeeper = Housekeeper( - jobs, - broker, - broker_host=app.read_config('Server', 'host'), - broker_port=app.read_config('Server', 'port'), - user=app.read_config('Database', 'user'), - password=app.read_config('Database', 'password'), - host=app.read_config('Database', 'host'), - port=app.read_config('Database', 'port'), - dbname=app.read_config('Database', 'name') - ) - except KeyboardInterrupt as e: - exit() - - logger.info("Housekeeper has finished booting.") - - if controller: - for worker in controller.keys(): - if controller[worker]['switch']: - for i in range(controller[worker]['workers']): - logger.info("Booting {} #{}".format(worker, i + 1)) - worker_process = mp.Process(target=worker_start, kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True) - worker_process.start() - worker_processes.append(worker_process) - - host = app.read_config('Server', 'host') - port = app.read_config('Server', 'port') - workers = int(app.read_config('Server', 'workers')) - timeout = int(app.read_config('Server', 'timeout')) - options = { - 'bind': '%s:%s' % (host, port), - 'workers': workers, - 'accesslog': '-', - 'access_log_format': '%(h)s - %(t)s - %(r)s', - 'timeout': timeout - } - logger.info('Starting server...') - master = Arbiter(AugurGunicornApp(options, manager=manager, broker=broker, housekeeper=housekeeper)).run() + atexit._clear() + atexit.register(exit, augur_app, worker_processes, master) + return AugurGunicornApp(augur_app.gunicorn_options, augur_app=augur_app) def worker_start(worker_name=None, instance_number=0, worker_port=None): - time.sleep(120 * instance_number) - destination = subprocess.DEVNULL try: - destination = open("workers/{}/worker_{}.log".format(worker_name, worker_port), "a+") - except IOError as e: - logger.error("Error opening log file for auto-started worker {}: {}".format(worker_name, e)) - process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) - logger.info("{} booted.".format(worker_name)) + time.sleep(30 * instance_number) + destination = subprocess.DEVNULL + process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT) + logger.info("{} #{} booted.".format(worker_name,instance_number+1)) + except KeyboardInterrupt as e: + pass + +def exit(augur_app, worker_processes, master): + + logger.info("Shutdown started for this Gunicorn worker...") + augur_app.shutdown() + + if worker_processes: + for process in worker_processes: + logger.debug("Shutting down worker process with pid: {}...".format(process.pid)) + process.terminate() + + if master is not None: + logger.debug("Shutting down Gunicorn server") + master.halt() + master = None + + logger.info("Shutdown complete") + sys.exit(0) class AugurGunicornApp(gunicorn.app.base.BaseApplication): """ Loads configurations, initializes Gunicorn, loads server """ - def __init__(self, options=None, manager=None, broker=None, housekeeper=None): - self.options = options or {} - self.manager = manager - self.broker = broker - self.housekeeper = housekeeper + def __init__(self, options={}, augur_app=None): + self.options = options + self.augur_app = augur_app + self.manager 
= self.augur_app.manager + self.broker = self.augur_app.broker + self.housekeeper = self.augur_app.housekeeper + self.server = None + logger.debug(f"Gunicorn will start {self.options['workers']} worker processes") super(AugurGunicornApp, self).__init__() - # self.cfg.pre_request.set(pre_request) def load_config(self): """ Sets the values for configurations """ - config = dict([(key, value) for key, value in iteritems(self.options) - if key in self.cfg.settings and value is not None]) - for key, value in iteritems(config): + config = {key: value for key, value in self.options.items() + if key in self.cfg.settings and value is not None} + for key, value in config.items(): self.cfg.set(key.lower(), value) - def load(self): + def get_augur_app(self): """ Returns the loaded server """ - server = Server(manager=self.manager, broker=self.broker, housekeeper=self.housekeeper) - return server.app + self.load() + return self.server.augur_app + def load(self): + """ + Returns the loaded server + """ + if self.server is None: + try: + self.server = Server(augur_app=self.augur_app) + except Exception as e: + logger.error(f"An error occured when Gunicorn tried to load the server: {e}") + return self.server.app diff --git a/augur/cli/util.py b/augur/cli/util.py --- a/augur/cli/util.py +++ b/augur/cli/util.py @@ -5,43 +5,47 @@ import os import signal +import logging from subprocess import call, run +import time import psutil import click import pandas as pd import sqlalchemy as s -from augur import logger -from augur.cli.configure import default_config +from augur.cli import initialize_logging, pass_config, pass_application + +logger = logging.getLogger(__name__) @click.group('util', short_help='Miscellaneous utilities') def cli(): pass @cli.command('export-env') [email protected]_context -def export_env(ctx): +@pass_config +def export_env(config): """ Exports your GitHub key and database credentials """ - app = ctx.obj export_file = open(os.getenv('AUGUR_EXPORT_FILE', 'augur_export_env.sh'), 'w+') export_file.write('#!/bin/bash') export_file.write('\n') env_file = open(os.getenv('AUGUR_ENV_FILE', 'docker_env.txt'), 'w+') - for env_var in app.env_config.items(): - export_file.write('export ' + env_var[0] + '="' + str(env_var[1]) + '"\n') - env_file.write(env_var[0] + '=' + str(env_var[1]) + '\n') + for env_var in config.get_env_config().items(): + if "LOG" not in env_var[0]: + logger.info(f"Exporting {env_var[0]}") + export_file.write('export ' + env_var[0] + '="' + str(env_var[1]) + '"\n') + env_file.write(env_var[0] + '=' + str(env_var[1]) + '\n') export_file.close() env_file.close() @cli.command('kill') [email protected]_context -def kill_processes(ctx): +@initialize_logging +def cli_kill_processes(): """ Terminates all currently running backend Augur processes, including any workers. Will only work in a virtual environment. 
""" @@ -49,14 +53,63 @@ def kill_processes(ctx): if processes != []: for process in processes: if process.pid != os.getpid(): - # logger.info(f"Killing {process.pid}: {' '.join(process.info['cmdline'][1:])}") + logger.info(f"Terminating process {process.pid}") + try: + process.send_signal(signal.SIGTERM) + logger.info(f"sending SIGTERM Signal to {process.pid}") + except psutil.NoSuchProcess as e: + pass + + logger.info(f"Waiting to check if processes terminated.") + + time.sleep(15) + logger.info(f"Checking on process termination.") + + processes = get_augur_processes() + + if processes != []: + for process in processes: + + if process.pid != os.getpid(): logger.info(f"Killing process {process.pid}") + try: + process.send_signal(signal.SIGKILL) + logger.info(f"sending SIGKILL Signal to {process.pid}") + except psutil.NoSuchProcess as e: + pass + +def kill_processes(): + logger = logging.getLogger("augur") + processes = get_augur_processes() + if processes != []: + for process in processes: + if process.pid != os.getpid(): + logger.info(f"Terminating process {process.pid}") try: process.send_signal(signal.SIGTERM) + logger.info(f"sending SIGTERM Signal to {process.pid}") + except psutil.NoSuchProcess as e: + logger.warning(e) + logger.info(f"Waiting to check if processes terminated.") + + time.sleep(15) + logger.info(f"Checking on process termination.") + + processes = get_augur_processes() + + if processes != []: + for process in processes: + if process.pid != os.getpid(): + logger.info(f"Killing process {process.pid}") + logger.info(f"Killing process {process.pid}") + try: + process.send_signal(signal.SIGKILL) + logger.info(f"sending SIGKILL Signal to {process.pid}") except psutil.NoSuchProcess as e: pass @cli.command('list',) +@initialize_logging def list_processes(): """ Outputs the name and process ID (PID) of all currently running backend Augur processes, including any workers. Will only work in a virtual environment. 
@@ -78,13 +131,11 @@ def get_augur_processes(): return processes @cli.command('repo-reset') [email protected]_context -def repo_reset(ctx): +@pass_application +def repo_reset(augur_app): """ Refresh repo collection to force data collection """ - app = ctx.obj - - app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") + augur_app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ") logger.info("Repos successfully reset") diff --git a/augur/config.py b/augur/config.py new file mode 100644 --- /dev/null +++ b/augur/config.py @@ -0,0 +1,349 @@ +import os +import json +import logging + +ENVVAR_PREFIX = "AUGUR_" + +default_config = { + "version": 1, + "Database": { + "name": "augur", + "host": "localhost", + "key": "key", + "password": "augur", + "port": 5432, + "user": "augur", + "gitlab_api_key":"gitlab_api_key" + }, + "Housekeeper": { + "jobs": [ + { + "all_focused": 1, + "delay": 150000, + "given": [ + "github_url" + ], + "model": "issues", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "pull_request_commits", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "github_url" + ], + "model": "repo_info", + "repo_group_id": 0 + }, + { + "delay": 150000, + "given": [ + "repo_group" + ], + "model": "commits", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "github_url" + ], + "model": "pull_requests", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "contributors", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "insights", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "badges", + "repo_group_id": 0 + }, + { + "delay": 1000000, + "given": [ + "git_url" + ], + "model": "value", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "pull_request_files", + "repo_group_id": 0 + }, + { + "delay": 100000, + "given": [ + "github_url" + ], + "model": "releases", + "repo_group_id": 0 + } + ] + }, + "Workers": { + "facade_worker": { + "port": 50100, + "repo_directory": "repos/", + "switch": 1, + "workers": 1 + }, + "github_worker": { + "port": 50200, + "switch": 1, + "workers": 1 + }, + "insight_worker": { + "port": 50300, + "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", + "reviews": "pull_requests", "contributors-new": "new_contributors"}, + "confidence_interval": 95, + "contamination": 0.041, + "switch": 0, + "workers": 1, + "training_days": 365, + "anomaly_days": 2 + }, + "linux_badge_worker": { + "port": 50400, + "switch": 1, + "workers": 1 + }, + "metric_status_worker": { + "port": 50500, + "switch": 0, + "workers": 1 + }, + "pull_request_worker": { + "port": 50600, + "switch": 1, + "workers": 1 + }, + "repo_info_worker": { + "port": 50700, + "switch": 1, + "workers": 1 + }, + "value_worker": { + "port": 50800, + "scc_bin": "scc", + "switch": 0, + "workers": 1 + }, + "contributor_worker": { + "port": 50900, + "switch": 1, + "workers": 1 + }, + "gitlab_issues_worker": { + "port": 51000, + "switch": 1, + "workers": 1 + }, + "release_worker": { + "port": 51100, + "switch": 1, + "workers": 1 + }, + "gitlab_merge_request_worker": { + "port": 51200, + "switch": 1, + "workers": 1 + } + }, + "Facade": { + "check_updates": 1, + "clone_repos": 
1, + "create_xlsx_summary_files": 1, + "delete_marked_repos": 0, + "fix_affiliations": 1, + "force_analysis": 1, + "force_invalidate_caches": 1, + "force_updates": 1, + "limited_run": 0, + "multithreaded": 0, + "nuke_stored_affiliations": 0, + "pull_repos": 1, + "rebuild_caches": 1, + "run_analysis": 1 + }, + "Server": { + "cache_expire": "3600", + "host": "0.0.0.0", + "port": "5000", + "workers": 4, + "timeout": 60 + }, + "Frontend": { + "host": "0.0.0.0", + "port": "5000" + }, + "Logging": { + "logs_directory": "logs/", + "log_level": "INFO", + "verbose": 0, + "quiet": 0, + "debug": 0 + } + } + +logger = logging.getLogger(__name__) + +class AugurConfig(): + """docstring for AugurConfig""" + def __init__(self, root_augur_dir, given_config={}): + self._default_config_file_name = 'augur.config.json' + self._root_augur_dir = root_augur_dir + self._default_config = default_config + self._env_config = {} + self.load_config() + self.version = self.get_version() + self._config.update(given_config) + + def get_section(self, section_name): + try: + return self._config[section_name] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name} not found in loaded config. Checking default config") + try: + return self._default_config[section_name] + except KeyError as e: + logger.error(f"No defaults found for {section_name}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}") + + def get_version(self): + try: + return self._config["version"] + except KeyError as e: + logger.warning("No config version found. Setting version to 0.") + return 0 + + def get_value(self, section_name, value): + try: + return self._config[section_name][value] + except KeyError as e: + if not self.using_default_config: + logger.warn(f"{section_name}:{value} not found in loaded config. Checking default config") + try: + return self._default_config[section_name][value] + except KeyError as e: + logger.error(f"No defaults found for {section_name}:{value}") + raise(e) + else: + logger.debug(f"Already using default config, skipping check for {section_name}:{value}") + + def load_config(self): + self._config = None + self.using_default_config = False + + logger.debug("Attempting to load config file") + try: + config_file_path = self.discover_config_file() + try: + with open(config_file_path, 'r+') as config_file_handle: + self._config = json.loads(config_file_handle.read()) + logger.debug("Config file loaded successfully") + except json.decoder.JSONDecodeError as e: + logger.warning("Unable to parse config. Using default configuration") + self.using_default_config = True + self._config = default_config + except AugurConfigFileNotFoundException as e: + logger.warning("Config file not found. 
Using default configuration") + self.using_default_config = True + self._config = default_config + + self.load_env_configuration() + + def discover_config_file(self): + default_config_path = self._root_augur_dir + '/' + self._default_config_file_name + config_file_path = None + + config_locations = [self._default_config_file_name, default_config_path + , f"/opt/augur/{self._default_config_file_name}"] + if os.getenv('AUGUR_CONFIG_FILE', None) is not None: + config_file_path = os.getenv('AUGUR_CONFIG_FILE') + else: + for location in config_locations: + try: + f = open(location, "r+") + config_file_path = os.path.abspath(location) + f.close() + break + except FileNotFoundError: + pass + if config_file_path: + return config_file_path + else: + raise(AugurConfigFileNotFoundException(message=f"{self._default_config_file_name} not found", errors=None)) + + def load_env_configuration(self): + self.set_env_value(section='Database', name='key', environment_variable='AUGUR_GITHUB_API_KEY') + self.set_env_value(section='Database', name='host', environment_variable='AUGUR_DB_HOST') + self.set_env_value(section='Database', name='name', environment_variable='AUGUR_DB_NAME') + self.set_env_value(section='Database', name='port', environment_variable='AUGUR_DB_PORT') + self.set_env_value(section='Database', name='user', environment_variable='AUGUR_DB_USER') + self.set_env_value(section='Database', name='password', environment_variable='AUGUR_DB_PASSWORD') + self.set_env_value(section='Logging', name='log_level', environment_variable='AUGUR_LOG_LEVEL') + self.set_env_value(section='Logging', name='quiet', environment_variable='AUGUR_LOG_QUIET') + self.set_env_value(section='Logging', name='debug', environment_variable='AUGUR_LOG_DEBUG') + self.set_env_value(section='Logging', name='verbose', environment_variable='AUGUR_LOG_VERBOSE') + + def set_env_value(self, section, name, environment_variable, sub_config=None): + """ + Sets names and values of specified config section according to their environment variables. 
+ """ + # using sub_config lets us grab values from nested config blocks + if sub_config is None: + sub_config = self._config + + env_value = os.getenv(environment_variable) + + if env_value is not None: + self._env_config[environment_variable] = env_value + sub_config[section][name] = env_value + # logger.info(f"{section}:[\"{name}\"] set to {env_value} by: {environment_variable}") + else: + self._env_config[environment_variable] = self.get_value(section, name) + + def get_raw_config(self): + return self._config + + def get_default_config(self): + return self._default_config + + def get_env_config(self): + return self._env_config + +class AugurConfigFileNotFoundException(Exception): + def __init__(self, message, errors): + super().__init__(message) diff --git a/augur/housekeeper/housekeeper.py b/augur/housekeeper.py similarity index 81% rename from augur/housekeeper/housekeeper.py rename to augur/housekeeper.py --- a/augur/housekeeper/housekeeper.py +++ b/augur/housekeeper.py @@ -1,69 +1,85 @@ """ Keeps data up to date """ +import coloredlogs +from copy import deepcopy import logging, os, time, requests -from multiprocessing import Process +import logging.config +from multiprocessing import Process, get_start_method from sqlalchemy.ext.automap import automap_base import sqlalchemy as s import pandas as pd from sqlalchemy import MetaData -logging.basicConfig(filename='housekeeper.log') + +from augur.logging import AugurLogging + +import warnings +warnings.filterwarnings('ignore') + +logger = logging.getLogger(__name__) class Housekeeper: - def __init__(self, jobs, broker, broker_host, broker_port, user, password, host, port, dbname): + def __init__(self, broker, augur_app): + logger.info("Booting housekeeper") - self.broker_host = broker_host - self.broker_port = broker_port + self._processes = [] + self.augur_logging = augur_app.logging + self.jobs = deepcopy(augur_app.config.get_value("Housekeeper", "jobs")) + self.broker_host = augur_app.config.get_value("Server", "host") + self.broker_port = augur_app.config.get_value("Server", "port") self.broker = broker - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - user, password, host, port, dbname - ) - - dbschema='augur_data' - self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + self.db = augur_app.database + self.helper_db = augur_app.operations_database helper_metadata = MetaData() helper_metadata.reflect(self.helper_db, only=['worker_job']) HelperBase = automap_base(metadata=helper_metadata) HelperBase.prepare() - self.job_table = HelperBase.classes.worker_job.__table__ repoUrlSQL = s.sql.text(""" SELECT repo_git FROM repo """) - rs = pd.read_sql(repoUrlSQL, self.db, params={}) - all_repos = rs['repo_git'].values.tolist() # List of tasks that need periodic updates - self.__updatable = self.prep_jobs(jobs) + self.schedule_updates() + + def schedule_updates(self): + """ + Starts update processes + """ + self.prep_jobs() + self.augur_logging.initialize_housekeeper_logging_listener() + logger.info("Scheduling update processes") + for job in self.jobs: + process = Process(target=self.updater_process, name=job["model"], args=(self.broker_host, self.broker_port, self.broker, job, (self.augur_logging.housekeeper_job_config, self.augur_logging.get_config()))) + self._processes.append(process) + 
process.start() - self.__processes = [] - self.__updater() @staticmethod - def updater_process(broker_host, broker_port, broker, job): + def updater_process(broker_host, broker_port, broker, job, logging_config): """ Controls a given plugin's update process - :param name: name of object to be updated - :param delay: time needed to update - :param shared: shared object that is to also be updated + """ - + logging.config.dictConfig(logging_config[0]) + logger = logging.getLogger(f"augur.jobs.{job['model']}") + coloredlogs.install(level=logging_config[1]["log_level"], logger=logger, fmt=logging_config[1]["format_string"]) + + if logging_config[1]["quiet"]: + logger.disabled + if 'repo_group_id' in job: repo_group_id = job['repo_group_id'] - logging.info('Housekeeper spawned {} model updater process for repo group id {} with PID {}\n'.format(job['model'], repo_group_id, os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo group id {}'.format(job['model'], repo_group_id)) else: repo_group_id = None - logging.info('Housekeeper spawned {} model updater process for repo ids {} with PID {}\n'.format(job['model'], job['repo_ids'], os.getpid())) + logger.info('Housekeeper spawned {} model updater process for repo ids {}'.format(job['model'], job['repo_ids'])) try: compatible_worker_found = False @@ -76,10 +92,10 @@ def updater_process(broker_host, broker_port, broker, job): time.sleep(3) continue - logging.info("Housekeeper recognized that the broker has a worker that " + - "can handle the {} model... beginning to distribute maintained tasks\n".format(job['model'])) + logger.info("Housekeeper recognized that the broker has a worker that " + + "can handle the {} model... beginning to distribute maintained tasks".format(job['model'])) while True: - logging.info('Housekeeper updating {} model with given {}...\n'.format( + logger.info('Housekeeper updating {} model with given {}...'.format( job['model'], job['given'][0])) if job['given'][0] == 'git_url' or job['given'][0] == 'github_url': @@ -100,9 +116,9 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.error("Error encountered: {}".format(e)) - logging.info(task) + logger.debug(task) time.sleep(15) @@ -119,61 +135,33 @@ def updater_process(broker_host, broker_port, broker, job): requests.post('http://{}:{}/api/unstable/task'.format( broker_host,broker_port), json=task, timeout=10) except Exception as e: - logging.info("Error encountered: {}\n".format(e)) + logger.error("Error encountered: {}".format(e)) - logging.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)\n".format(len(job['repos']))) + logger.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)".format(len(job['repos']))) time.sleep(job['delay']) - - except KeyboardInterrupt: - os.kill(os.getpid(), 9) - os._exit(0) - except: - raise - def __updater(self, jobs=None): - """ - Starts update processes - """ - logging.info("Starting update processes...") - if jobs is None: - jobs = self.__updatable - for job in jobs: - up = Process(target=self.updater_process, args=(self.broker_host, self.broker_port, self.broker, job), daemon=True) - up.start() - self.__processes.append(up) - - def update_all(self): - """ - Updates all plugins - """ - for updatable in self.__updatable: - 
updatable['update']() - - def schedule_updates(self): - """ - Schedules updates - """ - # don't use this, - logging.debug('Scheduling updates...') - self.__updater() + except KeyboardInterrupt as e: + pass def join_updates(self): """ Join to the update processes """ - for process in self.__processes: + for process in self._processes: + logger.debug(f"Joining {process.name} update process") process.join() def shutdown_updates(self): """ Ends all running update processes """ - for process in self.__processes: + for process in self._processes: + # logger.debug(f"Terminating {process.name} update process") process.terminate() - def prep_jobs(self, jobs): - - for job in jobs: + def prep_jobs(self): + logger.info("Preparing housekeeper jobs") + for job in self.jobs: if 'repo_group_id' in job or 'repo_ids' in job: # If RG id is 0 then it just means to query all repos where_and = 'AND' if job['model'] == 'issues' and 'repo_group_id' in job else 'WHERE' @@ -269,7 +257,7 @@ def prep_jobs(self, jobs): reorganized_repos = pd.read_sql(repo_url_sql, self.db, params={}) if len(reorganized_repos) == 0: - logging.info("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) + logger.warning("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql)) job['repos'] = [] continue @@ -290,7 +278,7 @@ def prep_jobs(self, jobs): 'oauth_id': 0 } result = self.helper_db.execute(self.job_table.insert().values(job_tuple)) - logging.info("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) + logger.debug("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple)) # If a last id is not recorded, start from beginning of repos # (first id is not necessarily 0) @@ -347,5 +335,3 @@ def prep_jobs(self, jobs): job['repos'] = rs # time.sleep(120) - return jobs - diff --git a/augur/logging.py b/augur/logging.py new file mode 100644 --- /dev/null +++ b/augur/logging.py @@ -0,0 +1,305 @@ +import logging +import logging.config +import logging.handlers +from logging import FileHandler, StreamHandler, Formatter +from multiprocessing import Process, Queue, Event, current_process +from time import sleep +import os +from pathlib import Path +import atexit +import shutil +import coloredlogs +from copy import deepcopy + +from augur import ROOT_AUGUR_DIRECTORY + +logger = logging.getLogger(__name__) + +class AugurLogging(): + + simple_format_string = "[%(process)d] %(name)s [%(levelname)s] %(message)s" + verbose_format_string = "%(asctime)s,%(msecs)dms [PID: %(process)d] %(name)s [%(levelname)s] %(message)s" + cli_format_string = "CLI: [%(module)s.%(funcName)s] [%(levelname)s] %(message)s" + config_format_string = "[%(levelname)s] %(message)s" + error_format_string = "%(asctime)s [PID: %(process)d] %(name)s [%(funcName)s() in %(filename)s:L%(lineno)d] [%(levelname)s]: %(message)s" + + @staticmethod + def get_log_directories(augur_config, reset_logfiles=True): + LOGS_DIRECTORY = augur_config.get_value("Logging", "logs_directory") + + if LOGS_DIRECTORY[0] != "/": + LOGS_DIRECTORY = ROOT_AUGUR_DIRECTORY + "/" + LOGS_DIRECTORY + + if LOGS_DIRECTORY[-1] != "/": + LOGS_DIRECTORY += "/" + + if reset_logfiles is True: + try: + shutil.rmtree(LOGS_DIRECTORY) + except FileNotFoundError as e: + pass + + Path(LOGS_DIRECTORY).mkdir(exist_ok=True) + + return LOGS_DIRECTORY + + def __init__(self, disable_logs=False, 
reset_logfiles=True): + self.stop_event = None + self.LOGS_DIRECTORY = None + self.WORKER_LOGS_DIRECTORY = None + self.LOG_LEVEL = None + self.VERBOSE = None + self.QUIET = None + self.DEGBUG = None + + self.logfile_config = None + self.housekeeper_job_config = None + + self._reset_logfiles = reset_logfiles + + self.formatters = { + "simple": { + "class": "logging.Formatter", + "format": AugurLogging.simple_format_string + }, + "verbose": { + "class": "logging.Formatter", + "format": AugurLogging.verbose_format_string + }, + "cli": { + "class": "logging.Formatter", + "format": AugurLogging.cli_format_string + }, + "config": { + "class": "logging.Formatter", + "format": AugurLogging.config_format_string + }, + "error": { + "class": "logging.Formatter", + "format": AugurLogging.error_format_string + } + } + + self._configure_cli_logger() + + level = logging.INFO + config_handler = StreamHandler() + config_handler.setFormatter(Formatter(fmt=AugurLogging.config_format_string)) + config_handler.setLevel(level) + + config_initialization_logger = logging.getLogger("augur.config") + config_initialization_logger.setLevel(level) + config_initialization_logger.handlers = [] + config_initialization_logger.addHandler(config_handler) + config_initialization_logger.propagate = False + + coloredlogs.install(level=level, logger=config_initialization_logger, fmt=AugurLogging.config_format_string) + + if disable_logs: + self._disable_all_logging() + + + def _disable_all_logging(self): + for logger in ["augur", "augur.application", "augur.housekeeper", "augur.config", "augur.cli", "root"]: + lg = logging.getLogger(logger) + lg.disabled = True + + def _configure_cli_logger(self): + cli_handler = StreamHandler() + cli_handler.setLevel(logging.INFO) + + cli_logger = logging.getLogger("augur.cli") + cli_logger.setLevel(logging.INFO) + cli_logger.handlers = [] + cli_logger.addHandler(cli_handler) + cli_logger.propagate = False + + coloredlogs.install(level=logging.INFO, logger=cli_logger, fmt=AugurLogging.cli_format_string) + + def _set_config(self, augur_config): + self.LOGS_DIRECTORY = AugurLogging.get_log_directories(augur_config, self._reset_logfiles) + self.LOG_LEVEL = augur_config.get_value("Logging", "log_level") + self.QUIET = int(augur_config.get_value("Logging", "quiet")) + self.DEBUG = int(augur_config.get_value("Logging", "debug")) + self.VERBOSE = int(augur_config.get_value("Logging", "verbose")) + # self.JOB_NAMES = [job["model"] for job in deepcopy(augur_config.get_value("Housekeeper", "jobs"))] + + if self.QUIET: + self._disable_all_logging() + + if self.DEBUG: + self.LOG_LEVEL = "DEBUG" + self.VERBOSE = True + + if self.VERBOSE: + self.FORMATTER = "verbose" + else: + self.FORMATTER = "simple" + self.format_string = self.formatters[self.FORMATTER]["format"] + + def configure_logging(self, augur_config): + self._set_config(augur_config) + self._configure_logfiles() + self._configure_cli_logger() + self._configure_gunicorn_logging() + logger.debug("Loggers are fully configured") + + def _configure_logfiles(self): + self.logfile_config = { + "version": 1, + "disable_existing_loggers": True, + "formatters": self.formatters, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "formatter": self.FORMATTER, + "level": self.LOG_LEVEL + }, + "logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "augur.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "errorfile": { + "class": "logging.FileHandler", + "filename": 
self.LOGS_DIRECTORY + "augur.err", + "mode": "a", + "level": logging.WARNING, + "formatter": "error" + }, + "server_logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "gunicorn.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "housekeeper_logfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "housekeeper.log", + "mode": "a", + "level": self.LOG_LEVEL, + "formatter": self.FORMATTER + }, + "housekeeper_errorfile": { + "class": "logging.FileHandler", + "filename": self.LOGS_DIRECTORY + "housekeeper.err", + "mode": "a", + "level": logging.WARNING, + "formatter": "error", + }, + }, + "loggers": { + "augur": { + "handlers": ["console", "logfile", "errorfile"], + "level": self.LOG_LEVEL + }, + "augur.server": { + "handlers": ["server_logfile"], + "level": self.LOG_LEVEL, + "propagate": False + }, + "augur.housekeeper": { + "handlers": ["housekeeper_logfile", "housekeeper_errorfile"], + "level": self.LOG_LEVEL, + }, + "augur.jobs": { + "handlers": ["housekeeper_logfile", "housekeeper_errorfile", "logfile", "errorfile"], + "level": self.LOG_LEVEL, + "propagate": False + } + }, + "root": { + "handlers": [], + "level": self.LOG_LEVEL + } + } + + logging.config.dictConfig(self.logfile_config) + for logger_name in ["augur", "augur.housekeeper", "augur.jobs"]: + coloredlogs.install(logger=logging.getLogger(logger_name), level=self.LOG_LEVEL, fmt=self.format_string) + + logger.debug("Logfiles initialized") + logger.debug("Logs will be written to: " + self.LOGS_DIRECTORY) + + def initialize_housekeeper_logging_listener(self): + queue = Queue() + self.housekeeper_job_config = { + "version": 1, + "disable_existing_loggers": True, + "formatters": self.formatters, + "handlers": { + "queue": { + "class": "logging.handlers.QueueHandler", + "queue": queue + } + }, + "root": { + "handlers": ["queue"], + "level": self.LOG_LEVEL + } + } + + stop_event = Event() + self.lp = Process(target=logging_listener_process, name='housekeeper_logging_listener', + args=(queue, stop_event, self.logfile_config)) + self.lp.start() + sleep(2) # just to let it fully start up + self.stop_event = stop_event + logger.debug("Houseekeeper logging listener initialized") + + def get_config(self): + return { + "log_level": self.LOG_LEVEL, + "quiet": self.QUIET, + "verbose": self.VERBOSE, + "debug": self.DEBUG, + "format_string": self.format_string + } + + def _configure_gunicorn_logging(self): + gunicorn_log_file = self.LOGS_DIRECTORY + "gunicorn.log" + self.gunicorn_logging_options = { + "errorlog": gunicorn_log_file, + "accesslog": gunicorn_log_file, + "loglevel": self.LOG_LEVEL, + "capture_output": False + } + +def logging_listener_process(queue, stop_event, config): + """ + This could be done in the main process, but is just done in a separate + process for illustrative purposes. + + This initialises logging according to the specified configuration, + starts the listener and waits for the main process to signal completion + via the event. The listener is then stopped, and the process exits. + """ + logging.config.dictConfig(config) + listener = logging.handlers.QueueListener(queue, AugurLoggingHandler()) + listener.start() + try: + stop_event.wait() + except KeyboardInterrupt: + pass + finally: + listener.stop() + +class AugurLoggingHandler: + """ + A simple handler for logging events. 
It runs in the listener process and + dispatches events to loggers based on the name in the received record, + which then get dispatched, by the logging system, to the handlers + configured for those loggers. + """ + + def handle(self, record): + if record.name == "root": + logger = logging.getLogger() + else: + logger = logging.getLogger(record.name) + + record.processName = '%s (for %s)' % (current_process().name, record.processName) + logger.handle(record) diff --git a/augur/metrics/__init__.py b/augur/metrics/__init__.py --- a/augur/metrics/__init__.py +++ b/augur/metrics/__init__.py @@ -1 +1,38 @@ -from .metrics import Metrics \ No newline at end of file +import os +import glob +import sys +import inspect +import types +import importlib +import logging + +logger = logging.getLogger(__name__) + +class Metrics(): + def __init__(self, app): + logger.debug("Loading metrics") + self.database = app.database + self.spdx_db = app.spdx_database + + self.models = [] #TODO: standardize this + for filename in glob.iglob("augur/metrics/**"): + file_id = get_file_id(filename) + if not file_id.startswith('__') and filename.endswith('.py') and file_id != "metrics": + self.models.append(file_id) + + for model in self.models: + importlib.import_module(f"augur.metrics.{model}") + add_metrics(self, f"augur.metrics.{model}") + +def get_file_id(path): + return os.path.splitext(os.path.basename(path))[0] + +def add_metrics(metrics, module_name): + # find all unbound endpoint functions objects + # (ones that have metadata) defined the given module_name + # and bind them to the metrics class + for name, obj in inspect.getmembers(sys.modules[module_name]): + if inspect.isfunction(obj) == True: + if hasattr(obj, 'is_metric') == True: + setattr(metrics, name, types.MethodType(obj, metrics)) + diff --git a/augur/metrics/insight.py b/augur/metrics/insight.py --- a/augur/metrics/insight.py +++ b/augur/metrics/insight.py @@ -6,8 +6,7 @@ import pandas as pd from augur.util import register_metric - -@register_metric() +@register_metric(type="repo_group_only") def top_insights(self, repo_group_id, num_repos=6): """ Timeseries of pull request acceptance rate (expressed as the ratio of pull requests merged on a date to the count of pull requests opened on a date) diff --git a/augur/metrics/metrics.py b/augur/metrics/metrics.py deleted file mode 100644 --- a/augur/metrics/metrics.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import glob -import sys -import inspect -import types -import importlib -from augur import logger - -class Metrics(): - def __init__(self, app): - self.database = app.database - self.spdx_db = app.spdx_db - - models = [] #TODO: standardize this - for filename in glob.iglob("augur/metrics/**"): - file_id = get_file_id(filename) - if not file_id.startswith('__') and filename.endswith('.py') and file_id != "metrics": - models.append(file_id) - - for model in models: - importlib.import_module(f"augur.metrics.{model}") - - for model in models: - add_metrics(self, f"augur.metrics.{model}") - -def get_file_id(path): - return os.path.splitext(os.path.basename(path))[0] - -def add_metrics(metrics, module_name): - # find all unbound endpoint functions objects - # (ones that have metadata) defined the given module_name - # and bind them to the metrics class - # Derek are you proud of me - for name, obj in inspect.getmembers(sys.modules[module_name]): - if inspect.isfunction(obj) == True: - if hasattr(obj, 'metadata') == True: - setattr(metrics, name, types.MethodType(obj, metrics)) - diff --git 
a/augur/metrics/release.py b/augur/metrics/release.py new file mode 100644 --- /dev/null +++ b/augur/metrics/release.py @@ -0,0 +1,88 @@ +""" +Metrics that provide data about releases +""" + +import datetime +import sqlalchemy as s +import pandas as pd +from augur.util import register_metric + +@register_metric() +def releases(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None): + """ Returns a timeseris of new reviews or pull requests opened + + :param repo_group_id: The repository's repo_group_id + :param repo_id: The repository's repo_id, defaults to None + :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day' + :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' + :param end_date: Specifies the end date, defaults to datetime.now() + :return: DataFrame of new releases/period + """ + if not begin_date: + begin_date = '1970-1-1' + if not end_date: + end_date = datetime.datetime.now().strftime('%Y-%m-%d') + + if not repo_id: + reviews_SQL = s.sql.text(""" + SELECT + res.repo_name, + res.release_id, + res.release_name, + res.release_description, + res.release_author, + res.release_created_at, + res.release_published_at, + res.release_updated_at, + res.release_is_draft, + res.release_is_prerelease, + res.release_tag_name, + res.release_url, + COUNT(res) + FROM ( + SELECT + releases.* + repo.repo_name + FROM + releases LEFT JOIN repo ON releases.repo_id = repo.repo_id + WHERE + repo.repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id ) + ) as res + GROUP BY releases.repo_id, releases.release_id + ORDER BY releases.release_published_at DESC + """) + + results = pd.read_sql(reviews_SQL, self.database, + params={'period': period, 'repo_group_id': repo_group_id, + 'begin_date': begin_date, 'end_date': end_date }) + return results + + else: + reviews_SQL = s.sql.text(""" + SELECT + repo.repo_name, + releases.release_id, + releases.release_name, + releases.release_description, + releases.release_author, + releases.release_created_at, + releases.release_published_at, + releases.release_updated_at, + releases.release_is_draft, + releases.release_is_prerelease, + releases.release_tag_name, + releases.release_url, + COUNT(releases) + FROM + releases LEFT JOIN repo ON releases.repo_id = repo.repo_id + GROUP BY repo.repo_id, releases.release_id + ORDER BY releases.release_published_at DESC + """) + + results = pd.read_sql(reviews_SQL, self.database, + params={'period': period, 'repo_id': repo_id, + 'begin_date': begin_date, 'end_date': end_date}) + return results + +def create_release_metrics(metrics): + add_metrics(metrics, __name__) \ No newline at end of file diff --git a/augur/metrics/repo_meta.py b/augur/metrics/repo_meta.py --- a/augur/metrics/repo_meta.py +++ b/augur/metrics/repo_meta.py @@ -5,9 +5,12 @@ import datetime import sqlalchemy as s import pandas as pd -from augur import logger -from augur.util import register_metric import math +import logging + +from augur.util import register_metric + +logger = logging.getLogger("augur") @register_metric() def code_changes(self, repo_group_id, repo_id=None, period='week', begin_date=None, end_date=None): @@ -321,7 +324,7 @@ def languages(self, repo_group_id, repo_id=None): results = pd.read_sql(languages_SQL, self.database, params={'repo_id': repo_id}) return results -@register_metric() +@register_metric(type="license") def license_files(self, license_id, spdx_binary, repo_group_id, repo_id=None,): """Returns the files related to a license 
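# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: the auto-registration pattern behind
# the reworked augur/metrics/__init__.py, register_metric() and routes/__init__.py.
# A decorator tags plain functions with metadata, and a loader binds every tagged
# function in a module onto a container object as a method. Names here are
# generic; only inspect, sys and types from the standard library are assumed.
import inspect
import sys
import types

def register_metric(**meta):
    def decorate(function):
        function.is_metric = True
        function.metadata = {"endpoint": function.__name__.replace("_", "-"), **meta}
        return function
    return decorate

class MetricsContainer:
    def bind_module(self, module_name):
        # Attach every decorated function found in the module as a bound method.
        for name, obj in inspect.getmembers(sys.modules[module_name]):
            if inspect.isfunction(obj) and getattr(obj, "is_metric", False):
                setattr(self, name, types.MethodType(obj, self))

@register_metric(type="standard")
def example_metric(self, repo_id):
    return {"repo_id": repo_id, "value": 42}

if __name__ == "__main__":
    container = MetricsContainer()
    container.bind_module(__name__)
    print(container.example_metric(25430))   # {'repo_id': 25430, 'value': 42}
# ---------------------------------------------------------------------------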
diff --git a/augur/models/__init__.py b/augur/models/__init__.py deleted file mode 100644 --- a/augur/models/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from sqlalchemy.orm import sessionmaker -from .user import User -from .repo import Repo, RepoGroup - - -__all__ = ['User', 'RepoGroup', 'Repo'] \ No newline at end of file diff --git a/augur/models/common.py b/augur/models/common.py deleted file mode 100644 --- a/augur/models/common.py +++ /dev/null @@ -1,2 +0,0 @@ -from sqlalchemy.ext.declarative import declarative_base -Base = declarative_base() \ No newline at end of file diff --git a/augur/models/repo.py b/augur/models/repo.py deleted file mode 100644 --- a/augur/models/repo.py +++ /dev/null @@ -1,48 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime -from sqlalchemy.orm import relationship -from .common import Base -from .user import user_has_repo_group - -repo_group_has_project = Table('repo_group_has_project', - Base.metadata, - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), - Column('repo_id', ForeignKey('repo.url'), primary_key=True), -) - -class Repo(Base): - """ - The Repo object models a VCS repository - """ - __tablename__ = 'repo' - - # Keys - url = Column(String(1024), primary_key=True) - vcs = Column(String(64), default='git') - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - repo_groups_member_of = relationship('RepoGroup', secondary=repo_group_has_project, back_populates='projects') - - def __repr__(self): - return f"<Repo(giturl='{self.password}')>" - - -class RepoGroup(Base): - """ - The RepoGroup class models lists of projects that a user wants to keep track of - """ - __tablename__ = 'repo_group' - - # Keys - id = Column(Integer, primary_key=True) - name = Column(String(128)) - - # Fields - created_at = Column(DateTime, default=datetime.datetime.utcnow) - - # Foreign Keys - projects = relationship('Repo', secondary=repo_group_has_project, back_populates='repo_groups_member_of') - users_of = relationship('User', secondary=user_has_repo_group, back_populates='repo_groups') \ No newline at end of file diff --git a/augur/models/user.py b/augur/models/user.py deleted file mode 100644 --- a/augur/models/user.py +++ /dev/null @@ -1,61 +0,0 @@ -import datetime -from sqlalchemy import Table, ForeignKey, Column, Integer, String, DateTime, Boolean -from sqlalchemy.orm import relationship -from sqlalchemy.ext.hybrid import hybrid_property -from .common import Base -from werkzeug.security import generate_password_hash, check_password_hash -from flask_login import UserMixin - -user_has_repo_group = Table('user_has_repo_group', - Base.metadata, - Column('user_id', ForeignKey('user.id'), primary_key=True), - Column('repo_group_id', ForeignKey('repo_group.id'), primary_key=True), -) - -class User(Base): - """ - The User object models users in the database. 
- """ - __tablename__ = 'user' - - # Keys - id = Column(Integer, primary_key=True) - username = Column(String(64), unique=True, nullable=False) - email = Column(String(64), unique=True, nullable=False) - - # Fields - password_hash = Column(String(128)) - email_confirmation_token = Column(String(128), nullable=True) - created_at = Column(DateTime, default=datetime.datetime.utcnow) - password_updated_at = Column(DateTime, default=datetime.datetime.utcnow) - last_login_at = Column(DateTime, nullable=True) - authenticated = Column(Boolean, default=False) - active = Column(Boolean, default=True) - administrator = Column(Boolean, default=False) - - # Foreign Keys - repo_groups = relationship('RepoGroup', secondary=user_has_repo_group, back_populates='users_of') - - def get_id(self): - return self.id - - def __repr__(self): - return f"<User(username='{self.username}', email='{self.email}')>" - - @hybrid_property - def password(self): - return self.password_hash - - @password.setter - def password(self, password): - self.password_hash = generate_password_hash(password) - - def check_password(self, password): - return check_password_hash(self.password_hash, password) - - def is_authenticated(self): - return self.authenticated - - def is_active(self): - # False as we do not support annonymity - return self.active diff --git a/augur/routes/__init__.py b/augur/routes/__init__.py --- a/augur/routes/__init__.py +++ b/augur/routes/__init__.py @@ -1,35 +1,34 @@ + +import logging import importlib import os import glob +import sys +import inspect -from augur import logger +logger = logging.getLogger(__name__) def get_route_files(): route_files = [] - metric_route_files = [] def get_file_id(path): return os.path.splitext(os.path.basename(path))[0] - for filename in glob.iglob("**/routes/*"): + for filename in glob.iglob("augur/routes/*"): file_id = get_file_id(filename) if not file_id.startswith('__') and filename.endswith('.py'): route_files.append(file_id) - for filename in glob.iglob("**/routes/metrics/*"): - file_id = get_file_id(filename) - if not file_id.startswith('__') and filename.endswith('.py'): - metric_route_files.append(file_id) - - return route_files, metric_route_files + return route_files -route_files, metric_route_files = get_route_files() +route_files = get_route_files() def create_routes(server): for route_file in route_files: module = importlib.import_module('.' + route_file, 'augur.routes') module.create_routes(server) - for route_file in metric_route_files: - module = importlib.import_module('.' + route_file, 'augur.routes.metrics') - module.create_routes(server) + for name, obj in inspect.getmembers(server.augur_app.metrics): + if hasattr(obj, 'is_metric') == True: + if obj.metadata['type'] == "standard": + server.add_standard_metric(obj, obj.metadata['endpoint']) diff --git a/augur/routes/batch.py b/augur/routes/batch.py --- a/augur/routes/batch.py +++ b/augur/routes/batch.py @@ -10,9 +10,10 @@ from sqlalchemy import exc from flask import request, Response from augur.util import metric_metadata -from augur import logger import json +logger = logging.getLogger(__name__) + def create_routes(server): @server.app.route('/{}/batch'.format(server.api_version), methods=['GET', 'POST']) diff --git a/augur/routes/broker.py b/augur/routes/broker.py --- a/augur/routes/broker.py +++ b/augur/routes/broker.py @@ -9,6 +9,9 @@ import requests from flask import request, Response +logger = logging.getLogger(__name__) + +# TODO: not this... 
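# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: the dispatch rule the /task route
# below applies when several workers of the same type are registered -- compare
# the combined length of each worker's user and maintain queues and hand the task
# to the least loaded one. The worker records here are simplified placeholder
# dicts; in the broker itself they live in a multiprocessing manager.
def pick_least_loaded(workers):
    def load(record):
        return len(record["user_queue"]) + len(record["maintain_queue"])
    return min(workers, key=lambda worker_id: load(workers[worker_id]))

example_workers = {
    "worker_a": {"user_queue": ["task1", "task2"], "maintain_queue": []},
    "worker_b": {"user_queue": [], "maintain_queue": ["task3"]},
}
print(pick_least_loaded(example_workers))   # worker_b
# ---------------------------------------------------------------------------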
def worker_start(worker_name=None): process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True) @@ -26,12 +29,12 @@ def send_task(worker_proxy): j = r.json() if 'status' not in j: - logging.info("Worker: {}'s heartbeat did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) + logger.error("Worker: {}'s heartbeat did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) worker_proxy['status'] = 'Disconnected' return if j['status'] != 'alive': - logging.info("Worker: {} is busy, setting its status as so.\n".format(worker_id)) + logger.info("Worker: {} is busy, setting its status as so.\n".format(worker_id)) return # Want to check user-created job requests first @@ -43,16 +46,16 @@ def send_task(worker_proxy): new_task = maintain_queue.pop(0) else: - logging.info("Both queues are empty for worker {}\n".format(worker_id)) + logger.debug("Both queues are empty for worker {}\n".format(worker_id)) worker_proxy['status'] = 'Idle' return - logging.info("Worker {} is idle, preparing to send the {} task to {}\n".format(worker_id, new_task['display_name'], task_endpoint)) + logger.info("Worker {} is idle, preparing to send the {} task to {}\n".format(worker_id, new_task['display_name'], task_endpoint)) try: requests.post(task_endpoint, json=new_task) worker_proxy['status'] = 'Working' except: - logging.info("Sending Worker: {} a task did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) + logger.error("Sending Worker: {} a task did not return a response, setting worker status as 'Disconnected'\n".format(worker_id)) worker_proxy['status'] = 'Disconnected' # If the worker died, then restart it worker_start(worker_id.split('.')[len(worker_id.split('.')) - 2]) @@ -71,9 +74,9 @@ def task(): for given_component in list(task['given'].keys()): given.append(given_component) model = task['models'][0] - logging.info("Broker recieved a new user task ... checking for compatible workers for given: " + str(given) + " and model(s): " + str(model) + "\n") + logger.info("Broker recieved a new user task ... 
checking for compatible workers for given: " + str(given) + " and model(s): " + str(model) + "\n") - logging.info("Broker's list of all workers: {}\n".format(server.broker._getvalue().keys())) + logger.debug("Broker's list of all workers: {}\n".format(server.broker._getvalue().keys())) worker_found = False compatible_workers = {} @@ -83,7 +86,7 @@ def task(): if type(server.broker[worker_id]._getvalue()) != dict: continue - logging.info("Considering compatible worker: {}\n".format(worker_id)) + logger.info("Considering compatible worker: {}\n".format(worker_id)) # Group workers by type (all gh workers grouped together etc) worker_type = worker_id.split('.')[len(worker_id.split('.'))-2] @@ -91,28 +94,28 @@ def task(): # Make worker that is prioritized the one with the smallest sum of task queues if (len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue'])) < min([compatible_workers[w]['task_load'] for w in compatible_workers.keys() if worker_type == w]): - logging.info("Worker id: {} has the smallest task load encountered so far: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']))) + logger.info("Worker id: {} has the smallest task load encountered so far: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']))) compatible_workers[worker_type]['task_load'] = len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']) compatible_workers[worker_type]['worker_id'] = worker_id for worker_type in compatible_workers.keys(): worker_id = compatible_workers[worker_type]['worker_id'] worker = server.broker[worker_id] - logging.info("Final compatible worker chosen: {} with smallest task load: {} found to work on task: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']), task)) + logger.info("Final compatible worker chosen: {} with smallest task load: {} found to work on task: {}\n".format(worker_id, len(server.broker[worker_id]['user_queue']) + len(server.broker[worker_id]['maintain_queue']), task)) if task['job_type'] == "UPDATE": worker['user_queue'].append(task) - logging.info("Added task for model: {}. New length of worker {}'s user queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['user_queue'])))) + logger.info("Added task for model: {}. New length of worker {}'s user queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['user_queue'])))) elif task['job_type'] == "MAINTAIN": worker['maintain_queue'].append(task) - logging.info("Added task for model: {}. New length of worker {}'s maintain queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['maintain_queue'])))) + logger.info("Added task for model: {}. 
New length of worker {}'s maintain queue: {}\n".format(model, worker_id, str(len(server.broker[worker_id]['maintain_queue'])))) if worker['status'] == 'Idle': send_task(worker) worker_found = True # Otherwise, let the frontend know that the request can't be served if not worker_found: - logging.info("Augur does not have knowledge of any workers that are capable of handing the request: {}\n".format(task)) + logger.warning("Augur does not have knowledge of any workers that are capable of handing the request: {}\n".format(task)) return Response(response=task, status=200, @@ -124,7 +127,7 @@ def worker(): and telling the broker to add this worker to the set it maintains """ worker = request.json - logging.info("Recieved HELLO message from worker: {}\n".format(worker['id'])) + logger.info("Recieved HELLO message from worker: {}\n".format(worker['id'])) if worker['id'] not in server.broker: server.broker[worker['id']] = server.manager.dict() server.broker[worker['id']]['id'] = worker['id'] @@ -139,7 +142,7 @@ def worker(): server.broker[worker['id']]['status'] = 'Idle' server.broker[worker['id']]['location'] = worker['location'] else: - logging.info("Worker: {} has been reconnected.\n".format(worker['id'])) + logger.info("Worker: {} has been reconnected.\n".format(worker['id'])) models = server.broker[worker['id']]['models'] givens = server.broker[worker['id']]['given'] user_queue = server.broker[worker['id']]['user_queue'] @@ -157,7 +160,7 @@ def worker(): def sync_queue(): task = request.json worker = task['worker_id'] - logging.info("Message recieved that worker {} completed task: {}\n".format(worker,task)) + logger.info("Message recieved that worker {} completed task: {}\n".format(worker,task)) try: models = server.broker[worker]['models'] givens = server.broker[worker]['given'] @@ -167,8 +170,8 @@ def sync_queue(): if server.broker[worker]['status'] != 'Disconnected': send_task(server.broker[worker]) except Exception as e: - logging.info("Ran into error: {}\n".format(repr(e))) - logging.info("A past instance of the {} worker finished a previous leftover task.\n".format(worker)) + logger.error("Ran into error: {}\n".format(repr(e))) + logger.error("A past instance of the {} worker finished a previous leftover task.\n".format(worker)) return Response(response=task, status=200, @@ -190,7 +193,7 @@ def get_status(): @server.app.route('/{}/workers/remove'.format(server.api_version), methods=['POST']) def remove_worker(): worker = request.json - logging.info("Recieved a message to disconnect worker: {}\n".format(worker)) + logger.info("Recieved a message to disconnect worker: {}\n".format(worker)) server.broker[worker['id']]['status'] = 'Disconnected' return Response(response=worker, status=200, @@ -200,13 +203,13 @@ def remove_worker(): def task_error(): task = request.json worker_id = task['worker_id'] - logging.info("Recieved a message that {} ran into an error on task: {}\n".format(worker_id, task)) + logger.error("Recieved a message that {} ran into an error on task: {}\n".format(worker_id, task)) if worker_id in server.broker: if server.broker[worker_id]['status'] != 'Disconnected': - logging.info("{} ran into error while completing task: {}\n".format(worker_id, task)) + logger.error("{} ran into error while completing task: {}\n".format(worker_id, task)) send_task(server.broker[worker_id]) else: - logging.info("A previous instance of {} ran into error while completing task: {}\n".format(worker_id, task)) + logger.error("A previous instance of {} ran into error while completing task: 
{}\n".format(worker_id, task)) return Response(response=request.json, status=200, mimetype="application/json") \ No newline at end of file diff --git a/augur/routes/manager.py b/augur/routes/manager.py --- a/augur/routes/manager.py +++ b/augur/routes/manager.py @@ -10,6 +10,9 @@ from sqlalchemy import exc from flask import request, Response import json +from augur.config import AugurConfig +import os + def create_routes(server): @@ -164,9 +167,17 @@ def get_inserted_repo(groupid, repoid, reponame, groupname, url): return inserted_repo class Repo_insertion_manager(): + ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + def __init__(self, organization_name, database_connection): self.org = organization_name self.db = database_connection + ## added for keys + self._root_augur_dir = Repo_insertion_manager.ROOT_AUGUR_DIR + self.augur_config = AugurConfig(self._root_augur_dir) + + ########## + def get_existing_repos(self, group_id): """returns repos belonging to repogroup in augur db""" @@ -178,12 +189,33 @@ def get_existing_repos(self, group_id): result = self.db.execute(select_repos_query) return result.fetchall() +## This doesn't permit importing of an individual's repo, as they don't show up under "orgs" +# def group_exists_gh(self): +# url = url = "https://api.github.com/orgs/{}".format(self.org) +# res = requests.get(url).json() +# try: +# if res['message'] == "Not Found": +# return False +# except KeyError: +# return True + +## Revised Version of Method def group_exists_gh(self): url = url = "https://api.github.com/orgs/{}".format(self.org) - res = requests.get(url).json() + ## attempting to add key due to rate limiting + gh_api_key = self.augur_config.get_value('Database', 'key') + self.headers = {'Authorization': 'token %s' % gh_api_key} + #r = requests.get(url=cntrb_url, headers=self.headers) +####### Original request code +# res = requests.get(url).json() +######## + res = requests.get(url=url, headers=self.headers).json() try: if res['message'] == "Not Found": - return False + url = url = "https://api.github.com/users/{}".format(self.org) + res = requests.get(url=url, headers=self.headers).json() + if res['message'] == "Not Found": + return False except KeyError: return True @@ -229,10 +261,12 @@ def insert_repo_group(self): def fetch_repos(self): """uses the github api to return repos belonging to the given organization""" + gh_api_key = self.augur_config.get_value('Database', 'key') + self.headers = {'Authorization': 'token %s' % gh_api_key} repos = [] page = 1 url = self.paginate(page) - res = requests.get(url).json() + res = requests.get(url, headers=self.headers).json() while res: for repo in res: repos.append(repo['name']) @@ -240,11 +274,33 @@ def fetch_repos(self): res = requests.get(self.paginate(page)).json() return repos +## Modified pagination to account for github orgs that look like orgs but are actually users. 
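# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: the behaviour the reworked
# pagination in this route file is aiming for -- send the GitHub token in the
# Authorization header, walk /orgs/<name>/repos 100 at a time, and fall back to
# /users/<name>/repos when the name belongs to a user rather than an organization.
# The token and name are placeholders; only the public GitHub REST API and the
# requests library are assumed.
import requests

def fetch_repo_names(name, token):
    headers = {"Authorization": "token %s" % token}
    base = "https://api.github.com/orgs/{}/repos".format(name)
    probe = requests.get("https://api.github.com/orgs/{}".format(name), headers=headers)
    if probe.status_code == 404:
        base = "https://api.github.com/users/{}/repos".format(name)
    names, page = [], 1
    while True:
        res = requests.get(base, headers=headers,
                           params={"per_page": 100, "page": page}).json()
        if not isinstance(res, list) or not res:
            break
        names.extend(repo["name"] for repo in res)
        page += 1
    return names

# e.g. fetch_repo_names("chaoss", gh_api_key), with gh_api_key read from config
# ---------------------------------------------------------------------------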
def paginate(self, page): +### Modified here to incorporate the use of a GitHub API Key + gh_api_key = self.augur_config.get_value('Database', 'key') + self.headers = {'Authorization': 'token %s' % gh_api_key} url = "https://api.github.com/orgs/{}/repos?per_page=100&page={}" + res = requests.get(url, headers=self.headers).json() + if res['message'] == "Not Found": + url = "https://api.github.com/users/{}/repos?per_page=100&page={}" + res = requests.get(url=url, headers=self.headers).json() return url.format(self.org, str(page)) - + + #r = requests.get(url=cntrb_url, headers=self.headers) +####### Original request code +# res = requests.get(url).json() +######## + res = requests.get(url=url, headers=self.headers).json() + + + +# url = "https://api.github.com/orgs/{}/repos?per_page=100&page={}" +# res = requests.get(url).json() +# if res['message'] == "Not Found": +# url = "https://api.github.com/users/{}/repos?per_page=100&page={}" +# res = requests.get(url).json() +# return url.format(self.org, str(page)) class Git_string(): """ represents possible repo, org or username arguments """ @@ -285,15 +341,15 @@ def get_repo_name(self): repo = self.name return repo[repo.find('/')+1:] -def authenticate_request(app, request): +def authenticate_request(augur_app, request): # do I like doing it like this? not at all # do I have the time to implement a better solution right now? not at all - user = app.read_config('Database', 'user') - password = app.read_config('Database', 'password') - host = app.read_config('Database', 'host') - port = app.read_config('Database', 'port') - dbname = app.read_config('Database', 'name') + user = augur_app.config.get_value('Database', 'user') + password = augur_app.config.get_value('Database', 'password') + host = augur_app.config.get_value('Database', 'host') + port = augur_app.config.get_value('Database', 'port') + dbname = augur_app.config.get_value('Database', 'name') DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( user, password, host, port, dbname diff --git a/augur/routes/metrics/commit.py b/augur/routes/metrics/commit.py deleted file mode 100644 --- a/augur/routes/metrics/commit.py +++ /dev/null @@ -1,8 +0,0 @@ -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.annual_commit_count_ranked_by_new_repo_in_repo_group,'annual-commit-count-ranked-by-new-repo-in-repo-group') - - server.add_standard_metric(metrics.annual_commit_count_ranked_by_repo_in_repo_group,'annual-commit-count-ranked-by-repo-in-repo-group') - diff --git a/augur/routes/metrics/contributor.py b/augur/routes/metrics/contributor.py deleted file mode 100644 --- a/augur/routes/metrics/contributor.py +++ /dev/null @@ -1,17 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.contributors, 'contributors') - - server.add_standard_metric(metrics.contributors_new, 'contributors-new') - - server.add_standard_metric(metrics.committers, 'committers') - - server.add_standard_metric(metrics.lines_changed_by_author,'lines-changed-by-author') - - server.add_standard_metric(metrics.top_committers, 'top-committers') - - server.add_standard_metric(metrics.contributors_code_development, 'contributors-code-development') \ No newline at end of file diff --git a/augur/routes/metrics/experimental.py b/augur/routes/metrics/experimental.py deleted file mode 100644 --- a/augur/routes/metrics/experimental.py +++ /dev/null @@ -1,6 +0,0 @@ - -def create_routes(server): - - metrics = 
server.augur_app.metrics - - diff --git a/augur/routes/metrics/insight.py b/augur/routes/metrics/insight.py deleted file mode 100644 --- a/augur/routes/metrics/insight.py +++ /dev/null @@ -1,13 +0,0 @@ -#SPDX-License-Identifier: MIT -from flask import Response - -def create_routes(server): - - metrics = server.augur_app.metrics - - @server.app.route(f"/{server.api_version}/repo-groups/<repo_group_id>/top-insights") - def top_insights(repo_group_id): - data = server.transform(metrics.top_insights, args=[repo_group_id]) - return Response(response=data, - status=200, - mimetype="application/json") diff --git a/augur/routes/metrics/issue.py b/augur/routes/metrics/issue.py deleted file mode 100644 --- a/augur/routes/metrics/issue.py +++ /dev/null @@ -1,39 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.issues_new, 'issues-new') - - server.add_standard_metric(metrics.issues_active, 'issues-active') - - server.add_standard_metric(metrics.issues_closed, 'issues-closed') - - server.add_standard_metric(metrics.issue_duration, 'issue-duration') - - server.add_standard_metric(metrics.issue_participants, 'issue-participants') - - server.add_standard_metric(metrics.issue_backlog, 'issue-backlog') - - server.add_standard_metric(metrics.issue_throughput, 'issue-throughput') - - server.add_standard_metric(metrics.issues_first_time_opened, 'issues-first-time-opened') - - server.add_standard_metric(metrics.issues_first_time_closed, 'issues-first-time-closed') - - server.add_standard_metric(metrics.open_issues_count, 'open-issues-count') - - server.add_standard_metric(metrics.closed_issues_count, 'closed-issues-count') - - server.add_standard_metric(metrics.issues_open_age, 'issues-open-age') - - server.add_standard_metric(metrics.issues_closed_resolution_duration, 'issues-closed-resolution-duration') - - server.add_standard_metric(metrics.issues_maintainer_response_duration, 'issues-maintainer-response-duration') - - server.add_standard_metric(metrics.average_issue_resolution_time, 'average-issue-resolution-time') - - server.add_standard_metric(metrics.issue_comments_mean, 'issue-comments-mean') - - server.add_standard_metric(metrics.issue_comments_mean_std, 'issue-comments-mean-std') diff --git a/augur/routes/metrics/message.py b/augur/routes/metrics/message.py deleted file mode 100644 --- a/augur/routes/metrics/message.py +++ /dev/null @@ -1,6 +0,0 @@ - -def create_routes(server): - - metrics = server.augur_app.metrics - - diff --git a/augur/routes/metrics/platform.py b/augur/routes/metrics/platform.py deleted file mode 100644 --- a/augur/routes/metrics/platform.py +++ /dev/null @@ -1,4 +0,0 @@ - -def create_routes(server): - metrics = server.augur_app.metrics - diff --git a/augur/routes/metrics/pull_request.py b/augur/routes/metrics/pull_request.py deleted file mode 100644 --- a/augur/routes/metrics/pull_request.py +++ /dev/null @@ -1,31 +0,0 @@ -#SPDX-License-Identifier: MIT - -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.reviews, 'reviews') - - server.add_standard_metric(metrics.reviews_accepted, 'reviews-accepted') - - server.add_standard_metric(metrics.reviews_declined, 'reviews-declined') - - server.add_standard_metric(metrics.review_duration, 'review-duration') - - server.add_standard_metric(metrics.pull_requests_merge_contributor_new, 'pull-requests-merge-contributor-new') - - server.add_standard_metric(metrics.pull_request_acceptance_rate, 
'pull-request-acceptance-rate') - - server.add_standard_metric(metrics.pull_requests_closed_no_merge, 'pull-requests-closed-no-merge') - - server.add_standard_metric(metrics.pull_request_merged_status_counts, 'pull-request-merged-status-counts') - - server.add_standard_metric(metrics.pull_request_average_time_to_close, 'pull-request-average-time-to-close') - - server.add_standard_metric(metrics.pull_request_average_time_between_responses, 'pull-request-average-time-between-responses') - - server.add_standard_metric(metrics.pull_request_average_commit_counts, 'pull-request-average-commit-counts') - - server.add_standard_metric(metrics.pull_request_average_event_counts, 'pull-request-average-event-counts') - - server.add_standard_metric(metrics.pull_request_average_time_to_responses_and_close, 'pull-request-average-time-to-responses-and-close') diff --git a/augur/routes/metrics/repo_meta.py b/augur/routes/metrics/repo_meta.py deleted file mode 100644 --- a/augur/routes/metrics/repo_meta.py +++ /dev/null @@ -1,54 +0,0 @@ -#SPDX-License-Identifier: MIT -from flask import Response - -def create_routes(server): - - metrics = server.augur_app.metrics - - server.add_standard_metric(metrics.code_changes, 'code-changes') - - server.add_standard_metric(metrics.code_changes_lines, 'code-changes-lines') - - @server.app.route(f"/{server.api_version}/<license_id>/<spdx_binary>/<repo_group_id>/<repo_id>/license-files") - def get_license_files(license_id, spdx_binary, repo_group_id, repo_id): - arguments = [license_id, spdx_binary, repo_group_id, repo_id] - license_files = server.transform(metrics.license_files, args=arguments) - return Response(response=license_files, - status=200, - mimetype="application/json") - - server.add_standard_metric(metrics.sbom_download, 'sbom-download') - - server.add_standard_metric(metrics.sub_projects, 'sub-projects') - - server.add_standard_metric(metrics.cii_best_practices_badge, 'cii-best-practices-badge') - - server.add_standard_metric(metrics.forks, 'forks') - - server.add_standard_metric(metrics.fork_count, 'fork-count') - - server.add_standard_metric(metrics.languages, 'languages') - - server.add_standard_metric(metrics.license_count, 'license-count') - - server.add_standard_metric(metrics.license_coverage, 'license-coverage') - - server.add_standard_metric(metrics.license_declared, 'license-declared') - - server.add_standard_metric(metrics.stars, 'stars') - - server.add_standard_metric(metrics.stars_count, 'stars-count') - - server.add_standard_metric(metrics.watchers, 'watchers') - - server.add_standard_metric(metrics.watchers_count, 'watchers-count') - - server.add_standard_metric(metrics.annual_lines_of_code_count_ranked_by_new_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-new-repo-in-repo-group') - - server.add_standard_metric(metrics.annual_lines_of_code_count_ranked_by_repo_in_repo_group,'annual-lines-of-code-count-ranked-by-repo-in-repo-group') - - server.add_standard_metric(metrics.lines_of_code_commit_counts_by_calendar_year_grouped,'lines-of-code-commit-counts-by-calendar-year-grouped') - - server.add_standard_metric(metrics.average_weekly_commits, 'average-weekly-commits') - - server.add_standard_metric(metrics.aggregate_summary, 'aggregate-summary') diff --git a/augur/routes/nonstandard_metrics.py b/augur/routes/nonstandard_metrics.py new file mode 100644 --- /dev/null +++ b/augur/routes/nonstandard_metrics.py @@ -0,0 +1,24 @@ +import base64 +import sqlalchemy as s +import pandas as pd +import json +from flask import Response + +def 
create_routes(server): + + metrics = server.augur_app.metrics + + @server.app.route(f"/{server.api_version}/<license_id>/<spdx_binary>/<repo_group_id>/<repo_id>/license-files") + def get_license_files(license_id, spdx_binary, repo_group_id, repo_id): + arguments = [license_id, spdx_binary, repo_group_id, repo_id] + license_files = server.transform(metrics.license_files, args=arguments) + return Response(response=license_files, + status=200, + mimetype="application/json") + + @server.app.route(f"/{server.api_version}/repo-groups/<repo_group_id>/top-insights") + def top_insights(repo_group_id): + data = server.transform(metrics.top_insights, args=[repo_group_id]) + return Response(response=data, + status=200, + mimetype="application/json") \ No newline at end of file diff --git a/augur/routes/util.py b/augur/routes/util.py --- a/augur/routes/util.py +++ b/augur/routes/util.py @@ -6,8 +6,6 @@ def create_routes(server): - metrics = server.augur_app.metrics - @server.app.route('/{}/repo-groups'.format(server.api_version)) def get_all_repo_groups(): #TODO: make this name automatic - wrapper? repoGroupsSQL = s.sql.text(""" @@ -38,7 +36,7 @@ def get_all_repos(): FROM repo left outer join - (select repo_id, COUNT ( commits.cmt_id ) AS commits_all_time from commits group by repo_id ) a on + (select repo_id, COUNT ( distinct commits.cmt_commit_hash ) AS commits_all_time from commits group by repo_id ) a on repo.repo_id = a.repo_id left outer join (select repo_id, count ( * ) as issues_all_time from issues where issues.pull_request IS NULL group by repo_id) b @@ -74,7 +72,7 @@ def get_repos_in_repo_group(repo_group_id): FROM repo left outer join - (select repo_id, COUNT ( commits.cmt_id ) AS commits_all_time from commits group by repo_id ) a on + (select repo_id, COUNT ( distinct commits.cmt_commit_hash ) AS commits_all_time from commits group by repo_id ) a on repo.repo_id = a.repo_id left outer join (select repo_id, count ( issues.issue_id) as issues_all_time from issues where issues.pull_request IS NULL group by repo_id) b @@ -202,7 +200,7 @@ def get_issues(repo_group_id, repo_id=None): @server.app.route('/{}/api-port'.format(server.api_version)) def api_port(): - response = {'port': server.augur_app.read_config('Server', 'port')} + response = {'port': server.augur_app.config.get_value('Server', 'port')} return Response(response=json.dumps(response), status=200, mimetype="application/json") diff --git a/augur/server.py b/augur/server.py --- a/augur/server.py +++ b/augur/server.py @@ -3,51 +3,50 @@ Creates a WSGI server that serves the Augur REST API """ +import glob +import sys +import inspect +import types import json import os import base64 +import logging + from flask import Flask, request, Response, redirect from flask_cors import CORS import pandas as pd + import augur -from augur.util import logger from augur.routes import create_routes AUGUR_API_VERSION = 'api/unstable' -class VueCompatibleFlask(Flask): - jinja_options = Flask.jinja_options.copy() - jinja_options.update(dict( - block_start_string='(%', - block_end_string='%)', - variable_start_string='%%', - variable_end_string='%%', - comment_start_string='(#', - comment_end_string='#)', - )) - +logger = logging.getLogger(__name__) class Server(object): """ Defines Augur's server's behavior """ - def __init__(self, frontend_folder='../frontend/public', manager=None, broker=None, housekeeper=None): + def __init__(self, augur_app=None): """ Initializes the server, creating both the Flask application and Augur application """ # Create Flask 
application - self.app = VueCompatibleFlask(__name__, static_folder=frontend_folder, template_folder=frontend_folder) + self.app = Flask(__name__) + logger.debug("Created Flask app") self.api_version = AUGUR_API_VERSION app = self.app CORS(app) app.url_map.strict_slashes = False - # Create Augur application - self.augur_app = augur.Application() + self.augur_app = augur_app + self.manager = augur_app.manager + self.broker = augur_app.broker + self.housekeeper = augur_app.housekeeper # Initialize cache - expire = int(self.augur_app.read_config('Server', 'cache_expire')) + expire = int(self.augur_app.config.get_value('Server', 'cache_expire')) self.cache = self.augur_app.cache.get_cache('server', expire=expire) self.cache.clear() @@ -55,10 +54,7 @@ def __init__(self, frontend_folder='../frontend/public', manager=None, broker=No self.show_metadata = False - self.manager = manager - self.broker = broker - self.housekeeper = housekeeper - + logger.debug("Creating API routes...") create_routes(self) ##################################### @@ -184,40 +180,3 @@ def add_standard_metric(self, function, endpoint, **kwargs): self.app.route(repo_endpoint)(self.routify(function, 'repo')) self.app.route(repo_group_endpoint)(self.routify(function, 'repo_group')) self.app.route(deprecated_repo_endpoint )(self.routify(function, 'deprecated_repo')) - -def run(): - """ - Runs server with configured hosts/ports - """ - server = Server() - host = server.augur_app.read_config('Server', 'host') - port = server.augur_app.read_config('Server', 'port') - Server().app.run(host=host, port=int(port), debug=True) - -wsgi_app = None -def wsgi(environ, start_response): - """ - Creates WSGI app - """ - global wsgi_app - if (wsgi_app is None): - app_instance = Server() - wsgi_app = app_instance.app - # Stuff to make proxypass work - script_name = environ.get('HTTP_X_SCRIPT_NAME', '') - if script_name: - environ['SCRIPT_NAME'] = script_name - path_info = environ['PATH_INFO'] - if path_info.startswith(script_name): - environ['PATH_INFO'] = path_info[len(script_name):] - - scheme = environ.get('HTTP_X_SCHEME', '') - if scheme: - environ['wsgi.url_scheme'] = scheme - server = environ.get('HTTP_X_FORWARDED_SERVER', '') - if server: - environ['HTTP_HOST'] = server - return wsgi_app(environ, start_response) - -if __name__ == "__main__": - run() diff --git a/augur/util.py b/augur/util.py --- a/augur/util.py +++ b/augur/util.py @@ -8,8 +8,9 @@ import types import sys import beaker +import logging -from augur import logger +logger = logging.getLogger(__name__) __ROOT = os.path.abspath(os.path.dirname(__file__)) def get_data_path(path): @@ -42,7 +43,7 @@ def get_cache(namespace, cache_manager=None): metric_metadata = [] def register_metric(metadata=None, **kwargs): """ - Decorates a function as being a metric + Register a function as being a metric """ if metadata is None: metadata = {} @@ -54,20 +55,19 @@ def decorate(function): if not hasattr(function, 'is_metric'): function.is_metric = True - function.metadata.update(metadata) - if kwargs.get('endpoint_type', None): - endpoint_type = kwargs.pop('endpoint_type') - if endpoint_type == 'repo': - function.metadata['repo_endpoint'] = kwargs.get('endpoint') - else: - function.metadata['group_endpoint'] = kwargs.get('endpoint') - function.metadata.update(dict(kwargs)) function.metadata['tag'] = re.sub('_', '-', function.__name__).lower() - function.metadata['metric_name'] = re.sub('_', ' ', function.__name__).title() + function.metadata['endpoint'] = function.metadata['tag'] + 
function.metadata['name'] = re.sub('_', ' ', function.__name__).title() function.metadata['model'] = re.sub(r'(.*\.)', '', function.__module__) - function.metadata['ID'] = "{}-{}".format(function.metadata['model'].lower(), function.metadata['tag']) + + if kwargs.get('type', None): + function.metadata['type'] = kwargs.get('type') + else: + function.metadata['type'] = "standard" + + function.metadata.update(metadata) return function return decorate \ No newline at end of file diff --git a/conftest.py b/conftest.py new file mode 100644 --- /dev/null +++ b/conftest.py @@ -0,0 +1,31 @@ +import pytest +import re + +from augur.application import Application +from augur.cli.run import initialize_components + +default_repo_id = "25430" +default_repo_group_id = "10" + +def create_full_routes(routes): + full_routes = [] + for route in routes: + route = re.sub("<default_repo_id>", default_repo_id, route) + route = re.sub("<default_repo_group_id>", default_repo_group_id, route) + route = "http://localhost:5000/api/unstable/" + route + full_routes.append(route) + return full_routes + [email protected](scope="session") +def augur_app(): + augur_app = Application(disable_logs=True) + return augur_app + [email protected](scope="session") +def metrics(augur_app): + return augur_app.metrics + [email protected](scope="session") +def client(augur_app): + flask_client = initialize_components(augur_app, disable_housekeeper=True).load() + return flask_client.test_client() diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -25,7 +25,6 @@ exec(open(os.path.join(here, "../../metadata.py")).read()) - sys.path.insert(0, os.path.abspath('../../../augur')) # -- General configuration ------------------------------------------------ @@ -82,8 +81,6 @@ copyright = __copyright__ author = 'Carter Landis' - - # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. diff --git a/metadata.py b/metadata.py --- a/metadata.py +++ b/metadata.py @@ -1,13 +1,11 @@ -from os import path - __name__ = "Augur" __slug__ = "augur" __url__ = "https://github.com/chaoss/augur" __short_description__ = "Python 3 package for free/libre and open-source software community metrics & data collection" -__version__ = "0.12.0" -__release__ = "0.12.0" +__version__ = "0.13.0" +__release__ = "v0.13.0" __license__ = "MIT" __copyright__ = "CHAOSS & Augurlabs 2020" diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -37,14 +37,14 @@ "sqlalchemy", "flask_login", "flask", - "pandas", + "pandas==1.0.5", "requests", "flask_cors", "flask_wtf", "psycopg2-binary", "click", "psutil", - "gunicorn==19.9.0", + "gunicorn", "six>=1.14.0" ], extras_require={ @@ -61,7 +61,7 @@ }, entry_points={ "console_scripts": [ - "augur=augur.runtime:run" + "augur=augur.cli._multicommand:run" ], } ) diff --git a/util/alembic/env.py b/util/alembic/env.py deleted file mode 100644 --- a/util/alembic/env.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import with_statement -from alembic import context -from sqlalchemy import engine_from_config, pool -from logging.config import fileConfig - - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. 
-fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -from augur.models.common import Base -target_metadata = Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, target_metadata=target_metadata, literal_binds=True) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - import augur.application - app = augur.application.Application() - - context.configure( - connection=app.db.connect(), - target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py b/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py deleted file mode 100644 --- a/util/alembic/versions/2eaa930b1f5a_create_basic_tables.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Create basic tables - -Revision ID: 2eaa930b1f5a -Revises: -Create Date: 2019-02-09 16:10:24.251828 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = '2eaa930b1f5a' -down_revision = None -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - op.create_table('repo', - sa.Column('url', sa.String(length=1024), nullable=False), - sa.Column('vcs', sa.String(length=64), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('url') - ) - op.create_table('repo_group', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('id') - ) - op.create_table('user', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('username', sa.String(length=64), nullable=False), - sa.Column('email', sa.String(length=64), nullable=False), - sa.Column('password_hash', sa.String(length=128), nullable=True), - sa.Column('email_confirmation_token', sa.String(length=128), nullable=True), - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('password_updated_at', sa.DateTime(), nullable=True), - sa.Column('last_login_at', sa.DateTime(), nullable=True), - sa.Column('authenticated', sa.Boolean(), nullable=True), - sa.Column('active', sa.Boolean(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('email'), - sa.UniqueConstraint('username') - ) - op.create_table('repo_group_has_project', - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.Column('repo_id', sa.String(length=1024), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['repo_id'], ['repo.url'], ), - sa.PrimaryKeyConstraint('repo_group_id', 'repo_id') - ) - op.create_table('user_has_repo_group', - sa.Column('user_id', sa.Integer(), nullable=False), - sa.Column('repo_group_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['repo_group_id'], ['repo_group.id'], ), - sa.ForeignKeyConstraint(['user_id'], ['user.id'], ), - sa.PrimaryKeyConstraint('user_id', 'repo_group_id') - ) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.drop_table('user_has_repo_group') - op.drop_table('repo_group_has_project') - op.drop_table('user') - op.drop_table('repo_group') - op.drop_table('repo') - # ### end Alembic commands ### diff --git a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py b/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py deleted file mode 100644 --- a/util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Add admin to User, name to RepoGroup - -Revision ID: a051167419fa -Revises: 2eaa930b1f5a -Create Date: 2019-02-17 13:09:42.138936 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = 'a051167419fa' -down_revision = '2eaa930b1f5a' -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.add_column('repo_group', sa.Column('name', sa.String(length=128), nullable=True)) - op.add_column('user', sa.Column('administrator', sa.Boolean(), nullable=True)) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - op.drop_column('user', 'administrator') - op.drop_column('repo_group', 'name') - # ### end Alembic commands ### diff --git a/workers/contributor_worker/contributor_worker/worker.py b/workers/contributor_worker/contributor_worker.py similarity index 67% rename from workers/contributor_worker/contributor_worker/worker.py rename to workers/contributor_worker/contributor_worker.py --- a/workers/contributor_worker/contributor_worker/worker.py +++ b/workers/contributor_worker/contributor_worker.py @@ -8,189 +8,50 @@ import statistics, logging, os, json, time import numpy as np import datetime -from workers.standard_methods import * #init_oauths, get_table_values, get_max_id, register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process, paginate -import warnings -warnings.filterwarnings('ignore') -class ContributorWorker: +from workers.worker_base import Worker + +class ContributorWorker(Worker): """ Worker that detects anomalies on a select few of our metrics task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self._task = task - self._child = None - self._queue = Queue() - self.db = None - self.tool_source = 'Contributor Worker' - self.tool_version = '0.0.1' # See __init__.py - self.data_source = 'Augur Commit Data' - self.finishing_task = False - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["contributors"] - } - ], - "config": [self.config] - } + def __init__(self, config={}): - self.results_counter = 0 + worker_type = "contributor_worker" - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) + given = [['git_url']] + models = ['contributors'] - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) + data_tables = ['contributors', 'contributors_aliases', 'contributor_affiliations', + 'issue_events', 'pull_request_events', 'issues', 'message', 'issue_assignees', + 'pull_request_assignees', 'pull_request_reviewers', 'pull_request_meta', 'pull_request_repo'] + operations_tables = ['worker_history', 'worker_job'] - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - # produce our own MetaData object - metadata = MetaData() - helper_metadata = MetaData() + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) - # we can reflect it ourselves from a database, using options - # such as 'only' to limit what tables we look at... 
- metadata.reflect(self.db, only=['contributors', 'contributors_aliases', 'contributor_affiliations', - 'issue_events', 'pull_request_events', 'issues', 'message', 'issue_assignees', - 'pull_request_assignees', 'pull_request_reviewers', 'pull_request_meta', 'pull_request_repo']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job']) - - # we can then produce a set of mappings from this MetaData. - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - # calling prepare() just sets up mapped classes and relationships. - Base.prepare() - HelperBase.prepare() - - # mapped classes are ready - self.contributors_table = Base.classes.contributors.__table__ - self.contributors_aliases_table = Base.classes.contributors_aliases.__table__ - self.contributor_affiliations_table = Base.classes.contributor_affiliations.__table__ - self.issue_events_table = Base.classes.issue_events.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.issues_table = Base.classes.issues.__table__ - self.message_table = Base.classes.message.__table__ - self.issue_assignees_table = Base.classes.issue_assignees.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5432/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if 
message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'contributors': - self.contributors_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass + # These 3 are included in every tuple the worker inserts (data collection info) + self.tool_source = 'Contributor Worker' + self.tool_version = '1.0.0' + self.data_source = 'Augur Commit Data' def contributors_model(self, entry_info, repo_id): + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's + self.logger.info("Querying starting ids info...\n") + + self.cntrb_id_inc = self.get_max_id('contributors', 'cntrb_id') + # Get and insert all users (emails) found by the facade worker self.insert_facade_contributors(entry_info, repo_id) # Get and insert all users github considers to be contributors for this repo - query_github_contributors(self, entry_info, repo_id) + self.query_github_contributors(entry_info, repo_id) - logging.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) + self.logger.info("Searching users for commits from the facade worker for repo with entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -242,7 +103,7 @@ def contributors_model(self, entry_info, repo_id): commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, \ params={'repo_id': repo_id}).to_json(orient="records")) - logging.info("We found {} distinct emails to search for in this repo (repo_id = {})".format( + self.logger.info("We found {} distinct emails to search for in this repo (repo_id = {})\n".format( len(commit_cntrbs), repo_id)) # For every unique commit contributor info combination... 
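
For orientation before the next hunks: the "emails to search for" that the query above collects are resolved to GitHub accounts through two API calls, a user search by email and then a fetch of the best-scoring login. The snippet below is an illustrative sketch only, not part of this patch; the helper name is hypothetical, the personal-access-token auth header is an assumption (the worker builds its own headers elsewhere), and the total_count guard and highest-score selection mirror what the worker code in the following hunks does.

import requests

def find_github_user_by_email(email, api_token):
    # Hypothetical helper, for illustration only: resolve a commit email
    # to a GitHub account the way contributors_model does.
    headers = {'Authorization': 'token %s' % api_token}  # assumed PAT-style header

    # Search users whose profile email matches the commit email.
    url = 'https://api.github.com/search/users?q={}+in:email'.format(email)
    results = requests.get(url, headers=headers).json()

    # Same guards as the worker: skip on an empty or zero-hit response.
    if 'total_count' not in results or results['total_count'] == 0:
        return None

    # Keep the candidate with the highest match score.
    match = max(results['items'], key=lambda item: item['score'])

    # Fetch the full user record for the chosen login.
    return requests.get('https://api.github.com/users/' + match['login'],
                        headers=headers).json()
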
@@ -283,7 +144,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(times_used_tuple)) self.results_counter += 1 - logging.info("Updated cntrb_created_at and cntrb_last_used columns for existing " + self.logger.info("Updated cntrb_created_at and cntrb_last_used columns for existing " "tuple in the contributors table with email: {}\n".format(contributor['commit_email'])) # If cntrb_full_name column is not filled, go ahead and fill it bc we have that info @@ -297,7 +158,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(name_col)) - logging.info("Inserted cntrb_full_name column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_full_name column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) # If cntrb_canonical column is not filled, go ahead and fill it w main email bc @@ -312,7 +173,7 @@ def contributors_model(self, entry_info, repo_id): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==contributor['cntrb_id']).values(canonical_col)) - logging.info("Inserted cntrb_canonical column for existing tuple in the contributors " + self.logger.info("Inserted cntrb_canonical column for existing tuple in the contributors " "table with email: {}\n".format(contributor['cntrb_email'])) @@ -347,20 +208,20 @@ def contributors_model(self, entry_info, repo_id): url = 'https://api.github.com/search/users?q={}+in:email'.format( cmt_cntrb['email']) - logging.info("Hitting endpoint: " + url + " ...\n") + self.logger.info("Hitting endpoint: " + url + " ...\n") r = requests.get(url=url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) results = r.json() # If no matches or bad response, continue with other contributors if 'total_count' not in results: - logging.info("Search query returned an empty response, moving on...\n") + self.logger.info("Search query returned an empty response, moving on...\n") continue if results['total_count'] == 0: - logging.info("Search query did not return any results, moving on...\n") + self.logger.info("Search query did not return any results, moving on...\n") continue - logging.info("When searching for a contributor with info {}, we found the following users: {}\n".format( + self.logger.info("When searching for a contributor with info {}, we found the following users: {}\n".format( cmt_cntrb, results)) # Grab first result and make sure it has the highest match score @@ -370,9 +231,9 @@ def contributors_model(self, entry_info, repo_id): match = item cntrb_url = ("https://api.github.com/users/" + match['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) contributor = r.json() # Fill in all github information @@ -407,11 +268,12 @@ def contributors_model(self, entry_info, repo_id): } result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_id==self.cntrb_id_inc).values(cntrb_gh_info)) - logging.info("Updated existing tuple in the contributors table with github info after " + self.logger.info("Updated existing tuple in the 
contributors table with github info after " "a successful search query on a facade commit's author : {} {}\n".format(contributor, cntrb_gh_info)) # Dupe check + self.logger.info('Checking dupes.\n') dupe_cntrb_sql = s.sql.text(""" SELECT contributors.* FROM contributors inner join ( @@ -424,10 +286,18 @@ def contributors_model(self, entry_info, repo_id): dupe_cntrbs = pd.read_sql(dupe_cntrb_sql, self.db, params={}) - # Turn this column from nan to None - dupe_cntrbs['gh_user_id'] = dupe_cntrbs['gh_user_id'].where(pd.notnull(dupe_cntrbs['gh_user_id']), None) + self.logger.info(f'There are {len(dupe_cntrbs)} duplicates.\n') + + # Turn columns from nan/nat to None + dupe_cntrbs = dupe_cntrbs.replace({pd.NaT: None}) for i, cntrb_existing in dupe_cntrbs.iterrows(): + + self.logger.info(f'Processing dupe: {cntrb_existing}.\n') + if i == 0: + self.logger.info('skipping first\n') + continue + cntrb_new = cntrb_existing.copy() del cntrb_new['cntrb_id'] del cntrb_new['data_collection_date'] @@ -447,22 +317,29 @@ def contributors_model(self, entry_info, repo_id): dupe_ids = pd.read_sql(dupe_ids_sql, self.db, params={'pk': pk, \ 'email': cntrb_new['cntrb_email']})['cntrb_id'].values.tolist() - self.map_new_id(self, dupe_ids, pk) + self.map_new_id(dupe_ids, pk) delete_dupe_ids_sql = s.sql.text(""" DELETE FROM contributors WHERE cntrb_id <> {} - AND cntrb_email = '{}' + AND cntrb_email = '{}'; """.format(pk, cntrb_new['cntrb_email'])) - self.db.execute(delete_dupe_ids_sql) + self.logger.info(f'Trying to delete dupes with sql: {delete_dupe_ids_sql}') + + try: + result = self.db.execute(delete_dupe_ids_sql) + except Exception as e: + self.logger.info(f'Deleting dupes failed with error: {e}') + + self.logger.info('Deleted duplicates.\n') # Register this task as completed - register_task_completion(self, entry_info, repo_id, "contributors") + self.register_task_completion(entry_info, repo_id, "contributors") def insert_facade_contributors(self, entry_info, repo_id): - logging.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) + self.logger.info("Beginning process to insert contributors from facade commits for repo w entry info: {}\n".format(entry_info)) # Get all distinct combinations of emails and names by querying the repo's commits userSQL = s.sql.text(""" @@ -498,7 +375,7 @@ def insert_facade_contributors(self, entry_info, repo_id): """) commit_cntrbs = json.loads(pd.read_sql(userSQL, self.db, params={'repo_id': repo_id}).to_json(orient="records")) - logging.info("We found {} distinct contributors needing insertion (repo_id = {})".format( + self.logger.info("We found {} distinct contributors needing insertion (repo_id = {})".format( len(commit_cntrbs), repo_id)) for cntrb in commit_cntrbs: @@ -511,10 +388,10 @@ def insert_facade_contributors(self, entry_info, repo_id): 'cntrb_full_name': cntrb['name'] } result = self.db.execute(self.contributors_table.insert().values(cntrb_tuple)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: {}\n".format(cntrb['email'])) + self.logger.info("Inserted contributor: {}\n".format(cntrb['email'])) def handle_alias(self, tuple): cntrb_email = tuple['cntrb_email'] # canonical @@ -522,7 +399,7 @@ def handle_alias(self, tuple): cntrb_id = tuple['cntrb_id'] # Check existing 
contributors table tuple - existing_tuples = retrieve_tuple(self, {'cntrb_email': tuple['commit_email']}, ['contributors']) + existing_tuples = self.retrieve_tuple({'cntrb_email': tuple['commit_email']}, ['contributors']) if len(existing_tuples) == 0: """ Insert alias tuple into the contributor table """ @@ -543,15 +420,15 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc elif len(existing_tuples) > 1: # fix all dupe references to dupe cntrb ids before we delete them - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") - logging.info("For cntrb_email: {}".format(tuple['commit_email'])) + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the contributors table, we will delete all tuples with this cntrb_email and re-insert only 1\n") + self.logger.info("For cntrb_email: {}".format(tuple['commit_email'])) """ Insert alias tuple into the contributor table """ @@ -576,7 +453,7 @@ def handle_alias(self, tuple): del cntrb['cntrb_id'] result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) + self.logger.info("Inserted alias into the contributors table with email: {}\n".format(cntrb['cntrb_email'])) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) alias_id = self.cntrb_id_inc @@ -610,14 +487,14 @@ def handle_alias(self, tuple): try: # Delete all dupes result = self.db.execute(deleteSQL) - logging.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) + self.logger.info("Deleted all non-canonical contributors with the email: {}\n".format(commit_email)) except Exception as e: - logging.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) + self.logger.info("When trying to delete a duplicate contributor, worker ran into error: {}".format(e)) else: #then there would be exactly 1 existing tuple, so that id is the one we want alias_id = existing_tuples[0]['cntrb_id'] - logging.info('Checking canonicals match.\n') + self.logger.info('Checking canonicals match.\n') alias_sql = s.sql.text(""" SELECT * FROM contributors @@ -636,14 +513,14 @@ def handle_alias(self, tuple): result = self.db.execute(self.contributors_table.update().where( self.contributors_table.c.cntrb_canonical==canonical_id_result.iloc[0]['cntrb_canonical'] ).values(canonical_col)) - logging.info("Updated cntrb_canonical column for existing tuple in the contributors " + self.logger.info("Updated cntrb_canonical column for existing tuple in the contributors " "table with email: {}\n".format(tuple['cntrb_email'])) # Now check existing alias table tuple - existing_tuples = retrieve_tuple(self, {'alias_email': commit_email}, ['contributors_aliases']) + existing_tuples = self.retrieve_tuple({'alias_email': commit_email}, ['contributors_aliases']) if len(existing_tuples) == 0: - logging.info("Finding cntrb_id for canonical email: {}".format(cntrb_email)) + self.logger.info("Finding cntrb_id for 
canonical email: {}".format(cntrb_email)) canonical_id_sql = s.sql.text(""" SELECT cntrb_id as canonical_id from contributors @@ -652,7 +529,7 @@ def handle_alias(self, tuple): canonical_id_result = json.loads(pd.read_sql(canonical_id_sql, self.db, params={'email': cntrb_email}).to_json( orient="records")) if len(canonical_id_result) > 1: - logging.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) + self.logger.info("MORE THAN ONE CANONICAL CONTRIBUTOR found for email: {}".format(cntrb_email)) alias_tuple = { 'cntrb_id': canonical_id_result[0]['canonical_id'], 'cntrb_a_id': alias_id, @@ -665,9 +542,9 @@ def handle_alias(self, tuple): } result = self.db.execute(self.contributors_aliases_table.insert().values(alias_tuple)) self.results_counter += 1 - logging.info("Inserted alias with email: {}\n".format(commit_email)) + self.logger.info("Inserted alias with email: {}\n".format(commit_email)) if len(existing_tuples) > 1: - logging.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " + self.logger.info("THERE IS A CASE FOR A DUPLICATE CONTRIBUTOR in the alias " "table AND NEED TO ADD DELETION LOGIC: {}\n".format(existing_tuples)) def map_new_id(self, dupe_ids, new_id): @@ -693,48 +570,49 @@ def map_new_id(self, dupe_ids, new_id): alias_result = self.db.execute(self.contributors_aliases_table.update().where( self.contributors_aliases_table.c.cntrb_a_id.in_(dupe_ids)).values(alias_update_col)) - logging.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_a_id column for tuples in the contributors_aliases table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) except Exception as e: - logging.info(f'Alias re-map already done... error: {e}') + self.logger.info(f'Alias re-map already done... 
error: {e}') issue_events_result = self.db.execute(self.issue_events_table.update().where( self.issue_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the issue_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_events_result = self.db.execute(self.pull_request_events_table.update().where( self.pull_request_events_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the pull_request_events table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issues_cntrb_result = self.db.execute(self.issues_table.update().where( self.issues_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuples in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issues_reporter_result = self.db.execute(self.issues_table.update().where( self.issues_table.c.reporter_id.in_(dupe_ids)).values(reporter_col)) - logging.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated reporter_id column in the issues table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) issue_assignee_result = self.db.execute(self.issue_assignees_table.update().where( self.issue_assignees_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the issue_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_assignee_result = self.db.execute(self.pull_request_assignees_table.update().where( self.pull_request_assignees_table.c.contrib_id.in_(dupe_ids)).values(pr_assignee_col)) - logging.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated contrib_id column for tuple in the pull_request_assignees table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) message_result = self.db.execute(self.message_table.update().where( self.message_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the message table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_reviewers_result = self.db.execute(self.pull_request_reviewers_table.update().where( self.pull_request_reviewers_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated 
cntrb_id column for tuple in the pull_request_reviewers table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_reviewers table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_meta_result = self.db.execute(self.pull_request_meta_table.update().where( self.pull_request_meta_table.c.cntrb_id.in_(dupe_ids)).values(update_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_meta table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) pr_repo_result = self.db.execute(self.pull_request_repo_table.update().where( self.pull_request_repo_table.c.pr_cntrb_id.in_(dupe_ids)).values(pr_repo_col)) - logging.info("Updated cntrb_id column for tuple in the pull_request_repo table with value: {} replaced with new cntrb id: {}".format(id['cntrb_id'], self.cntrb_id_inc)) + self.logger.info("Updated cntrb_id column for tuple in the pull_request_repo table with value: {} replaced with new cntrb id: {}".format(new_id, self.cntrb_id_inc)) + self.logger.info('Done mapping new id.\n') diff --git a/workers/contributor_worker/contributor_worker/__init__.py b/workers/contributor_worker/contributor_worker/__init__.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.0.1' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/contributor_worker/contributor_worker/runtime.py b/workers/contributor_worker/contributor_worker/runtime.py deleted file mode 100644 --- a/workers/contributor_worker/contributor_worker/runtime.py +++ /dev/null @@ -1,110 +0,0 @@ -from flask import Flask, jsonify, request -from contributor_worker.worker import ContributorWorker -import click, os, json, logging, requests -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}\n".format(str(request.json))) - app.contributor_worker.task = request.json - - #set task - return jsonify({"success": "sucess"}) - - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": app.contributor_worker._queue, - "tasks": [{ - "given": list(app.contributor_worker._queue) - }] - }) - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.contributor_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', 
default='localhost', help='Host') [email protected]('--port', default=51252, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'contributor_worker', None, {}) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.contributor_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "anomaly_days": worker_info['anomaly_days'] if 'anomaly_days' in worker_info else 2, - "training_days": worker_info['training_days'] if 'training_days' in worker_info else 365, - "confidence_interval": worker_info['confidence_interval'] if 'confidence_interval' in worker_info else .95, - "contamination": worker_info['contamination'] if 'contamination' in worker_info else 0.041, - 'metrics': worker_info['metrics'] if 'metrics' in worker_info else {"issues-new": "issues", - "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"} - } - - #create instance of the worker - app.contributor_worker = ContributorWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - print("Starting Flask App on host {} with port {} with pid: ".format(broker_host, worker_port) + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - print("Killing Flask App: {} and telling broker that this worker is disconnected.".format(str(os.getpid()))) - try: - logging.info("Sending disconnected message to broker... 
@ -> {} with info: {}\n".format('http://{}:{}/api/unstable/workers'.format( - config['broker_host'], config['broker_port']), config)) - requests.post('http://{}:{}/api/unstable/workers/remove'.format( - config['broker_host'], config['broker_port']), json=config) #hello message - except Exception as e: - logging.info("Ran into error: {}".format(e)) - logging.info("Broker's port is busy, worker will not be able to accept tasks, " - "please restart Augur if you want this worker to attempt connection again.") - diff --git a/workers/contributor_worker/runtime.py b/workers/contributor_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/contributor_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.contributor_worker.contributor_worker import ContributorWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ContributorWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/contributor_worker/setup.py b/workers/contributor_worker/setup.py --- a/workers/contributor_worker/setup.py +++ b/workers/contributor_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="contributor_worker", - version="0.0.2", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'contributor_worker_start=contributor_worker.runtime:main', + 'contributor_worker_start=workers.contributor_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/facade_worker/facade_worker/facade00mainprogram.py b/workers/facade_worker/facade_worker/facade00mainprogram.py --- a/workers/facade_worker/facade_worker/facade00mainprogram.py +++ b/workers/facade_worker/facade_worker/facade00mainprogram.py @@ -26,20 +26,8 @@ # repos. It also rebuilds analysis data, checks any changed affiliations and # aliases, and caches data for display. 
-import pymysql -import sys -import platform -import imp -import time -import datetime -import html.parser -import subprocess -import os -import getopt -import xlsxwriter -import configparser +import pymysql, sys, platform, imp, time, datetime, html.parser, subprocess, os, getopt, xlsxwriter, configparser, logging from multiprocessing import Process, Queue - from facade_worker.facade01config import Config#increment_db, update_db, migrate_database_config, database_connection, get_setting, update_status, log_activity from facade_worker.facade02utilitymethods import update_repo_log, trim_commit, store_working_author, trim_author from facade_worker.facade03analyzecommit import analyze_commit @@ -48,55 +36,49 @@ from facade_worker.facade06analyze import analysis from facade_worker.facade07rebuildcache import nuke_affiliations, fill_empty_affiliations, invalidate_caches, rebuild_unknown_affiliation_and_web_caches -from workers.standard_methods import read_config +from workers.util import read_config +from workers.worker_base import Worker + +html = html.parser.HTMLParser() -import logging +class FacadeWorker(Worker): + def __init__(self, config={}, task=None): + worker_type = "facade_worker" -# if platform.python_implementation() == 'PyPy': -# import pymysql -# else: -# import MySQLdb -# ## End Imports + # Define what this worker can be given and know how to interpret + given = [['repo_group']] + models = ['commits'] -html = html.parser.HTMLParser() + # Define the tables needed to insert, update, or delete on + data_tables = [] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Facade-specific config + self.cfg = Config(self.logger) + + # Define data collection info + # self.tool_source = 'Facade Worker' + # self.tool_version = '1.0.0' + # self.data_source = 'Git Log' -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class FacadeWorker: - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(os.getpid())) - - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.cfg = Config() - - ### The real program starts here ### + self.tool_source = '\'Facade Worker\'' + self.tool_version = '\'1.0.1\'' + self.data_source = '\'Git Log\'' + + def initialize_database_connections(self): # Set up the database - db_user = self.config['user'] - db_pass = self.config['password'] - db_name = self.config['database'] - db_host = self.config['host'] - db_port = self.config['port'] - db_user_people = self.config['user'] - db_pass_people = self.config['password'] - db_name_people = self.config['database'] - db_host_people = self.config['host'] - db_port_people = self.config['port'] + db_user = self.config['user_database'] + db_pass = self.config['password_database'] + db_name = self.config['name_database'] + db_host = self.config['host_database'] + db_port = self.config['port_database'] # Open a general-purpose 
connection - db,cursor = self.cfg.database_connection( + self.db, self.cursor = self.cfg.database_connection( db_host, db_user, db_pass, @@ -104,157 +86,68 @@ def __init__(self, config, task=None): db_port, False, False) # Open a connection for the people database - db_people,cursor_people = self.cfg.database_connection( - db_host_people, - db_user_people, - db_pass_people, - db_name_people, - db_port_people, True, False) + self.db_people,self.cursor_people = self.cfg.database_connection( + db_host, + db_user, + db_pass, + db_name, + db_port, True, False) # Check if the database is current and update it if necessary try: - current_db = int(self.cfg.get_setting('database_version')) + self.current_db = int(self.cfg.get_setting('database_version')) except: # Catch databases which existed before database versioning - current_db = -1 - - #WHAT IS THE UPSTREAM_DB??? - # if current_db < upstream_db: - - # print(("Current database version: %s\nUpstream database version %s\n" % - # (current_db, upstream_db))) - - # self.cfg.update_db(current_db); + self.current_db = -1 - self.commit_model() - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - rg_id = value['given']['repo_group_id'] - - """ Query all repos """ - # repoUrlSQL = s.sql.text(""" - # SELECT repo_id,repo_group_id,repo_git FROM repo WHERE repo_group_id = '{}' - # """.format(rg_id)) - # rs = pd.read_sql(repoUrlSQL, self.db, params={}) - try: - if value['job_type'] == "UPDATE": - self._queue.put(CollectorTask(message_type='TASK', entry_info=value)) - elif value['job_type'] == "MAINTAIN": - self._maintain_queue.put(CollectorTask(message_type='TASK', entry_info=value)) - - except Exception as e: - logging.info("error: {}".format(e)) - - self._task = CollectorTask(message_type='TASK', entry_info={"task": value, "repo_id": repo_id}) - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...") - if self._child is None: - self._child = Process(target=self.collect, args=()) - self._child.start() - def collect(self): """ Function to process each entry in the worker's task queue Determines what action to take based off the message type """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() while True: - time.sleep(0.5) if not self._queue.empty(): - message = self._queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "UPDATE" + message = self._queue.get() # Get the task off our MP queue else: - if not self._maintain_queue.empty(): - message = self._maintain_queue.get() - logging.info("Popped off message: {}".format(message.entry_info)) - self.working_on = "MAINTAIN" - else: - break - - if message.type == 'EXIT': + break + self.logger.info("Popped off message: {}\n".format(str(message))) + + if message['job_type'] == 'STOP': break - if message.type != 'TASK': - raise ValueError(f'{message.type} is not a recognized task type') - - if message.type == 'TASK': - try: - git_url = 
message.entry_info['task']['given']['git_url'] - self.query_issues({'git_url': git_url, 'repo_id': message.entry_info['repo_id']}) - except Exception as e: - logging.info("Worker ran into an error for task: {}\n".format(message.entry_info['task'])) - logging.info("Error encountered: " + repr(e) + "\n") - logging.info("Notifying broker and logging task failure in database...\n") - message.entry_info['task']['worker_id'] = self.config['id'] - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=message.entry_info['task']) - # Add to history table - task_history = { - "repo_id": message.entry_info['repo_id'], - "worker": self.config['id'], - "job_model": message.entry_info['task']['models'][0], - "oauth_id": self.config['zombie_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error for: " + str(message.entry_info['task']) + "\n") - - # Update job process table - updated_job = { - "since_id_str": message.entry_info['repo_id'], - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==message.entry_info['task']['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + message.entry_info['task']['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - pass - - def commit_model(self): + # If task is not a valid job type + if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': + raise ValueError('{} is not a recognized task type'.format(message['job_type'])) + pass + try: + self.commits_model(message) + except Exception as e: + self.logger.error(e) + raise(e) + break + + def commits_model(self, message): # Figure out what we need to do - limited_run = read_config("Facade", name="limited_run", default=0) - delete_marked_repos = read_config("Facade", name="delete_marked_repos", default=0) - pull_repos = read_config("Facade", name="pull_repos", default=0) - clone_repos = read_config("Facade", name="clone_repos", default=1) - check_updates = read_config("Facade", name="check_updates", default=0) - force_updates = read_config("Facade", name="force_updates", default=0) - run_analysis = read_config("Facade", name="run_analysis", default=0) - force_analysis = read_config("Facade", name="force_analysis", default=0) - nuke_stored_affiliations = read_config("Facade", name="nuke_stored_affiliations", default=0) - fix_affiliations = read_config("Facade", name="fix_affiliations", default=1) - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - rebuild_caches = read_config("Facade", name="rebuild_caches", default=1) #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], + limited_run = self.augur_config.get_value("Facade", "limited_run") + delete_marked_repos = self.augur_config.get_value("Facade", "delete_marked_repos") + pull_repos = self.augur_config.get_value("Facade", "pull_repos") + clone_repos = self.augur_config.get_value("Facade", "clone_repos") + check_updates = self.augur_config.get_value("Facade", "check_updates") + force_updates = self.augur_config.get_value("Facade", "force_updates") + run_analysis = 
self.augur_config.get_value("Facade", "run_analysis") + force_analysis = self.augur_config.get_value("Facade", "force_analysis") + nuke_stored_affiliations = self.augur_config.get_value("Facade", "nuke_stored_affiliations") + fix_affiliations = self.augur_config.get_value("Facade", "fix_affiliations") + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + rebuild_caches = self.augur_config.get_value("Facade", "rebuild_caches") #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3], # '%Y-%m-%d %I:%M:%S.%f') - datetime.datetime.now()).total_seconds()) // 3600 > int(self.cfg.get_setting( # 'update_frequency')) else 0 - force_invalidate_caches = read_config("Facade", name="force_invalidate_caches", default=0) - create_xlsx_summary_files = read_config("Facade", name="create_xlsx_summary_files", default=0) - multithreaded = read_config("Facade", name="multithreaded", default=1) + force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches") + create_xlsx_summary_files = self.augur_config.get_value("Facade", "create_xlsx_summary_files") + multithreaded = self.augur_config.get_value("Facade", "multithreaded") opts,args = getopt.getopt(sys.argv[1:],'hdpcuUaAmnfIrx') for opt in opts: @@ -355,9 +248,9 @@ def commit_model(self): if len(repo_base_directory) == 0: self.cfg.log_activity('Error','No base directory. It is unsafe to continue.') - update_status('Failed: No base directory') + self.cfg.update_status('Failed: No base directory') sys.exit(1) - + # Begin working start_time = time.time() diff --git a/workers/facade_worker/facade_worker/facade01config.py b/workers/facade_worker/facade_worker/facade01config.py --- a/workers/facade_worker/facade_worker/facade01config.py +++ b/workers/facade_worker/facade_worker/facade01config.py @@ -39,15 +39,15 @@ import json import logging -from workers.standard_methods import read_config - +from workers.util import read_config class Config: - def __init__(self): + def __init__(self, logger): self.upstream_db = 7 self.cursor = None self.cursor_people = None + self.logger = logger self.db = None self.db_people = None @@ -60,9 +60,14 @@ def __init__(self): " in your \'Workers\' -> \'facade_worker\' object in your config " "to the directory in which you want to clone repos. Exiting...") sys.exit(1) - self.tool_source = '\'FacadeAugur\'' - self.tool_version = '\'0.0.1\'' - self.data_source = '\'git_repository\'' + + # self.tool_source = 'Facade Worker' + # self.tool_version = '1.0.0' + # self.data_source = 'Git Log' + + self.tool_source = '\'Facade Worker\'' + self.tool_version = '\'1.0.1\'' + self.data_source = '\'Git Log\'' # Figure out how much we're going to log logging.basicConfig(filename='worker_{}.log'.format(worker_options['port']), filemode='w', level=logging.INFO) @@ -147,6 +152,8 @@ def database_connection(self, db_host,db_user,db_pass,db_name, db_port, people, # connection that should provide maximum performance depending upon the # interpreter in use. + ##TODO: Postgres connections as we make them ARE threadsafe. We *could* refactor this accordingly: https://www.psycopg.org/docs/connection.html #noturgent + # if platform.python_implementation() == 'PyPy': db_schema = 'augur_data' @@ -162,6 +169,8 @@ def database_connection(self, db_host,db_user,db_pass,db_name, db_port, people, cursor = db.cursor()#pymysql.cursors.DictCursor) +## TODO: Does this need a block for if the database connection IS multithreaded? 
I think so, @gabe-heim + if people and not multi_threaded_connection: self.cursor_people = cursor self.db_people = db @@ -199,7 +208,7 @@ def log_activity(self, level, status): # "Debug", then just print it and don't save it in the database. log_options = ('Error','Quiet','Info','Verbose','Debug') - logging.info("* %s\n" % status) + self.logger.info("* %s\n" % status) if self.log_level == 'Debug' and level == 'Debug': return @@ -209,7 +218,7 @@ def log_activity(self, level, status): self.cursor.execute(query, (level, status)) self.db.commit() except Exception as e: - logging.info('Error encountered: {}\n'.format(e)) + self.logger.info('Error encountered: {}\n'.format(e)) # Set up the database db_user = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur') diff --git a/workers/facade_worker/facade_worker/facade03analyzecommit.py b/workers/facade_worker/facade_worker/facade03analyzecommit.py --- a/workers/facade_worker/facade_worker/facade03analyzecommit.py +++ b/workers/facade_worker/facade_worker/facade03analyzecommit.py @@ -38,7 +38,7 @@ import configparser import traceback -from workers.standard_methods import read_config +from workers.util import read_config def analyze_commit(cfg, repo_id, repo_loc, commit, multithreaded): diff --git a/workers/facade_worker/facade_worker/facade06analyze.py b/workers/facade_worker/facade_worker/facade06analyze.py --- a/workers/facade_worker/facade_worker/facade06analyze.py +++ b/workers/facade_worker/facade_worker/facade06analyze.py @@ -146,6 +146,8 @@ def update_analysis_log(repos_id,status): cfg.log_activity('Debug','Commits missing from repo %s: %s' % (repo[0],len(missing_commits))) +## TODO: Verify if the multithreaded approach here is optimal for postgresql + if multithreaded: from multiprocessing import Pool diff --git a/workers/facade_worker/facade_worker/facade07rebuildcache.py b/workers/facade_worker/facade_worker/facade07rebuildcache.py --- a/workers/facade_worker/facade_worker/facade07rebuildcache.py +++ b/workers/facade_worker/facade_worker/facade07rebuildcache.py @@ -156,7 +156,6 @@ def discover_null_affiliations(attribution,email): cfg.log_activity('Debug','Found domain match for %s' % email) - # try: for match in matches: update = ("UPDATE commits " "SET cmt_%s_affiliation = %%s " @@ -164,7 +163,6 @@ def discover_null_affiliations(attribution,email): "AND cmt_%s_affiliation IS NULL " "AND cmt_%s_date::date >= %%s::date" % (attribution, attribution, attribution, attribution)) - #"AND cmt_%s_date >= TO_TIMESTAMP(%%s, 'YYYY-MM-DD')" % cfg.log_activity('Info', 'attr: {} \nmatch:{}\nsql: {}'.format(attribution, match, update)) @@ -175,15 +173,6 @@ def discover_null_affiliations(attribution,email): cfg.log_activity('Info', 'Error encountered: {}'.format(e)) cfg.log_activity('Info', 'Affiliation insertion failed for %s ' % email) - # except Exception as e: - # cfg.log_activity('Info', '1st Error encountered: {}'.format(e)) - # cfg.log_activity('Info', 'Attribution matching failed for %s ' % email) - # except Exception as e: - # logging.info('2nd Error encountered: {}'.format(e)) - # cfg.log_activity('Info', 'Attribution matching failed and exception logging failed') - # else: - # cfg.log_activity('Info', 'Attribution matching failed and exception logging failed and the exception to the exception failed.') - def discover_alias(email): # Match aliases with their canonical email diff --git a/workers/facade_worker/facade_worker/runtime.py b/workers/facade_worker/facade_worker/runtime.py --- a/workers/facade_worker/facade_worker/runtime.py +++ 
b/workers/facade_worker/facade_worker/runtime.py @@ -1,102 +1,23 @@ from flask import Flask, jsonify, request, Response import click, os, json, requests, logging -from facade_worker.facade00mainprogram import FacadeWorker -from workers.standard_methods import read_config +from workers.facade_worker.facade_worker.facade00mainprogram import FacadeWorker +from workers.util import create_server, WorkerGunicornApplication -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(request.json)) - app.facade_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.facade_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51258, help='Port') -def main(augur_url, host, port): +def main(): """ Declares singular worker and creates the server and flask app that it will be running on """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'facade_worker', None, None) - worker_port = worker_info['port'] if 'port' in worker_info else port - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - config = { - "id": "com.augurlabs.core.facade_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } + app = Flask(__name__) + app.worker = 
FacadeWorker() - #create instance of the worker - app.facade_worker = FacadeWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") + create_server(app) + WorkerGunicornApplication(app).run() - app.run(debug=app.debug, host=host, port=worker_port) + if app.worker._child is not None: + app.worker._child.terminate() try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) except: pass - logging.info("Killing Flask App: " + str(os.getpid())) + os.kill(os.getpid(), 9) - diff --git a/workers/github_worker/github_worker/worker.py b/workers/github_worker/github_worker.py similarity index 56% rename from workers/github_worker/github_worker/worker.py rename to workers/github_worker/github_worker.py --- a/workers/github_worker/github_worker/worker.py +++ b/workers/github_worker/github_worker.py @@ -2,217 +2,61 @@ from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData import requests, time, logging, json, os from datetime import datetime -from sqlalchemy.ext.declarative import declarative_base -from workers.standard_methods import * +from workers.worker_base import Worker -class GitHubWorker: +class GitHubWorker(Worker): """ Worker that collects data from the Github API and stores it in our database task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - - self._task = task # task currently being worked on (dict) - self._child = None # process of currently running task (multiprocessing process) - self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes) - self.db = None # sql alchemy db session + def __init__(self, config={}): - # These 3 are included in every tuple the worker inserts (data collection info) - self.tool_source = 'GitHub API Worker' - self.tool_version = '0.0.3' # See __init__.py - self.data_source = 'GitHub API' - - self.results_counter = 0 # count of tuples inserted in the database (to store stats for each task in op tables) - self.finishing_task = True # if we are finishing a previous task, pagination works differenty - - self.specs = { - "id": self.config['id'], # what the broker knows this worker as - "location": self.config['location'], # host + port worker is running on (so broker can send tasks here) - "qualifications": [ - { - "given": [["github_url"]], # type of repo this worker can be given as a task - "models":["issues"] # models this worker can fill for a repo as a task - } - ], - "config": [self.config] - } - - DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], 
self.config['host'], self.config['port'], self.config['database'] - ) - - # Create an sqlalchemy engine for both database schemas - logging.info("Making database connections... {}".format(DB_STR)) - db_schema = 'augur_data' - self.db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(db_schema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) + worker_type = 'github_worker' - metadata = MetaData() - helper_metadata = MetaData() + given = [['github_url']] + models = ['issues'] - # Reflect only the tables we will use for each schema's metadata object - metadata.reflect(self.db, only=['contributors', 'issues', 'issue_labels', 'message', + data_tables = ['contributors', 'issues', 'issue_labels', 'message', 'issue_message_ref', 'issue_events','issue_assignees','contributors_aliases', 'pull_request_assignees', 'pull_request_events', 'pull_request_reviewers', 'pull_request_meta', - 'pull_request_repo']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - # So we can access all our tables when inserting, updating, etc - self.contributors_table = Base.classes.contributors.__table__ - self.issues_table = Base.classes.issues.__table__ - self.issue_labels_table = Base.classes.issue_labels.__table__ - self.issue_events_table = Base.classes.issue_events.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.message_table = Base.classes.message.__table__ - self.issues_message_ref_table = Base.classes.issue_message_ref.__table__ - self.issue_assignees_table = Base.classes.issue_assignees.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.contributors_aliases_table = Base.classes.contributors_aliases.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ + 'pull_request_repo'] + operations_tables = ['worker_history', 'worker_job'] - # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's - logging.info("Querying starting ids info...\n") - - self.issue_id_inc = get_max_id(self, 'issues', 'issue_id') - - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - - self.msg_id_inc = get_max_id(self, 'message', 'msg_id') - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) + # These 3 are included in every tuple the worker inserts (data collection info) + self.tool_source = 'GitHub API Worker' + self.tool_version = '1.0.0' + self.data_source = 'GitHub API' - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 
'psql://{}:5433/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - # If the task has one of our "valid" job types - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - # Setting that causes paginating through ALL pages, not just unknown ones - # This setting is set by the housekeeper and is attached to the task before it gets sent here - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - - self._task = value - self.run() + self.finishing_task = True # if we are finishing a previous task, pagination works differenty + self.platform_id = 25150 # GitHub - def cancel(self): - """ Delete/cancel current task - """ - self._task = None + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - # Spawn a subprocess to handle message reading and performing the tasks - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['github_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - # and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'issues': - self.issues_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass def issues_model(self, entry_info, repo_id): """ Data collection function Query the GitHub API for issues """ + + # Get max ids so we know where we are in our insertion and to have the current id when inserting FK's + self.logger.info("Querying starting ids info...\n") + + self.issue_id_inc = self.get_max_id('issues', 'issue_id') + + self.msg_id_inc = self.get_max_id('message', 'msg_id') github_url = entry_info['given']['github_url'] - logging.info("Beginning filling the issues model for repo: " + github_url + "\n") - record_model_process(self, repo_id, 'issues') + self.logger.info("Beginning filling the issues model for repo: " + github_url + "\n") # Contributors 
are part of this model, and finding all for the repo saves us # from having to add them as we discover committers in the issue process - query_github_contributors(self, entry_info, repo_id) + self.query_github_contributors(entry_info, repo_id) # Extract the owner/repo for the endpoint path = urlparse(github_url) @@ -238,14 +82,14 @@ def issues_model(self, entry_info, repo_id): duplicate_col_map = {'gh_issue_id': 'id'} #list to hold issues needing insertion - issues = paginate(self, issues_url, duplicate_col_map, update_col_map, table, table_pkey, + issues = self.paginate(issues_url, duplicate_col_map, update_col_map, table, table_pkey, 'WHERE repo_id = {}'.format(repo_id)) - + self.logger.info(issues) # Discover and remove duplicates before we start inserting - logging.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") + self.logger.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") for issue_dict in issues: - logging.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") + self.logger.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") # Add the FK repo_id to the dict being inserted issue_dict['repo_id'] = repo_id @@ -253,17 +97,17 @@ def issues_model(self, entry_info, repo_id): # Figure out if this issue is a PR # still unsure about this key value pair/what it means pr_id = None - if "pull_request" in issue_dict: - logging.info("Issue is a PR\n") + if 'pull_request' in issue_dict: + self.logger.info("Issue is a PR\n") # Right now we are just storing our issue id as the PR id if it is one pr_id = self.issue_id_inc else: - logging.info("Issue is not a PR\n") + self.logger.info("Issue is not a PR\n") # Begin on the actual issue... issue = { "repo_id": issue_dict['repo_id'], - "reporter_id": find_id_from_login(self, issue_dict['user']['login']), + "reporter_id": self.find_id_from_login(issue_dict['user']['login']), "pull_request": pr_id, "pull_request_id": pr_id, "created_at": issue_dict['created_at'], @@ -292,20 +136,20 @@ def issues_model(self, entry_info, repo_id): if issue_dict['flag'] == 'need_update': result = self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( + self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( issue_dict['id'])) self.issue_id_inc = issue_dict['pkey'] elif issue_dict['flag'] == 'need_insertion': try: result = self.db.execute(self.issues_table.insert().values(issue)) - logging.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) self.results_counter += 1 self.issue_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + + self.logger.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + " and title of: {} and gh_issue_num of: {}\n".format(issue_dict['title'],issue_dict['number'])) except Exception as e: - logging.info("When inserting an issue, ran into the following error: {}\n".format(e)) - logging.info(issue) + self.logger.info("When inserting an issue, ran into the following error: {}\n".format(e)) + self.logger.info(issue) continue # Check if the assignee key's value is already recorded in the assignees key's value @@ -316,13 +160,13 @@ def 
issues_model(self, entry_info, repo_id): # Handles case if there are no assignees if collected_assignees[0] is not None: - logging.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") + self.logger.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") for assignee_dict in collected_assignees: if type(assignee_dict) != dict: continue assignee = { "issue_id": self.issue_id_inc, - "cntrb_id": find_id_from_login(self, assignee_dict['login']), + "cntrb_id": self.find_id_from_login(assignee_dict['login']), "tool_source": self.tool_source, "tool_version": self.tool_version, "data_source": self.data_source, @@ -331,13 +175,13 @@ def issues_model(self, entry_info, repo_id): } # Commit insertion to the assignee table result = self.db.execute(self.issue_assignees_table.insert().values(assignee)) - logging.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + + self.logger.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + " with login/cntrb_id: " + assignee_dict['login'] + " " + str(assignee['cntrb_id']) + "\n") else: - logging.info("Issue does not have any assignees\n") + self.logger.info("Issue does not have any assignees\n") # Insert the issue labels to the issue_labels table for label_dict in issue_dict['labels']: @@ -357,10 +201,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_labels_table.insert().values(label)) - logging.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_labels table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue label with text: " + label_dict['name'] + "\n") + self.logger.info("Inserted issue label with text: " + label_dict['name'] + "\n") #### Messages/comments and events insertion @@ -375,19 +219,19 @@ def issues_model(self, entry_info, repo_id): duplicate_col_map = {'msg_timestamp': 'created_at'} #list to hold contributors needing insertion or update - issue_comments = paginate(self, comments_url, duplicate_col_map, update_col_map, table, table_pkey, + issue_comments = self.paginate(comments_url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="WHERE msg_id IN (SELECT msg_id FROM issue_message_ref WHERE issue_id = {})".format( self.issue_id_inc)) - logging.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) + self.logger.info("Number of comments needing insertion: {}\n".format(len(issue_comments))) for comment in issue_comments: try: - commenter_cntrb_id = find_id_from_login(self, comment['user']['login']) + commenter_cntrb_id = self.find_id_from_login(comment['user']['login']) except: commenter_cntrb_id = None issue_comment = { - "pltfrm_id": 25150, + "pltfrm_id": self.platform_id, "msg_text": comment['body'], "msg_timestamp": comment['created_at'], "cntrb_id": commenter_cntrb_id, @@ -397,13 +241,13 @@ def issues_model(self, entry_info, repo_id): } try: result = self.db.execute(self.message_table.insert().values(issue_comment)) - logging.info("Primary key inserted into the message table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the message table: 
{}".format(result.inserted_primary_key)) self.results_counter += 1 self.msg_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted issue comment with id: {}\n".format(self.msg_id_inc)) + self.logger.info("Inserted issue comment with id: {}\n".format(self.msg_id_inc)) except Exception as e: - logging.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) + self.logger.info("Worker ran into error when inserting a message, likely had invalid characters. error: {}".format(e)) ### ISSUE MESSAGE REF TABLE ### @@ -417,8 +261,8 @@ def issues_model(self, entry_info, repo_id): "issue_msg_ref_src_node_id": comment['node_id'] } - result = self.db.execute(self.issues_message_ref_table.insert().values(issue_message_ref)) - logging.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) + result = self.db.execute(self.issue_message_ref_table.insert().values(issue_message_ref)) + self.logger.info("Primary key inserted into the issue_message_ref table: {}".format(result.inserted_primary_key)) self.results_counter += 1 # Base of the url for event endpoints @@ -434,7 +278,7 @@ def issues_model(self, entry_info, repo_id): pseudo_key_gh = 'url' pseudo_key_augur = 'node_url' table = 'issue_events' - event_table_values = get_table_values(self, [pseudo_key_augur], [table], "WHERE issue_id = {}".format(self.issue_id_inc)) + event_table_values = self.get_table_values([pseudo_key_augur], [table], "WHERE issue_id = {}".format(self.issue_id_inc)) # Paginate backwards through all the events but get first page in order # to determine if there are multiple pages and if the 1st page covers all @@ -442,29 +286,29 @@ def issues_model(self, entry_info, repo_id): multiple_pages = False while True: - logging.info("Hitting endpoint: " + events_url.format(i) + " ...\n") + self.logger.info("Hitting endpoint: " + events_url.format(i) + " ...\n") r = requests.get(url=events_url.format(i), headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) # Find last page so we can decrement from there if 'last' in r.links and not multiple_pages and not self.finishing_task: param = r.links['last']['url'][-6:] i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") multiple_pages = True elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") + self.logger.info("Only 1 page of request\n") elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ... " + self.logger.info("Finishing a previous task, paginating forwards ... " "excess rate limit requests will be made\n") j = r.json() # Checking contents of requests with what we already have in the db - new_events = check_duplicates(j, event_table_values, pseudo_key_gh) + new_events = self.check_duplicates(j, event_table_values, pseudo_key_gh) if len(new_events) == 0 and multiple_pages and 'last' in r.links: if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown events, breaking from pagination.\n") + self.logger.info("No more pages with unknown events, breaking from pagination.\n") break elif len(new_events) != 0: to_add = [obj for obj in new_events if obj not in issue_events] @@ -474,29 +318,29 @@ def issues_model(self, entry_info, repo_id): # Since we already wouldve checked the first page... 
break if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") + self.logger.info("No more pages to check, breaking from pagination.\n") break - logging.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") + self.logger.info("Number of events needing insertion: " + str(len(issue_events)) + "\n") # If the issue is closed, then we search for the closing event and store the user's id cntrb_id = None if 'closed_at' in issue_dict: for event in issue_events: if str(event['event']) != "closed": - logging.info("not closed, continuing") + self.logger.info("not closed, continuing") continue if not event['actor']: continue - cntrb_id = find_id_from_login(self, event['actor']['login']) + cntrb_id = self.find_id_from_login(event['actor']['login']) if cntrb_id is not None: break # Need to hit this single contributor endpoint to get extra created at data... cntrb_url = ("https://api.github.com/users/" + event['actor']['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) + self.update_gh_rate_limit(r) contributor = r.json() company = None @@ -543,20 +387,17 @@ def issues_model(self, entry_info, repo_id): # Commit insertion to table result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format( + self.logger.info("Primary key inserted into the contributors table: {}".format( result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - # Increment our global track of the cntrb id for the possibility of it being used as a FK - self.cntrb_id_inc = int(result.inserted_primary_key[0]) + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") for event in issue_events: if event['actor'] is not None: - event['cntrb_id'] = find_id_from_login(self, event['actor']['login']) + event['cntrb_id'] = self.find_id_from_login(event['actor']['login']) if event['cntrb_id'] is None: - logging.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") + self.logger.info("SOMETHING WRONG WITH FINDING ID FROM LOGIN") continue # event['cntrb_id'] = None else: @@ -578,10 +419,10 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issue_events_table.insert().values(issue_event)) - logging.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the issue_events table: " + str(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) + self.logger.info("Inserted issue event: " + event['event'] + " for issue id: {}\n".format(self.issue_id_inc)) if cntrb_id is not None: update_closing_cntrb = { @@ -589,11 +430,11 @@ def issues_model(self, entry_info, repo_id): } result = self.db.execute(self.issues_table.update().where( self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) - logging.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( + self.logger.info("Updated tuple in the issues table with contributor that closed it, issue_id: {}\n".format( issue_dict['id'])) self.issue_id_inc += 1 #Register this task as completed - 
register_task_completion(self, entry_info, repo_id, "issues") + self.register_task_completion(entry_info, repo_id, "issues") diff --git a/workers/github_worker/github_worker/__init__.py b/workers/github_worker/github_worker/__init__.py deleted file mode 100644 --- a/workers/github_worker/github_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/github_worker/github_worker/runtime.py b/workers/github_worker/github_worker/runtime.py deleted file mode 100644 --- a/workers/github_worker/github_worker/runtime.py +++ /dev/null @@ -1,107 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from github_worker.worker import GitHubWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.github_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.github_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51236, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'github_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: # for multiple instances of workers - try: # trying each port for an already-alive worker until a free port is found - print("New github worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": 
"com.augurlabs.core.github_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - app.github_worker = GitHubWorker(config) # declares the worker that will be running on this server with specified config - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.github_worker._child is not None: - app.github_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/github_worker/runtime.py b/workers/github_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/github_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.github_worker.github_worker import GitHubWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/github_worker/setup.py b/workers/github_worker/setup.py --- a/workers/github_worker/setup.py +++ b/workers/github_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="github_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'github_worker_start=github_worker.runtime:main', + 'github_worker_start=workers.github_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/template_worker/template_worker/__init__.py b/workers/gitlab_issues_worker/__init__.py similarity index 50% rename from workers/template_worker/template_worker/__init__.py rename to workers/gitlab_issues_worker/__init__.py --- a/workers/template_worker/template_worker/__init__.py +++ b/workers/gitlab_issues_worker/__init__.py @@ -1,4 +1,4 @@ -"""gh_repo_info_worker - Augur Worker that collects GitHub Repo Info data""" +"""gitlab_issues_worker - Augur Worker that collects Gitlab Issue Info""" __version__ = '0.0.0' __author__ = 'Augur Team <[email protected]>' diff --git a/workers/gitlab_issues_worker/gitlab_issues_worker.py b/workers/gitlab_issues_worker/gitlab_issues_worker.py new file mode 100644 --- 
/dev/null +++ b/workers/gitlab_issues_worker/gitlab_issues_worker.py @@ -0,0 +1,193 @@ +import logging, os, sys, time, requests, json +from datetime import datetime +from multiprocessing import Process, Queue +import pandas as pd +import sqlalchemy as s +from workers.worker_base import Worker + + +class GitLabIssuesWorker(Worker): + def __init__(self, config={}): + + # Define what this worker can be given and know how to interpret + + # given is usually either [['github_url']] or [['git_url']] (depending if your + # worker is exclusive to repos that are on the GitHub platform) + worker_type = "gitlab_issues_worker" + given = [['git_url']] + + # The name the housekeeper/broker use to distinguish the data model this worker can fill + # You will also need to name the method that does the collection for this model + # in the format *model name*_model() such as fake_data_model() for example + models = ['gitlab_issues'] + + # Define the tables needed to insert, update, or delete on + # The Worker class will set each table you define here as an attribute + # so you can reference all of them like self.message_table or self.repo_table + data_tables = ['contributors', 'issues', 'issue_labels', 'message', 'repo', + 'issue_message_ref', 'issue_events','issue_assignees','contributors_aliases', + 'pull_request_assignees', 'pull_request_events', 'pull_request_reviewers', 'pull_request_meta', + 'pull_request_repo'] + # For most workers you will only need the worker_history and worker_job tables + # from the operations schema, these tables are to log worker task histories + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Request headers updation + + gitlab_api_key = self.augur_config.get_value("Database", "gitlab_api_key") + self.config.update({ + "gitlab_api_key": gitlab_api_key + }) + self.headers = {"PRIVATE-TOKEN" : self.config['gitlab_api_key']} + + + # Define data collection info + self.tool_source = 'Gitlab API Worker' + self.tool_version = '0.0.0' + self.data_source = 'GitLab API' + + + def gitlab_issues_model(self, task, repo_id): + """ This is just an example of a data collection method. All data collection + methods for all workers currently accept this format of parameters. If you + want to change these parameters, you can re-define the collect() method to + overwrite the Worker class' version of it (which is the method that calls + this method). + + :param task: the task generated by the housekeeper and sent to the broker which + was then sent to this worker. Takes the example dict format of: + { + 'job_type': 'MAINTAIN', + 'models': ['fake_data'], + 'display_name': 'fake_data model for url: https://github.com/vmware/vivace', + 'given': { + 'git_url': 'https://github.com/vmware/vivace' + } + } + :param repo_id: the collect() method queries the repo_id given the git/github url + and passes it along to make things easier. 
An int such as: 27869 + """ + + # Collection and insertion of data happens here + + # Collecting issue info from Gitlab API + self.issue_id_inc = self.get_max_id('issues', 'issue_id') + self.msg_id_inc = self.get_max_id('message', 'msg_id') + self.logger.info('Beginning the process of GitLab Issue Collection...'.format(str(os.getpid()))) + gitlab_base = 'https://gitlab.com/api/v4' + intermediate_url = '{}/projects/{}/issues?per_page=100&state=opened&'.format(gitlab_base, 18754962) + gitlab_issues_url = intermediate_url + "page={}" + + + # Get issues that we already have stored + # Set pseudo key (something other than PK) to + # check dupicates with + table = 'issues' + table_pkey = 'issue_id' + update_col_map = {'issue_state': 'state'} + duplicate_col_map = {'gh_issue_id': 'id'} + + #list to hold issues needing insertion + issues = self.paginate(gitlab_issues_url, duplicate_col_map, update_col_map, table, table_pkey, + 'WHERE repo_id = {}'.format(repo_id), platform="gitlab") + + self.logger.info(issues) + self.logger.info("Count of issues needing update or insertion: " + str(len(issues)) + "\n") + for issue_dict in issues: + self.logger.info("Begin analyzing the issue with title: " + issue_dict['title'] + "\n") + pr_id = None + if "pull_request" in issue_dict: + self.logger.info("This is an MR\n") + # Right now we are just storing our issue id as the MR id if it is one + pr_id = self.issue_id_inc + else: + self.logger.info("Issue is not an MR\n") + + # Insert data into models + issue = { + "repo_id": issue_dict['project_id'], + "reporter_id": self.find_id_from_login(issue_dict['author']['username'], platform='gitlab'), + "pull_request": pr_id, + "pull_request_id": pr_id, + "created_at": issue_dict['created_at'], + "issue_title": issue_dict['title'], + "issue_body": issue_dict['description'] if 'description' in issue_dict else None, + "comment_count": issue_dict['user_notes_count'], + "updated_at": issue_dict['updated_at'], + "closed_at": issue_dict['closed_at'], + "repository_url": issue_dict['_links']['project'], + "issue_url": issue_dict['_links']['self'], + "labels_url": issue_dict['labels'], + "comments_url": issue_dict['_links']['notes'], + "events_url": None, + "html_url": issue_dict['_links']['self'], + "issue_state": issue_dict['state'], + "issue_node_id": None, + "gh_issue_id": issue_dict['id'], + "gh_issue_number": issue_dict['iid'], + "gh_user_id": issue_dict['author']['id'], + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + # Commit insertion to the issues table + if issue_dict['flag'] == 'need_update': + self.logger.info("UPDATE FLAG") + result = self.db.execute(self.issues_table.update().where( + self.issues_table.c.gh_issue_id==issue_dict['id']).values(issue)) + self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( + issue_dict['id'])) + self.issue_id_inc = issue_dict['pkey'] + elif issue_dict['flag'] == 'need_insertion': + self.logger.info("INSERT FLAG") + try: + result = self.db.execute(self.issues_table.insert().values(issue)) + self.logger.info("Primary key inserted into the issues table: " + str(result.inserted_primary_key)) + self.results_counter += 1 + self.issue_id_inc = int(result.inserted_primary_key[0]) + self.logger.info("Inserted issue with our issue_id being: {}".format(self.issue_id_inc) + + " and title of: {} and gh_issue_num of: {}\n".format(issue_dict['title'], issue_dict['iid'])) + except Exception as e: + self.logger.info("When inserting an issue, ran into 
the following error: {}\n".format(e)) + self.logger.info(issue) + # continue + + # issue_assigness + self.logger.info("assignees", issue_dict['assignees']) + collected_assignees = issue_dict['assignees'] + if issue_dict['assignee'] not in collected_assignees: + collected_assignees.append(issue_dict['assignee']) + if collected_assignees[0] is not None: + self.logger.info("Count of assignees to insert for this issue: " + str(len(collected_assignees)) + "\n") + for assignee_dict in collected_assignees: + if type(assignee_dict) != dict: + continue + assignee = { + "issue_id": self.issue_id_inc, + "cntrb_id": self.find_id_from_login(assignee_dict['username'], platform='gitlab'), + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source, + "issue_assignee_src_id": assignee_dict['id'], + "issue_assignee_src_node": None + } + self.logger.info("assignee info", assignee) + # Commit insertion to the assignee table + result = self.db.execute(self.issue_assignees_table.insert().values(assignee)) + self.logger.info("Primary key inserted to the issues_assignees table: " + str(result.inserted_primary_key)) + self.results_counter += 1 + + self.logger.info("Inserted assignee for issue id: " + str(self.issue_id_inc) + + " with login/cntrb_id: " + assignee_dict['username'] + " " + str(assignee['cntrb_id']) + "\n") + else: + self.logger.info("Issue does not have any assignees\n") + + # Register this task as completed. + # This is a method of the worker class that is required to be called upon completion + # of any data collection model, this lets the broker know that this worker is ready + # for another task + self.register_task_completion(task, repo_id, 'gitlab_issues') + diff --git a/workers/gitlab_issues_worker/runtime.py b/workers/gitlab_issues_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/gitlab_issues_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.gitlab_issues_worker.gitlab_issues_worker import GitLabIssuesWorker +from workers.util import WorkerGunicornApplication, create_server + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitLabIssuesWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) \ No newline at end of file diff --git a/workers/gitlab_issues_worker/setup.py b/workers/gitlab_issues_worker/setup.py new file mode 100644 --- /dev/null +++ b/workers/gitlab_issues_worker/setup.py @@ -0,0 +1,41 @@ +import io +import os +import re + +from setuptools import find_packages +from setuptools import setup + +def read(filename): + filename = os.path.join(os.path.dirname(__file__), filename) + text_type = type(u"") + with io.open(filename, mode="r", encoding='utf-8') as fd: + return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) + +setup( + name="gitlab_issues_worker", + version="0.0.0", + url="https://github.com/chaoss/augur", + license='MIT', + author="Augur Team", + author_email="", + description="Gitlab Worker", + packages=find_packages(exclude=('tests',)), + install_requires=[ + 'flask', + 'requests', + 'psycopg2-binary', + 'click' + ], + entry_points={ + 
'console_scripts': [ + 'gitlab_issues_worker_start=workers.gitlab_issues_worker.runtime:main', + ], + }, + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + ] +) diff --git a/workers/insight_worker/insight_worker/__init__.py b/workers/insight_worker/__init__.py similarity index 100% rename from workers/insight_worker/insight_worker/__init__.py rename to workers/insight_worker/__init__.py diff --git a/workers/insight_worker/insight_worker/worker.py b/workers/insight_worker/insight_worker.py similarity index 79% rename from workers/insight_worker/insight_worker/worker.py rename to workers/insight_worker/insight_worker.py --- a/workers/insight_worker/insight_worker/worker.py +++ b/workers/insight_worker/insight_worker.py @@ -10,179 +10,55 @@ import scipy.stats import datetime from sklearn.ensemble import IsolationForest -from workers.standard_methods import * #init_oauths, get_table_values, get_max_id, register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process, paginate +from workers.worker_base import Worker import warnings warnings.filterwarnings('ignore') -class InsightWorker: +class InsightWorker(Worker): """ Worker that detects anomalies on a select few of our metrics task: most recent task the broker added to the worker's queue child: current process of the queue being ran queue: queue of tasks to be fulfilled config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self._task = task - self._child = None - self._queue = Queue() - self.db = None + def __init__(self, config={}): + + worker_type = "insight_worker" + + given = [['git_url']] + models = ['insights'] + + data_tables = ['chaoss_metric_status', 'repo_insights', 'repo_insights_records'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({ + 'api_host': self.augur_config.get_value('Server', 'host'), + 'api_port': self.augur_config.get_value('Server', 'port') + }) + + # These 3 are included in every tuple the worker inserts (data collection info) self.tool_source = 'Insight Worker' - self.tool_version = '0.0.3' # See __init__.py + self.tool_version = '1.0.0' self.data_source = 'Augur API' + self.refresh = True self.send_insights = True - self.finishing_task = False self.anomaly_days = self.config['anomaly_days'] self.training_days = self.config['training_days'] self.contamination = self.config['contamination'] self.confidence = self.config['confidence_interval'] / 100 self.metrics = self.config['metrics'] - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["insights"] - } - ], - "config": [self.config] - } - - self.results_counter = 0 - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - dbschema = 'augur_data' - self.db 
= s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - # produce our own MetaData object - metadata = MetaData() - helper_metadata = MetaData() - - # we can reflect it ourselves from a database, using options - # such as 'only' to limit what tables we look at... - metadata.reflect(self.db, only=['chaoss_metric_status', 'repo_insights', 'repo_insights_records']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job']) - - # we can then produce a set of mappings from this MetaData. - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - # calling prepare() just sets up mapped classes and relationships. - Base.prepare() - HelperBase.prepare() - - # mapped classes are ready - self.chaoss_metric_status_table = Base.classes['chaoss_metric_status'].__table__ - self.repo_insights_table = Base.classes['repo_insights'].__table__ - self.repo_insights_records_table = Base.classes['repo_insights_records'].__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - 'database_connection_string': 'psql://{}:5432/augur'.format(self.config['broker_host']), - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - self._task = value - self.run() - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() # Get the task off our MP queue - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - # If task is not a valid job type - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query repo_id corresponding to repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught - 
# and worker can move onto the next task without stopping - try: - # Call method corresponding to model sent in task - if message['models'][0] == 'insights': - self.insights_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - def insights_model(self, entry_info, repo_id): logging.info("Discovering insights for task with entry info: {}\n".format(entry_info)) - record_model_process(self, repo_id, 'insights') """ Collect data """ base_url = 'http://{}:{}/api/unstable/repo-groups/9999/repos/{}/'.format( - self.config['broker_host'], self.config['broker_port'], repo_id) + self.config['api_host'], self.config['api_port'], repo_id) # Dataframe to hold all endpoint results # Subtract configurable amount of time @@ -218,7 +94,7 @@ def insights_model(self, entry_info, repo_id): # If none of the endpoints returned data if df.size == 0: logging.info("None of the provided endpoints provided data for this repository. Anomaly detection is 'done'.\n") - register_task_completion(self, entry_info, repo_id, "insights") + self.register_task_completion(entry_info, repo_id, "insights") return """ Deletion of old insights """ @@ -258,7 +134,7 @@ def insights_model(self, entry_info, repo_id): result = self.db.execute(delete_points_SQL, repo_id=repo_id, min_date=min_date) # get table values to check for dupes later on - insight_table_values = get_table_values(self, ['*'], ['repo_insights_records'], where_clause="WHERE repo_id = {}".format(repo_id)) + insight_table_values = self.get_table_values(['*'], ['repo_insights_records'], where_clause="WHERE repo_id = {}".format(repo_id)) to_model_columns = df.columns[0:len(self.metrics)+1] @@ -415,7 +291,7 @@ def classify_anomalies(df,metric): logging.info("error occurred while storing datapoint: {}\n".format(repr(e))) break - register_task_completion(self, entry_info, repo_id, "insights") + self.register_task_completion(entry_info, repo_id, "insights") def confidence_interval_insights(self, entry_info): """ Anomaly detection method based on confidence intervals @@ -423,7 +299,6 @@ def confidence_interval_insights(self, entry_info): # Update table of endpoints before we query them all logging.info("Discovering insights for task with entry info: {}".format(entry_info)) - record_model_process(self, repo_id, 'insights') # Set the endpoints we want to discover insights for endpoints = [{'cm_info': "issues-new"}, {'cm_info': "code-changes"}, {'cm_info': "code-changes-lines"}, @@ -445,10 +320,10 @@ def confidence_interval_insights(self, entry_info): # If we are discovering insights for a group vs repo, the base url will change if 'repo_group_id' in entry_info and 'repo_id' not in entry_info: base_url = 'http://{}:{}/api/unstable/repo-groups/{}/'.format( - self.config['broker_host'],self.config['broker_port'], entry_info['repo_group_id']) + self.config['api_host'],self.config['api_port'], entry_info['repo_group_id']) else: base_url = 'http://{}:{}/api/unstable/repo-groups/9999/repos/{}/'.format( - self.config['broker_host'],self.config['broker_port'], repo_id) + self.config['api_host'],self.config['api_port'], repo_id) # Hit and discover insights for every endpoint we care about for endpoint in endpoints: @@ -610,50 +485,6 @@ def is_unique_key(key): self.register_task_completion(entry_info, "insights") - def register_task_completion(self, entry_info, model): - # Task to send back to broker - task_completed = { - 'worker_id': self.config['id'], - 'job_type': entry_info['job_type'], - 'repo_id': repo_id, - 'git_url': 
entry_info['git_url'] - } - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.config['zombie_id'], - "timestamp": datetime.datetime.now(), - "status": "Success", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job completion for: " + str(task_completed) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where( - self.job_table.c.job_model==model).values(updated_job)) - logging.info("Update job process for model: " + model + "\n") - - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - - # Reset results counter for next task - self.results_counter = 0 - def send_insight(self, insight, units_from_mean): try: repoSQL = s.sql.text(""" @@ -821,9 +652,9 @@ def confidence_interval(self, data, timeperiod='week', confidence=.95): def update_metrics(self): logging.info("Preparing to update metrics ...\n\n" + "Hitting endpoint: http://{}:{}/api/unstable/metrics/status ...\n".format( - self.config['broker_host'],self.config['broker_port'])) + self.config['api_host'],self.config['api_port'])) r = requests.get(url='http://{}:{}/api/unstable/metrics/status'.format( - self.config['broker_host'],self.config['broker_port'])) + self.config['api_host'],self.config['api_port'])) data = r.json() active_metrics = [metric for metric in data if metric['backend_status'] == 'implemented'] diff --git a/workers/insight_worker/insight_worker/runtime.py b/workers/insight_worker/insight_worker/runtime.py deleted file mode 100644 --- a/workers/insight_worker/insight_worker/runtime.py +++ /dev/null @@ -1,110 +0,0 @@ -from flask import Flask, jsonify, request -from insight_worker.worker import InsightWorker -import click, os, json, logging, requests -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}\n".format(str(request.json))) - app.insight_worker.task = request.json - - #set task - return jsonify({"success": "sucess"}) - - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": app.insight_worker._queue, - "tasks": [{ - "given": list(app.insight_worker._queue) - }] - }) - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - 
""" - return app.insight_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51252, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'insight_worker', None, {}) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.insight_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "anomaly_days": worker_info['anomaly_days'] if 'anomaly_days' in worker_info else 2, - "training_days": worker_info['training_days'] if 'training_days' in worker_info else 365, - "confidence_interval": worker_info['confidence_interval'] if 'confidence_interval' in worker_info else .95, - "contamination": worker_info['contamination'] if 'contamination' in worker_info else 0.041, - 'metrics': worker_info['metrics'] if 'metrics' in worker_info else {"issues-new": "issues", - "code-changes": "commit_count", "code-changes-lines": "added", - "reviews": "pull_requests", "contributors-new": "new_contributors"} - } - - #create instance of the worker - app.insight_worker = InsightWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - print("Starting Flask App on host {} with port {} with pid: ".format(broker_host, worker_port) + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - print("Killing Flask App: {} and telling broker that this worker is disconnected.".format(str(os.getpid()))) - try: - logging.info("Sending disconnected message to broker... 
@ -> {} with info: {}\n".format('http://{}:{}/api/unstable/workers'.format( - config['broker_host'], config['broker_port']), config)) - requests.post('http://{}:{}/api/unstable/workers/remove'.format( - config['broker_host'], config['broker_port']), json=config) #hello message - except Exception as e: - logging.info("Ran into error: {}".format(e)) - logging.info("Broker's port is busy, worker will not be able to accept tasks, " - "please restart Augur if you want this worker to attempt connection again.") - diff --git a/workers/insight_worker/runtime.py b/workers/insight_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/insight_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.insight_worker.insight_worker import InsightWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = InsightWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/insight_worker/setup.py b/workers/insight_worker/setup.py --- a/workers/insight_worker/setup.py +++ b/workers/insight_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="insight_worker", - version="0.0.2", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -31,7 +31,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'insight_worker_start=insight_worker.runtime:main', + 'insight_worker_start=workers.insight_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/linux_badge_worker/__init__.py b/workers/linux_badge_worker/__init__.py new file mode 100644 diff --git a/workers/linux_badge_worker/linux_badge_worker.py b/workers/linux_badge_worker/linux_badge_worker.py new file mode 100644 --- /dev/null +++ b/workers/linux_badge_worker/linux_badge_worker.py @@ -0,0 +1,63 @@ +import os +from datetime import datetime +import logging +import requests +import json +from urllib.parse import quote +from multiprocessing import Process, Queue + +import pandas as pd +import sqlalchemy as s +from sqlalchemy.ext.automap import automap_base +from sqlalchemy import MetaData +from workers.worker_base import Worker + +class LinuxBadgeWorker(Worker): + """ Worker that collects repo badging data from CII + config: database credentials, broker information, and ID + """ + def __init__(self, config={}): + + worker_type = "linux_badge_worker" + + given = [['git_url']] + models = ['badges'] + + data_tables = ['repo_badging'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + self.config.update({"endpoint": "https://bestpractices.coreinfrastructure.org/projects.json?pq="}) + self.tool_source = 'Linux Badge Worker' + self.tool_version = '1.0.0' + self.data_source = 'CII Badging API' + + + def badges_model(self, entry_info, repo_id): + """ Data collection and storage method + Query the CII API and store the result in the DB for the badges model + """ + git_url = entry_info['given']['git_url'] + 
self.logger.info("Collecting data for {}".format(git_url)) + extension = quote(git_url[0:-4]) + + url = self.config['endpoint'] + extension + self.logger.info("Hitting CII endpoint: " + url + " ...") + data = requests.get(url=url).json() + + if data != []: + self.logger.info("Inserting badging data for " + git_url) + self.db.execute(self.repo_badging_table.insert()\ + .values(repo_id=repo_id, + data=data, + tool_source=self.tool_source, + tool_version=self.tool_version, + data_source=self.data_source)) + + self.results_counter += 1 + else: + self.logger.info("No CII data found for {}\n".format(git_url)) + + self.register_task_completion(entry_info, repo_id, "badges") diff --git a/workers/linux_badge_worker/linux_badge_worker/__init__.py b/workers/linux_badge_worker/linux_badge_worker/__init__.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""linux_badge_worker - Augur worker that collects CII badging data""" - -__tool_source__ = 'Linux Badge Worker' -__tool_version__ = '0.1.0' -__data_source__ = 'CII Badging API' - -__author__ = 'Augur Team <[email protected]>' -__all__ = [] - diff --git a/workers/linux_badge_worker/linux_badge_worker/runtime.py b/workers/linux_badge_worker/linux_badge_worker/runtime.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/runtime.py +++ /dev/null @@ -1,107 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from linux_badge_worker.worker import BadgeWorker -from workers.standard_methods import read_config - -def create_server(app): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.linux_badge_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.linux_badge_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51235, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'linux_badge_worker', None, None) - - 
worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.linux_badge_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json?pq=", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - app.linux_badge_worker = BadgeWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - - if app.linux_badge_worker._child is not None: - app.linux_badge_worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/linux_badge_worker/worker.py b/workers/linux_badge_worker/linux_badge_worker/worker.py deleted file mode 100644 --- a/workers/linux_badge_worker/linux_badge_worker/worker.py +++ /dev/null @@ -1,240 +0,0 @@ -import os -from datetime import datetime -import logging -import requests -import json -from urllib.parse import quote -from multiprocessing import Process, Queue - -from linux_badge_worker import __data_source__, __tool_source__, __tool_version__ -import pandas as pd -import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData -from workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class BadgeWorker: - """ Worker that collects repo badging data from CII - config: database credentials, broker information, and ID - """ - def __init__(self, config, task=None): - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.config = config - - self.db = None - self.repo_badging_table = None - - self._task = task - self._queue = Queue() - self._child = None - - self.history_id = None - self.finishing_task = False - self.working_on = None - self.results_counter = 0 - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": 
[["git_url"]], - "models":["badges"] - } - ], - "config": [self.config] - } - - self._db_str = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], - self.config['password'], - self.config['host'], - self.config['port'], - self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self._db_str, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self._db_str, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - logging.info("Database connection established...") - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['repo_badging']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - self.repo_badging_table = Base.classes.repo_badging.__table__ - logging.info("ORM setup complete...") - - # Organize different api keys/oauths available - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauth_sql = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(0)) - - for oauth in [{'oauth_id': 0, 'access_token': 0}] + json.loads(pd.read_sql(oauth_sql, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) \ - - datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - - # Send broker hello message - connect_to_broker(self) - logging.info("Connected to the broker...\n") - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "key": "", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['github_api_key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - 
self._queue.put(value) - - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - - self._task = value - self.run() - - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def badges_model(self, entry_info, repo_id): - """ Data collection and storage method - Query the CII API and store the result in the DB for the badges model - """ - git_url = entry_info['given']['git_url'] - logging.info("Collecting data for {}".format(git_url)) - extension = quote(git_url[0:-4]) - - url = self.config['endpoint'] + extension - logging.info("Hitting CII endpoint: " + url + " ...") - data = requests.get(url=url).json() - - if data != []: - logging.info("Inserting badging data for " + git_url) - self.db.execute(self.repo_badging_table.insert()\ - .values(repo_id=repo_id, - data=data, - tool_source=__tool_source__, - tool_version=__tool_version__, - data_source=__data_source__)) - - self.results_counter += 1 - else: - logging.info("No CII data found for {}\n".format(git_url)) - - register_task_completion(self, entry_info, repo_id, "badges") - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - """ Query all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'badges': - self.badges_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() diff --git a/workers/linux_badge_worker/runtime.py b/workers/linux_badge_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/linux_badge_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.linux_badge_worker.linux_badge_worker import LinuxBadgeWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = LinuxBadgeWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/linux_badge_worker/setup.py b/workers/linux_badge_worker/setup.py --- 
a/workers/linux_badge_worker/setup.py +++ b/workers/linux_badge_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="linux_badge_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'linux_badge_worker_start=linux_badge_worker.runtime:main', + 'linux_badge_worker_start=workers.linux_badge_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/metric_status_worker/metric_status_worker/__init__.py b/workers/metric_status_worker/metric_status_worker/__init__.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""augur_worker_github - Augur Worker that collects GitHub data""" - -__version__ = '0.1.0' -__author__ = 'Augur Team <[email protected]>' -__all__ = [] diff --git a/workers/metric_status_worker/metric_status_worker/runtime.py b/workers/metric_status_worker/metric_status_worker/runtime.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/runtime.py +++ /dev/null @@ -1,108 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, logging, requests, json -from metric_status_worker.worker import MetricStatusWorker -import os -import json -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': #will post a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.metric_status_worker.task = request.json - - #set task - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "success" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.metric_status_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51263, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'metric_status_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if 
r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.metric_status_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - - #create instance of the worker - app.metric_status_worker = MetricStatusWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=port) - if app.metric_status_worker._child is not None: - app.metric_status_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/metric_status_worker/metric_status_worker/worker.py b/workers/metric_status_worker/metric_status_worker/worker.py deleted file mode 100644 --- a/workers/metric_status_worker/metric_status_worker/worker.py +++ /dev/null @@ -1,719 +0,0 @@ -import base64 -import logging -import os -import re -import sys -import json -import time -from abc import ABC -from datetime import datetime -from multiprocessing import Process, Queue -from urllib.parse import urlparse - -import pandas as pd -import requests -import sqlalchemy as s -from github import Github -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - - -class MetricStatusWorker: - def __init__(self, config, task=None): - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.config = config - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.db = None - self.table = None - self.API_KEY = self.config['key'] - self.tool_source = 'Metric Status Worker' - self.tool_version = '0.0.1' - self.data_source = 'GitHub API' - self.results_counter = 0 - self.working_on = None - - - # url = 'https://api.github.com' - # response = requests.get(url, headers=self.headers) - # self.rate_limit = int(response.headers['X-RateLimit-Remaining']) - - specs = { - "id": self.config['id'], - "location": self.config['location'], - 
"qualifications": [ - { - "given": [["git_url"]], - "models":["chaoss_metric_status"] - } - ], - "config": [self.config] - } - - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], self.config['port'], self.config['database'] - ) - - logging.info("Making database connections...") - - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['chaoss_metric_status']) - # helper_metadata.reflect(self.helper_db) - - Base = automap_base(metadata=metadata) - - Base.prepare() - - self.chaoss_metric_status_table = Base.classes['chaoss_metric_status'].__table__ - - try: - requests.post('http://{}:{}/api/unstable/workers'.format( - self.config['broker_host'],self.config['broker_port']), json=specs) - except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker') - sys.exit('Cannot connect to the broker! Quitting...') - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced """ - return self._task - - @task.setter - def task(self, value): - try: - if value['job_type'] == 'UPDATE': - self._queue.put(CollectorTask('TASK', {})) - elif value['job_type'] == 'MAINTAIN': - self._maintain_queue.put(CollectorTask('TASK', {})) - - if 'focused_task' in value: - if value['focused_task'] == 1: - self.finishing_task = True - except Exception as e: - logging.error("Error: {},".format(str(e))) - - self._task = CollectorTask(message_type='TASK', entry_info={}) - self.run() - - def cancel(self): - """ Delete/cancel current task """ - self._task = None - - def run(self): - logging.info("Running...") - if self._child is None: - self._child = Process(target=self.collect, args=()) - self._child.start() - requests.post("http://{}:{}/api/unstable/add_pids".format( - self.config['broker_host'],self.config['broker_port']), json={'pids': [self._child.pid, os.getpid()]}) - - def collect(self): - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = 'UPDATE' - elif not self._maintain_queue.empty(): - message = self._maintain_queue.get() - logging.info("Popped off message: {}".format(str(message.entry_info))) - self.working_on = "MAINTAIN" - else: - break - - - if message.type == 'EXIT': - break - if message.type != 'TASK': - raise ValueError( - f'{message.type} is not a recognized task type') - - if message.type == 'TASK': - self.update_metrics(message.entry_info) - - def update_metrics(self, entry_info): - """ Data colletction function - Query the github api for metric status - """ - status = MetricsStatus(self.API_KEY) - status.create_metrics_status() - metrics = status.metrics_status - - # convert to dict - dict_metrics = [] - for metric in metrics: - metric_info = { - 'cm_group': metric['group'], - 'cm_source': metric['data_source'], - 'cm_type': metric['metric_type'], - 'cm_backend_status': metric['backend_status'], - 'cm_frontend_status': metric['frontend_status'], - 'cm_api_endpoint_repo': metric['endpoint_repo'], - 'cm_api_endpoint_rg': metric['endpoint_group'], - 'cm_defined': metric['is_defined'], - 'cm_name': metric['display_name'], - 'cm_working_group': metric['group'], - 'cm_info': metric['tag'], - 'cm_working_group_focus_area': metric['focus_area'], - 'tool_source': self.tool_source, - 'tool_version': self.tool_version, - 'data_source': 
self.data_source, - } - dict_metrics.append(metric_info) - - need_insertion = self.filter_duplicates({'cm_api_endpoint_repo': "cm_api_endpoint_repo", 'cm_backend_status':'cm_api_endpoint_rg'}, ['chaoss_metric_status'], - dict_metrics) - logging.info("Count of contributors needing insertion: " + str(len(need_insertion)) + "\n") - for metric in need_insertion: - result = self.db.execute(self.chaoss_metric_status_table.insert().values(metric)) - logging.info("Primary key inserted into the metrics table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - - self.register_task_completion() - - - # def filter_duplicates(self, og_data): - # need_insertion = [] - # colSQL = s.sql.text(""" - # SELECT * FROM chaoss_metric_status - # """) - # values = pd.read_sql(colSQL, self.db) - # for obj in og_data: - # location = values.loc[ (values['cm_name']==obj['cm_name'] ) & ( values['cm_working_group']==obj[ - # 'cm_working_group']) & ()] - # if not location.empty: - # logging.info("value of tuple exists: " + str(obj['cm_name'])) - # else: - # need_insertion.append(obj) - # - # logging.info("While filtering duplicates, we reduced the data size from " + str(len(og_data)) + - # " to " + str(len(need_insertion)) + "\n") - # - # return need_insertion - - def filter_duplicates(self, cols, tables, og_data): - need_insertion = [] - - table_str = tables[0] - del tables[0] - for table in tables: - table_str += ", " + table - for col in cols.keys(): - colSQL = s.sql.text(""" - SELECT {} FROM {} - """.format(col, table_str)) - values = pd.read_sql(colSQL, self.db, params={}) - - for obj in og_data: - if values.isin([obj[cols[col]]]).any().any(): - logging.info("value of tuple exists: " + str(obj[cols[col]]) + "\n") - elif obj not in need_insertion: - need_insertion.append(obj) - logging.info("While filtering duplicates, we reduced the data size from " + str(len(og_data)) + - " to " + str(len(need_insertion)) + "\n") - return need_insertion - - def update_exist_metrics(self, metrics): - need_update = [] - need_insert = [] - - for metric in metrics: - result = self.db.execute(self.chaoss_metric_status_table.update().where((self.chaoss_metric_status_table.c.cm_name == metric['cm_name'])&(self.chaoss_metric_status_table.c.cm_group == metric['cm_group']) & ((self.chaoss_metric_status_table.c.cm_api_endpoint_repo != metric['cm_api_endpoint_repo']) | (self.chaoss_metric_status_table.c.cm_api_endpoint_rg != metric['cm_api_endpoint_rg'])|(self.chaoss_metric_status_table.c.cm_source != metric['cm_source'])) - ).values(metric)) - - if result.rowcount: - logging.info("Update Metric {}-{}".format(metric['cm_group'], metric['cm_name'])) - - def register_task_completion(self): - task_completed = { - 'worker_id': self.config['id'], - 'job_type': self.working_on, - } - - logging.info("Telling broker we completed task: " + str(task_completed) + "\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - self.results_counter = 0 - - - - -class FrontendStatusExtractor(object): - - def __init__(self): - pass - self.api_text = open(os.path.abspath(os.path.dirname(os.path.dirname(os.getcwd()))) + - "/frontend/src/AugurAPI.ts", 'r').read() - self.attributes = re.findall( - r'(?:(GitEndpoint|Endpoint|Timeseries|addRepoMetric|addRepoGroupMetric)\()\'(.*)\', \'(.*)\'', - self.api_text) - self.timeseries = [ - attribute for attribute in self.attributes if 
attribute[0] == "Timeseries"] - self.endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "Endpoint"] - self.git_endpoints = [ - attribute for attribute in self.attributes if attribute[0] == "GitEndpoint"] - self.repo_metrics = [ - attribute for attribute in self.attributes if attribute[0] == 'addRepoMetric'] - self.group_metric = [ - attribute for attribute in self.attributes if attribute[0] == 'addRepoMetric'] - - def determine_frontend_status(self, metric): - metric.frontend_status = 'unimplemented' - attribute = None - - if metric.metric_type == "timeseries": - attribute = next((attribute for attribute in self.timeseries if - "/api/unstable/<owner>/<repo>/timeseries/{}".format(attribute[2]) == metric.endpoint_repo), - None) - - elif metric.metric_type == "metric": - attribute = next((attribute for attribute in self.endpoints if - "/api/unstable/<owner>/<repo>/{}".format(attribute[2]) == metric.endpoint_repo), None) - if not attribute: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/repos/<repo_id>/{}".format( - attribute[2]) == metric.endpoint_repo), None) - if not attribute and metric.endpoint_group: - attribute = next((attribute for attribute in self.repo_metrics if - "/api/unstable/repo-groups/<repo_group_id>/{}".format(attribute[2]) == metric.endpoint_group), None) - - elif metric.metric_type == "git": - attribute = next((attribute for attribute in self.git_endpoints if - "/api/unstable/git/{}".format(attribute[2]) == metric.endpoint_repo), None) - - if attribute is not None: - metric.frontend_status = 'implemented' - metric.chart_mapping = attribute[1] - else: - metric.frontend_status = 'unimplemented' - - -class Metric(ABC): - - def __init__(self): - self.ID = None - self.tag = None - self.display_name = None - self.group = None - self.backend_status = 'unimplemented' - self.frontend_status = 'unimplemented' - self.chart_mapping = None - self.data_source = None - self.metric_type = None - self.documentation_url = None - self.is_defined = False - self.focus_area = None - self.endpoint_group = None - self.endpoint_repo = None - - -class GroupedMetric(Metric): - - def __init__(self, display_name, group, tag, focus_area): - Metric.__init__(self) - self.display_name = display_name - self.tag = tag - self.ID = re.sub(r'-$|\*', '', 'none' + '-' + self.tag) - self.group = group - self.focus_area = focus_area - - -class ImplementedMetric(Metric): - - def __init__(self, metadata, frontend_status_extractor): - Metric.__init__(self) - - self.ID = metadata['ID'] - self.tag = metadata['tag'] - self.display_name = metadata['metric_name'] - self.backend_status = 'implemented' - self.data_source = metadata['source'] - self.group = "experimental" - self.endpoint_group = None - self.endpoint_repo = None - - - if 'metric_type' in metadata: - self.metric_type = metadata['metric_type'] - else: - self.metric_type = 'metric' - - if 'endpoint' in metadata: - if 'group_endpoint' in metadata: - self.endpoint_group = metadata['group_endpoint'] - if 'repo_endpoint' in metadata: - self.endpoint_repo = metadata['repo_endpoint'] - else: - self.endpoint_repo = metadata['endpoint'] - frontend_status_extractor.determine_frontend_status(self) - - -class MetricsStatus(object): - wg_evolution = { - "repo": "chaoss/wg-evolution", - "focus_area": "focus_areas", - "name": 'evolution' - } - - wg_diversity_inclusion = { - "repo": "chaoss/wg-diversity-inclusion", - "focus_area": "focus-areas", - "name": "diversity-inclusion" - } - - 
wg_value = { - "repo": "chaoss/wg-value", - "focus_area": 'focus-areas', - "name": "value" - } - - wg_common = { - "repo": "chaoss/wg-common", - "focus_area": "focus-areas", - "name": "common" - } - - wg_risk = { - "repo": "chaoss/wg-risk", - "focus_area": "focus-areas", - "name": "risk" - } - - def __init__(self, githubapi): - self.__githubapi = githubapi - self.github = Github(self.__githubapi) - - # TODO: don't hardcode this - self.groups = { - "evolution": "Evolution", - "diversity-inclusion": "Diversity and Inclusion metrics", - "value": "Value", - "risk": "Risk", - "common": "Common", - "experimental": "Experimental", - "all": "All" - } - - self.implemented_metrics = [] - - self.evo_metrics = [] - self.di_metrics = [] - self.risk_metrics = [] - self.value_metrics = [] - self.common_metrics = [] - self.experimental_metrics = [] - - self.metrics_by_group = [] - - self.metrics_status = [] - - self.data_sources = [] - self.metric_types = [] - self.tags = {} - self.metadata = [] - - def create_metrics_status(self): - - self.build_implemented_metrics() - - self.evo_metrics = self.create_grouped_metrics( - self.wg_evolution, "evolution") - self.risk_metrics = self.create_grouped_metrics(self.wg_risk, "risk") - self.common_metrics = self.create_grouped_metrics( - self.wg_common, 'common') - self.di_metrics = self.create_grouped_metrics( - self.wg_diversity_inclusion, 'diversity-inclusion') - self.value_metrics = self.create_grouped_metrics( - self.wg_value, 'value') - - self.metrics_by_group = [self.evo_metrics, self.risk_metrics, - self.common_metrics, self.di_metrics, self.value_metrics] - - self.create_experimental_metrics() - self.metrics_by_group.append(self.experimental_metrics) - # - self.copy_implemented_metrics() - - self.find_defined_metrics() - - self.build_metrics_status() - - # self.build_metadata() - - def build_implemented_metrics(self): - frontend_status_extractor = FrontendStatusExtractor() - - r = requests.get( - url='http://{}:{}/api/unstable/batch/metadata'.format( - self.config['broker_host'],self.config['broker_port'])) - data = json.loads(r.text) - - for metric in data: - if "ID" in metric.keys(): - self.implemented_metrics.append( - ImplementedMetric(metric, frontend_status_extractor)) - - def create_grouped_metrics(self, group, group_name): - metrics = self.find_metrics_from_focus_area( - group['repo'], group['focus_area']) - - remote_metrics = [] - for metric in metrics: - remote_metrics.append(GroupedMetric(metric.display_name, group['name'], metric.tag, - metric.focus_area)) - - return remote_metrics - - def find_metrics_from_focus_area(self, repo_name, focus_area_path): - focus_areas = self.github.get_repo( - repo_name).get_dir_contents(focus_area_path) - metrics = [] - for area in focus_areas: - # get focus area name from filename - # focus_area_name = re.sub('.md','',re.sub('-', ' ',area.name)) - focus_area_name = None - focus_area_name_splited = [a.capitalize() for a in re.sub( - '.md', '', re.sub('[_]|[-]', ' ', area.name)).split()] - focus_area_name = ' '.join(focus_area_name_splited) - - # extract structure :focus_area_name/readme.md - if area.type == 'dir': - tmp = self.github.get_repo( - repo_name).get_dir_contents(area.path) - readme = [a for a in tmp if 'readme' in a.name.lower()] - if len(readme) == 0: - continue - else: - area = readme[0] - elif 'readme' in area.name.lower() or 'changelog' in area.name.lower(): - continue - - # decode content; github api return encoded content - decoded_content = base64.b64decode(area.content).decode('utf-8') - 
metric_name_tag = self.parse_table( - decoded_content) or self.parse_list(decoded_content) - - for name, tag in metric_name_tag.items(): - add_metric = Metric() - add_metric.display_name = name - add_metric.tag = tag - add_metric.focus_area = focus_area_name - - metrics.append(add_metric) - - if metric_name_tag is None: - continue - - return metrics - - def parse_table(self, md_content): - # group 0 is header, group 2 is |---|--|, and group 3 is table content - tables = re.findall( - r'^(\|?[^\n]+\|[^\n]+\|?\r?\n)((?:\|?\s*:?[-]+\s*:?)+\|?)(\n(?:\|?[^\n]+\|[^\n]+\|?\r?\n?)*)?$', md_content, - re.MULTILINE) - - if not tables: - return None - - box = [] - metrics_name_tag = {} - for table in tables: - # get metric name by 'metric_name' index in column - metric_index, length_in_row = self.get_metric_index_in_table_row( - table[0]) - table_content = [x.strip() - for x in table[2].replace('\n', '|').split('|')] - # remove two empty str - table_content.pop(0) - table_content.pop() - - raw_metrics = [table_content[a] for a in range( - metric_index, len(table_content), length_in_row)] - - for raw_metric in raw_metrics: - metric_name, metric_link = self.is_has_link( - raw_metric, md_content) - metric_name = re.sub('[\[]|[\]]', '', metric_name) - if not metric_link: - metric_link = re.sub(' ', '-', metric_name).lower() - metrics_name_tag[metric_name] = self.link_to_tag( - metric_name, str(metric_link)) - - return metrics_name_tag - - def get_metric_index_in_table_row(self, row): - header_names = [x.strip().lower() for x in row.split('|')] - # print(header_names) - index = None - if 'metric' in header_names: - index = header_names.index('metric') - elif 'name' in header_names: - index = header_names.index('name') - - return index, len(header_names) - - def parse_list(self, md_content): - matched_lists = re.findall(r'[-]\s+(.+)\n', md_content) - metric_names = {} - # print(matched_lists) - for matched in matched_lists: - # print(matched) - metirc_name = re.sub(r'.+:\s', '', matched) - metirc_name, metric_link = self.is_has_link( - metirc_name, md_content) - metirc_name = re.sub('[\[]|[\]]', '', metirc_name) - metric_names[metirc_name] = self.link_to_tag( - metirc_name, metric_link) - return metric_names - - def is_has_link(self, s, md_content): - # remove leading whitespace if exist - s = s.strip() - pattern_inline = re.compile(r'\[([^\[\]]+)\]\(([^)]+)') - match = pattern_inline.match(s) - - if match: - return match.group(1), match.group(2) - - pattern_ref = re.compile(r'\[([^\[\]]+)\]\[([^]]+)') - match2 = pattern_ref.match(s) - - if match2: - link = match2.group(2) - p = re.compile(r'\n\[' + link + r'\]:\s+(.+)\n') - res = p.search(md_content, re.DOTALL) - if res: - return match2.group(1), res.group(1) - else: - return s, None - - def link_to_tag(self, name, s): - - # generate tag if undefined metric - if not s: - return re.sub(' ', '-', name.lower()) - - pattern = re.compile(r'\/?([a-zA-Z_-]+)(\.md)?$') - m = pattern.search(s) - if m: - return re.sub('_', '-', re.sub('.md', '', m.group(1).lower())) - else: - return re.sub(' ', '-', re.sub('\(s\)', 's', name)) - - def create_experimental_metrics(self): - tags = [] - for group in self.metrics_by_group: - for metric in group: - tags.append(metric.tag) - - self.experimental_metrics = [ - metric for metric in self.implemented_metrics if metric.tag not in tags] - - def copy_implemented_metrics(self): - # takes implemented metrics and copies their data to the appropriate metric object - # I am so very sorry - # TODO: burn this into the ground - for group 
in enumerate(self.metrics_by_group): - if group[1] is not self.experimental_metrics: - for grouped_metric in group[1]: - defined_implemented_metrics = [ - metric for metric in self.implemented_metrics if grouped_metric.tag == metric.tag] - if defined_implemented_metrics != []: - for metric in defined_implemented_metrics: - metric.group = group[1][0].group - metric.focus_area = grouped_metric.focus_area - group[1].append(metric) - self.implemented_metrics.remove(metric) - grouped_metric.ID = 'n/a' - self.metrics_by_group[group[0]] = [ - metric for metric in group[1] if metric.ID != 'n/a'] - - def find_defined_metrics(self): - # return map {tag: html_url} - repo_names = [self.wg_common['repo'], self.wg_evolution['repo'], - self.wg_diversity_inclusion['repo'], self.wg_risk['repo'], self.wg_value['repo']] - - md_files = {} - - for repo_name in repo_names: - repo = self.github.get_repo(repo_name) - contents = repo.get_contents("") - - while len(contents) > 1: - file_content = contents.pop(0) - if file_content.type == "dir": - contents.extend(repo.get_contents(file_content.path)) - elif '.md' in file_content.name: - name = re.sub( - '_', '-', re.sub('.md', '', file_content.name)) - md_files[name.lower()] = file_content.html_url - - for group in self.metrics_by_group: - for metric in group: - if metric.tag in md_files.keys(): - metric.is_defined = True - metric.documentation_url = md_files[metric.tag] - - def build_metrics_status(self): - for group in self.metrics_by_group: - for metric in group: - self.metrics_status.append(metric.__dict__) - - def build_metadata(self): - self.get_metric_sources() - self.get_metric_types() - self.get_metric_tags() - - self.metadata = { - "remotes": { - "diversity_inclusion_urls": self.diversity_inclusion_urls, - "growth_maturity_decline_urls": self.growth_maturity_decline_urls, - "risk_urls": self.risk_urls, - "value_urls": self.value_urls, - "activity_repo_urls": self.activity_urls - }, - "groups": self.groups, - "data_sources": self.data_sources, - "metric_types": self.metric_types, - "tags": self.tags - } - - def get_metric_sources(self): - for data_source in [metric['data_source'] for metric in self.metrics_status]: - data_source = data_source.lower() - if data_source not in self.data_sources and data_source != "none": - self.data_sources.append(data_source) - self.data_sources.append("all") - - def get_metric_types(self): - for metric_type in [metric['metric_type'] for metric in self.metrics_status]: - metric_type = metric_type.lower() - if metric_type not in self.metric_types and metric_type != "none": - self.metric_types.append(metric_type) - self.metric_types.append("all") - - def get_metric_tags(self): - for tag in [(metric['tag'], metric['group']) for metric in self.metrics_status]: - # tag[0] = tag[0].lower() - if tag[0] not in [tag[0] for tag in self.tags] and tag[0] != "none": - self.tags[tag[0]] = tag[1] \ No newline at end of file diff --git a/workers/pull_request_worker/pull_request_worker/__init__.py b/workers/pull_request_worker/__init__.py similarity index 100% rename from workers/pull_request_worker/pull_request_worker/__init__.py rename to workers/pull_request_worker/__init__.py diff --git a/workers/pull_request_worker/pull_request_worker/worker.py b/workers/pull_request_worker/pull_request_worker.py similarity index 61% rename from workers/pull_request_worker/pull_request_worker/worker.py rename to workers/pull_request_worker/pull_request_worker.py --- a/workers/pull_request_worker/pull_request_worker/worker.py +++ 
b/workers/pull_request_worker/pull_request_worker.py @@ -1,225 +1,42 @@ import ast, json, logging, os, sys, time, traceback, requests from datetime import datetime from multiprocessing import Process, Queue -from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base -from workers.standard_methods import * from sqlalchemy.sql.expression import bindparam +from workers.worker_base import Worker -class GHPullRequestWorker: +class GitHubPullRequestWorker(Worker): """ Worker that collects Pull Request related data from the Github API and stores it in our database. :param task: most recent task the broker added to the worker's queue :param config: holds info like api keys, descriptions, and database connection strings """ - def __init__(self, config, task=None): - self._task = task - self._child = None - self._queue = Queue() - self._maintain_queue = Queue() - self.working_on = None - self.config = config - LOG_FORMAT = '%(levelname)s:[%(name)s]: %(message)s' - logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO, format=LOG_FORMAT) - logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid()))) - self.db = None - self.table = None - self.API_KEY = self.config['key'] - self.tool_source = 'GitHub Pull Request Worker' - self.tool_version = '0.0.1' # See __init__.py - self.data_source = 'GitHub API' - self.results_counter = 0 - self.headers = {'Authorization': f'token {self.API_KEY}'} - self.history_id = None - self.finishing_task = True - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [['github_url']], - "models":['pull_requests', 'pull_request_commits', 'pull_request_files'] - } - ], - "config": [self.config] - } + def __init__(self, config={}): - self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], self.config['password'], self.config['host'], - self.config['port'], self.config['database'] - ) + worker_type = "pull_request_worker" - #Database connections - logging.info("Making database connections...\n") - dbschema = 'augur_data' - self.db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) + # Define what this worker can be given and know how to interpret + given = [['github_url']] + models = ['pull_requests', 'pull_request_commits', 'pull_request_files'] - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['contributors', 'pull_requests', + # Define the tables needed to insert, update, or delete on + data_tables = ['contributors', 'pull_requests', 'pull_request_assignees', 'pull_request_events', 'pull_request_labels', 'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo', 'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits', - 'pull_request_files']) - - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.contributors_table = Base.classes.contributors.__table__ - 
self.pull_requests_table = Base.classes.pull_requests.__table__ - self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__ - self.pull_request_events_table = Base.classes.pull_request_events.__table__ - self.pull_request_labels_table = Base.classes.pull_request_labels.__table__ - self.pull_request_message_ref_table = Base.classes.pull_request_message_ref.__table__ - self.pull_request_meta_table = Base.classes.pull_request_meta.__table__ - self.pull_request_repo_table = Base.classes.pull_request_repo.__table__ - self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__ - self.pull_request_teams_table = Base.classes.pull_request_teams.__table__ - self.message_table = Base.classes.message.__table__ - self.pull_request_commits_table = Base.classes.pull_request_commits.__table__ - self.pull_request_files_table = Base.classes.pull_request_files.__table__ - - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - logging.info("Querying starting ids info...\n") - - # Increment so we are ready to insert the 'next one' of each of these most recent ids - self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1 - self.pr_id_inc = get_max_id(self, 'pull_requests', 'pull_request_id') - self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id') - self.msg_id_inc = get_max_id(self, 'message', 'msg_id') - self.pr_msg_ref_id_inc = get_max_id(self, 'pull_request_message_ref', 'pr_msg_ref_id') - self.label_id_inc = get_max_id(self, 'pull_request_labels', 'pr_label_id') - self.event_id_inc = get_max_id(self, 'pull_request_events', 'pr_event_id') - self.reviewer_id_inc = get_max_id(self, 'pull_request_reviewers', 'pr_reviewer_map_id') - self.assignee_id_inc = get_max_id(self, 'pull_request_assignees', 'pr_assignee_map_id') - self.pr_meta_id_inc = get_max_id(self, 'pull_request_meta', 'pr_repo_meta_id') - - # Organize different api keys/oauths available - init_oauths(self) - - # Send broker hello message - connect_to_broker(self) - - # self.pull_requests_graphql({ - # 'job_type': 'MAINTAIN', - # 'models': ['pull_request_files'], - # 'display_name': 'pull_request_files model for url: https://github.com/zephyrproject-rtos/actions_sandbox.git', - # 'given': { - # 'github_url': 'https://github.com/zephyrproject-rtos/actions_sandbox.git' - # } - # }, 25201) - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - github_url = value['given']['github_url'] - - repo_url_SQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(github_url)) - rs = pd.read_sql(repo_url_SQL, self.db, params={}) - - try: - repo_id = int(rs.iloc[0]['repo_id']) - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - if 'focused_task' in value: - if value['focused_task'] == 1: - self.finishing_task = True - - except Exception as e: - logging.error(f"error: {e}, or that repo is not in our database: 
{value}\n") - - self._task = value - self.run() + 'pull_request_files'] + operations_tables = ['worker_history', 'worker_job'] - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - # Query all repos with repo url of given task - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['github_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'pull_requests': - self.pull_requests_model(message, repo_id) - elif message['models'][0] == 'pull_request_commits': - self.pull_request_commits_model(message, repo_id) - elif message['models'][0] == 'pull_request_files': - self.pull_requests_graphql(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + # Define data collection info + self.tool_source = 'GitHub Pull Request Worker' + self.tool_version = '1.0.0' + self.data_source = 'GitHub API' + def graphql_paginate(self, query, data_subjects, before_parameters=None): """ Paginate a GitHub GraphQL query backwards @@ -227,7 +44,7 @@ def graphql_paginate(self, query, data_subjects, before_parameters=None): :rtype: A Pandas DataFrame, contains all data contained in the pages """ - logging.info(f'Start paginate with params: \n{data_subjects} ' + self.logger.info(f'Start paginate with params: \n{data_subjects} ' f'\n{before_parameters}') def all_items(dictionary): @@ -249,6 +66,7 @@ def all_items(dictionary): tuples = [] def find_root_of_subject(data, key_subject): + self.logger.info(f'Finding {key_subject} root of {data}') key_nest = None for subject, nest in data.items(): if key_subject in nest: @@ -262,7 +80,7 @@ def find_root_of_subject(data, key_subject): for data_subject, nest in data_subjects.items(): - logging.info(f'Beginning paginate process for field {data_subject} ' + self.logger.info(f'Beginning paginate process for field {data_subject} ' f'for query: {query}') page_count = 0 @@ -274,13 +92,13 @@ def find_root_of_subject(data, key_subject): success = False for attempt in range(num_attempts): - logging.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' + self.logger.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint ' f'page number {page_count}\n') response = requests.post(base_url, json={'query': query.format( **before_parameters)}, headers=self.headers) - update_gh_rate_limit(self, response) + self.update_gh_rate_limit(response) try: data = response.json() @@ -288,9 +106,9 @@ def find_root_of_subject(data, key_subject): data = 
json.loads(json.dumps(response.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) + self.logger.info("Error!: {}".format(data['errors'])) if data['errors'][0]['type'] == 'RATE_LIMITED': - update_gh_rate_limit(self, response) + self.update_gh_rate_limit(response) num_attempts -= 1 continue @@ -302,18 +120,18 @@ def find_root_of_subject(data, key_subject): data = root['edges'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url)) break if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': num_attempts -= 1 - update_gh_rate_limit(self, response, temporarily_disable=True) + self.update_gh_rate_limit(response, temporarily_disable=True) if data['message'] == 'Bad credentials': - update_gh_rate_limit(self, response, bad_credentials=True) + self.update_gh_rate_limit(response, bad_credentials=True) if not success: - logging.info('GraphQL query failed: {}'.format(query)) + self.logger.info('GraphQL query failed: {}'.format(query)) continue before_parameters.update({ @@ -323,7 +141,7 @@ def find_root_of_subject(data, key_subject): tuples += data - logging.info(f'Paged through {page_count} pages and ' + self.logger.info(f'Paged through {page_count} pages and ' f'collected {len(tuples)} data points\n') if not nest: @@ -333,9 +151,9 @@ def find_root_of_subject(data, key_subject): before_parameters=before_parameters) - def pull_requests_graphql(self, task_info, repo_id): + def pull_request_files_model(self, task_info, repo_id): - owner, repo = get_owner_repo(task_info['given']['github_url']) + owner, repo = self.get_owner_repo(task_info['given']['github_url']) # query existing PRs and the respective url we will append the commits url to pr_number_sql = s.sql.text(""" @@ -349,7 +167,7 @@ def pull_requests_graphql(self, task_info, repo_id): for index, pull_request in enumerate(pr_numbers.itertuples()): - logging.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') + self.logger.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}') query = """ {{ @@ -394,26 +212,24 @@ def pull_requests_graphql(self, task_info, repo_id): WHERE pull_request_files.pull_request_id = pull_requests.pull_request_id AND repo_id = :repo_id """) - logging.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') + self.logger.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n') table_values = pd.read_sql(table_values_sql, self.db, params={'repo_id': repo_id}) # Compare queried values against table values for dupes/updates if len(pr_file_rows) > 0: table_columns = pr_file_rows[0].keys() else: - logging.info(f'No rows need insertion for repo {repo_id}\n') - register_task_completion(self, task_info, repo_id, 'pull_request_files') + self.logger.info(f'No rows need insertion for repo {repo_id}\n') + self.register_task_completion(task_info, repo_id, 'pull_request_files') + return # Compare queried values against table values for dupes/updates pr_file_rows_df = pd.DataFrame(pr_file_rows) pr_file_rows_df = pr_file_rows_df.dropna(subset=['pull_request_id']) - pr_file_rows_df['need_update'] = 0 dupe_columns = 
['pull_request_id', 'pr_file_path'] update_columns = ['pr_file_additions', 'pr_file_deletions'] - logging.info(f'{pr_file_rows_df}') - logging.info(f'{table_values}') need_insertion = pr_file_rows_df.merge(table_values, suffixes=('','_table'), how='outer', indicator=True, on=dupe_columns).loc[ lambda x : x['_merge']=='left_only'][table_columns] @@ -423,14 +239,13 @@ def pull_requests_graphql(self, task_info, repo_id): on=update_columns, suffixes=('','_table'), how='outer',indicator=True ).loc[lambda x : x['_merge']=='left_only'][table_columns] - need_updates['b_pull_request_id'] = need_updates['pull_request_id'] need_updates['b_pr_file_path'] = need_updates['pr_file_path'] pr_file_insert_rows = need_insertion.to_dict('records') pr_file_update_rows = need_updates.to_dict('records') - logging.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' + self.logger.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and ' f'{len(need_updates)} updates.\n') if len(pr_file_update_rows) > 0: @@ -447,7 +262,7 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) if len(pr_file_insert_rows) > 0: @@ -460,14 +275,22 @@ def pull_requests_graphql(self, task_info, repo_id): ) success = True except Exception as e: - logging.info('error: {}'.format(e)) + self.logger.info('error: {}'.format(e)) time.sleep(5) - register_task_completion(self, task_info, repo_id, 'pull_request_files') + self.register_task_completion(task_info, repo_id, 'pull_request_files') def pull_request_commits_model(self, task_info, repo_id): """ Queries the commits related to each pull request already inserted in the db """ + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + + # query existing PRs and the respective url we will append the commits url to pr_url_sql = s.sql.text(""" SELECT DISTINCT pr_url, pull_requests.pull_request_id @@ -484,7 +307,7 @@ def pull_request_commits_model(self, task_info, repo_id): update_col_map = {} # Use helper paginate function to iterate the commits url and check for dupes - pr_commits = paginate(self, commits_url, duplicate_col_map, update_col_map, table, table_pkey, + pr_commits = self.paginate(commits_url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="where pull_request_id = {}".format(pull_request.pull_request_id)) for pr_commit in pr_commits: # post-pagination, iterate results @@ -500,9 +323,9 @@ def pull_request_commits_model(self, task_info, repo_id): 'data_source': 'GitHub API', } result = self.db.execute(self.pull_request_commits_table.insert().values(pr_commit_row)) - logging.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n") - register_task_completion(self, task_info, repo_id, 'pull_request_commits') + self.register_task_completion(task_info, repo_id, 'pull_request_commits') def pull_requests_model(self, entry_info, repo_id): """Pull Request data collection function. Query GitHub API for PhubRs. 
@@ -510,11 +333,18 @@ def pull_requests_model(self, entry_info, repo_id): :param entry_info: A dictionary consisiting of 'git_url' and 'repo_id' :type entry_info: dict """ + + self.logger.info("Querying starting ids info...\n") + + # Increment so we are ready to insert the 'next one' of each of these most recent ids + self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 + self.pr_id_inc = self.get_max_id('pull_requests', 'pull_request_id') + self.pr_meta_id_inc = self.get_max_id('pull_request_meta', 'pr_repo_meta_id') + github_url = entry_info['given']['github_url'] - logging.info('Beginning collection of Pull Requests...\n') - logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') - record_model_process(self, repo_id, 'pull_requests') + self.logger.info('Beginning collection of Pull Requests...\n') + self.logger.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n') owner, repo = self.get_owner_repo(github_url) @@ -530,12 +360,12 @@ def pull_requests_model(self, entry_info, repo_id): duplicate_col_map = {'pr_src_id': 'id'} #list to hold pull requests needing insertion - prs = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, + prs = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey, where_clause='WHERE repo_id = {}'.format(repo_id), value_update_col_map={'pr_augur_contributor_id': float('nan')}) # Discover and remove duplicates before we start inserting - logging.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") + self.logger.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n") for pr_dict in prs: @@ -553,7 +383,7 @@ def pull_requests_model(self, entry_info, repo_id): 'pr_src_state': pr_dict['state'], 'pr_src_locked': pr_dict['locked'], 'pr_src_title': pr_dict['title'], - 'pr_augur_contributor_id': find_id_from_login(self, pr_dict['user']['login']), + 'pr_augur_contributor_id': self.find_id_from_login(pr_dict['user']['login']), 'pr_body': pr_dict['body'], 'pr_created_at': pr_dict['created_at'], 'pr_updated_at': pr_dict['updated_at'], @@ -581,21 +411,21 @@ def pull_requests_model(self, entry_info, repo_id): } if pr_dict['flag'] == 'need_insertion': - logging.info(f'PR {pr_dict["id"]} needs to be inserted\n') + self.logger.info(f'PR {pr_dict["id"]} needs to be inserted\n') result = self.db.execute(self.pull_requests_table.insert().values(pr)) - logging.info(f"Added Pull Request: {result.inserted_primary_key}") + self.logger.info(f"Added Pull Request: {result.inserted_primary_key}") self.pr_id_inc = int(result.inserted_primary_key[0]) elif pr_dict['flag'] == 'need_update': result = self.db.execute(self.pull_requests_table.update().where( self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr)) - logging.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( + self.logger.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format( pr_dict['id'])) self.pr_id_inc = pr_dict['pkey'] else: - logging.info("PR does not need to be inserted. Fetching its id from DB") + self.logger.info("PR does not need to be inserted. 
Fetching its id from DB") pr_id_sql = s.sql.text(""" SELECT pull_request_id FROM pull_requests WHERE pr_src_id={} @@ -609,16 +439,16 @@ def pull_requests_model(self, entry_info, repo_id): self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc) self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc) - logging.info(f"Inserted PR data for {owner}/{repo}") + self.logger.info(f"Inserted PR data for {owner}/{repo}") self.results_counter += 1 - register_task_completion(self, entry_info, repo_id, 'pull_requests') + self.register_task_completion(entry_info, repo_id, 'pull_requests') def query_labels(self, labels, pr_id): - logging.info('Querying PR Labels\n') + self.logger.info('Querying PR Labels\n') if len(labels) == 0: - logging.info('No new labels to add\n') + self.logger.info('No new labels to add\n') return table = 'pull_request_labels' @@ -629,12 +459,12 @@ def query_labels(self, labels, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - pr_labels_table_values = get_table_values(self, cols_query, [table]) + pr_labels_table_values = self.get_table_values(cols_query, [table]) - new_labels = assign_tuple_action(self, labels, pr_labels_table_values, update_col_map, duplicate_col_map, + new_labels = self.assign_tuple_action(labels, pr_labels_table_values, update_col_map, duplicate_col_map, table_pkey) - logging.info(f'Found {len(new_labels)} labels\n') + self.logger.info(f'Found {len(new_labels)} labels\n') for label_dict in new_labels: @@ -653,14 +483,13 @@ def query_labels(self, labels, pr_id): if label_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_labels_table.insert().values(label)) - logging.info(f"Added PR Label: {result.inserted_primary_key}\n") - logging.info(f"Inserted PR Labels data for PR with id {pr_id}\n") + self.logger.info(f"Added PR Label: {result.inserted_primary_key}\n") + self.logger.info(f"Inserted PR Labels data for PR with id {pr_id}\n") self.results_counter += 1 - self.label_id_inc = int(result.inserted_primary_key[0]) def query_pr_events(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Events\n') + self.logger.info('Querying PR Events\n') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/events?per_page=100&page={}') @@ -674,14 +503,14 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): duplicate_col_map = {'issue_event_src_id': 'id'} #list to hold contributors needing insertion or update - pr_events = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey) + pr_events = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") + self.logger.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n") for pr_event_dict in pr_events: if pr_event_dict['actor']: - cntrb_id = find_id_from_login(self, pr_event_dict['actor']['login']) + cntrb_id = self.find_id_from_login(pr_event_dict['actor']['login']) else: cntrb_id = 1 @@ -700,18 +529,17 @@ def query_pr_events(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.pull_request_events_table.insert().values(pr_event)) - logging.info(f"Added PR Event: {result.inserted_primary_key}\n") + self.logger.info(f"Added PR Event: {result.inserted_primary_key}\n") self.results_counter += 1 - self.event_id_inc = int(result.inserted_primary_key[0]) - 
logging.info(f"Inserted PR Events data for PR with id {pr_id}\n") + self.logger.info(f"Inserted PR Events data for PR with id {pr_id}\n") def query_reviewers(self, reviewers, pr_id): - logging.info('Querying Reviewers') + self.logger.info('Querying Reviewers') if reviewers is None or len(reviewers) == 0: - logging.info('No reviewers to add') + self.logger.info('No reviewers to add') return table = 'pull_request_reviewers' @@ -722,15 +550,15 @@ def query_reviewers(self, reviewers, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - reviewers_table_values = get_table_values(self, cols_query, [table]) + reviewers_table_values = self.get_table_values(cols_query, [table]) - new_reviewers = assign_tuple_action(self, reviewers, reviewers_table_values, update_col_map, duplicate_col_map, + new_reviewers = self.assign_tuple_action(reviewers, reviewers_table_values, update_col_map, duplicate_col_map, table_pkey) for reviewers_dict in new_reviewers: if 'login' in reviewers_dict: - cntrb_id = find_id_from_login(self, reviewers_dict['login']) + cntrb_id = self.find_id_from_login(reviewers_dict['login']) else: cntrb_id = 1 @@ -744,18 +572,17 @@ def query_reviewers(self, reviewers, pr_id): if reviewers_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_reviewers_table.insert().values(reviewer)) - logging.info(f"Added PR Reviewer {result.inserted_primary_key}") + self.logger.info(f"Added PR Reviewer {result.inserted_primary_key}") - self.reviewer_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 - logging.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") + self.logger.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}") def query_assignee(self, assignees, pr_id): - logging.info('Querying Assignees') + self.logger.info('Querying Assignees') if assignees is None or len(assignees) == 0: - logging.info('No assignees to add') + self.logger.info('No assignees to add') return table = 'pull_request_assignees' @@ -766,15 +593,15 @@ def query_assignee(self, assignees, pr_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - assignee_table_values = get_table_values(self, cols_query, [table]) + assignee_table_values = self.get_table_values(cols_query, [table]) - assignees = assign_tuple_action(self, assignees, assignee_table_values, update_col_map, duplicate_col_map, + assignees = self.assign_tuple_action(assignees, assignee_table_values, update_col_map, duplicate_col_map, table_pkey) for assignee_dict in assignees: if 'login' in assignee_dict: - cntrb_id = find_id_from_login(self, assignee_dict['login']) + cntrb_id = self.find_id_from_login(assignee_dict['login']) else: cntrb_id = 1 @@ -788,15 +615,14 @@ def query_assignee(self, assignees, pr_id): if assignee_dict['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_assignees_table.insert().values(assignee)) - logging.info(f'Added PR Assignee {result.inserted_primary_key}') + self.logger.info(f'Added PR Assignee {result.inserted_primary_key}') - self.assignee_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 - logging.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Assignee data for PR with id {pr_id}') def query_pr_meta(self, head, base, pr_id): - logging.info('Querying PR Meta') + 
self.logger.info('Querying PR Meta') table = 'pull_request_meta' duplicate_col_map = {'pr_sha': 'sha'} @@ -808,12 +634,12 @@ def query_pr_meta(self, head, base, pr_id): update_keys += list(value_update_col_map.keys()) cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - meta_table_values = get_table_values(self, cols_query, [table]) + meta_table_values = self.get_table_values(cols_query, [table]) pr_meta_dict = { - 'head': assign_tuple_action(self, [head], meta_table_values, update_col_map, duplicate_col_map, + 'head': self.assign_tuple_action([head], meta_table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map=value_update_col_map)[0], - 'base': assign_tuple_action(self, [base], meta_table_values, update_col_map, duplicate_col_map, + 'base': self.assign_tuple_action([base], meta_table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map=value_update_col_map)[0] } @@ -824,7 +650,7 @@ def query_pr_meta(self, head, base, pr_id): 'pr_src_meta_label': pr_meta_data['label'], 'pr_src_meta_ref': pr_meta_data['ref'], 'pr_sha': pr_meta_data['sha'], - 'cntrb_id': find_id_from_login(self, pr_meta_data['user']['login']) if pr_meta_data['user'] \ + 'cntrb_id': self.find_id_from_login(pr_meta_data['user']['login']) if pr_meta_data['user'] \ and 'login' in pr_meta_data['user'] else None, 'tool_source': self.tool_source, 'tool_version': self.tool_version, @@ -836,13 +662,12 @@ def query_pr_meta(self, head, base, pr_id): self.pull_request_meta_table.c.pr_sha==pr_meta['pr_sha'] and self.pull_request_meta_table.c.pr_head_or_base==pr_side ).values(pr_meta)) - logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format( - issue_dict['id'])) - self.issue_id_inc = issue_dict['pkey'] + # self.logger.info("Updated tuple in the issues table with existing gh_issue_id: {}".format(issue_dict['id'])) + self.pr_meta_id_inc = pr_meta_data['pkey'] elif pr_meta_data['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_meta_table.insert().values(pr_meta)) - logging.info(f'Added PR Head {result.inserted_primary_key}') + self.logger.info(f'Added PR Head {result.inserted_primary_key}') self.pr_meta_id_inc = int(result.inserted_primary_key[0]) self.results_counter += 1 @@ -857,12 +682,12 @@ def query_pr_meta(self, head, base, pr_id): if pr_meta_data['repo']: self.query_pr_repo(pr_meta_data['repo'], pr_side, self.pr_meta_id_inc) else: - logging.info('No new PR Head data to add') + self.logger.info('No new PR Head data to add') - logging.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') + self.logger.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}') def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): - logging.info('Querying PR Comments') + self.logger.info('Querying PR Comments') url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' + '/comments?per_page=100&page={}') @@ -876,20 +701,21 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): duplicate_col_map = {'pr_message_ref_src_comment_id': 'id'} #list to hold contributors needing insertion or update - pr_messages = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey) + pr_messages = self.paginate(url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") + self.logger.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n") for 
pr_msg_dict in pr_messages: if pr_msg_dict['user'] and 'login' in pr_msg_dict['user']: - cntrb_id = find_id_from_login(self, pr_msg_dict['user']['login']) + cntrb_id = self.find_id_from_login(pr_msg_dict['user']['login']) else: cntrb_id = 1 msg = { 'rgls_id': None, - 'msg_text': pr_msg_dict['body'], + 'msg_text': pr_msg_dict['body'].replace("0x00", "____") if \ + 'body' in pr_msg_dict else None, 'msg_timestamp': pr_msg_dict['created_at'], 'msg_sender_email': None, 'msg_header': None, @@ -901,12 +727,11 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): } result = self.db.execute(self.message_table.insert().values(msg)) - logging.info(f'Added PR Comment {result.inserted_primary_key}') - self.msg_id_inc = int(result.inserted_primary_key[0]) + self.logger.info(f'Added PR Comment {result.inserted_primary_key}') pr_msg_ref = { 'pull_request_id': pr_id, - 'msg_id': self.msg_id_inc, + 'msg_id': int(result.inserted_primary_key[0]), 'pr_message_ref_src_comment_id': pr_msg_dict['id'], 'pr_message_ref_src_node_id': pr_msg_dict['node_id'], 'tool_source': self.tool_source, @@ -917,15 +742,14 @@ def query_pr_comments(self, owner, repo, gh_pr_no, pr_id): result = self.db.execute( self.pull_request_message_ref_table.insert().values(pr_msg_ref) ) - logging.info(f'Added PR Message Ref {result.inserted_primary_key}') - self.pr_msg_ref_id_inc = int(result.inserted_primary_key[0]) + self.logger.info(f'Added PR Message Ref {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR Message data for PR with id {pr_id}') + self.logger.info(f'Finished adding PR Message data for PR with id {pr_id}') def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): - logging.info(f'Querying PR {pr_repo_type} repo') + self.logger.info(f'Querying PR {pr_repo_type} repo') table = 'pull_request_repo' duplicate_col_map = {'pr_src_repo_id': 'id'} @@ -935,13 +759,13 @@ def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): update_keys = list(update_col_map.keys()) if update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - pr_repo_table_values = get_table_values(self, cols_query, [table]) + pr_repo_table_values = self.get_table_values(cols_query, [table]) - new_pr_repo = assign_tuple_action(self, [pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, + new_pr_repo = self.assign_tuple_action([pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map, table_pkey)[0] if new_pr_repo['owner'] and 'login' in new_pr_repo['owner']: - cntrb_id = find_id_from_login(self, new_pr_repo['owner']['login']) + cntrb_id = self.find_id_from_login(new_pr_repo['owner']['login']) else: cntrb_id = 1 @@ -962,20 +786,8 @@ def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id): if new_pr_repo['flag'] == 'need_insertion': result = self.db.execute(self.pull_request_repo_table.insert().values(pr_repo)) - logging.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') + self.logger.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}') self.results_counter += 1 - logging.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') - - def get_owner_repo(self, github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - + self.logger.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}') diff --git a/workers/pull_request_worker/pull_request_worker/runtime.py 
b/workers/pull_request_worker/pull_request_worker/runtime.py deleted file mode 100644 --- a/workers/pull_request_worker/pull_request_worker/runtime.py +++ /dev/null @@ -1,109 +0,0 @@ -import json, logging, os, click -import requests -from flask import Flask, Response, jsonify, request -from pull_request_worker.worker import GHPullRequestWorker -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - if request.method == 'POST': # POST a task to be added to the queue - logging.info("Sending to work on task: {}".format(str(request.json))) - app.gh_pr_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.gh_pr_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51239, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'pull_request_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - print("New pull request worker trying port: {}\n".format(worker_port)) - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.pull_request_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - "display_name": "", - "description": "", - "required": 1, - "type": 
"string" - } - - #create instance of the worker - - app.gh_pr_worker = GHPullRequestWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - app.run(debug=app.debug, host=host, port=worker_port) - if app.gh_pr_worker._child is not None: - app.gh_pr_worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/pull_request_worker/runtime.py b/workers/pull_request_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/pull_request_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.pull_request_worker.pull_request_worker import GitHubPullRequestWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = GitHubPullRequestWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/pull_request_worker/setup.py b/workers/pull_request_worker/setup.py --- a/workers/pull_request_worker/setup.py +++ b/workers/pull_request_worker/setup.py @@ -13,7 +13,7 @@ def read(filename): setup( name="pull_request_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", @@ -28,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'pull_request_worker_start=pull_request_worker.runtime:main', + 'pull_request_worker_start=workers.pull_request_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/repo_info_worker/repo_info_worker/__init__.py b/workers/release_worker/__init__.py similarity index 50% rename from workers/repo_info_worker/repo_info_worker/__init__.py rename to workers/release_worker/__init__.py --- a/workers/repo_info_worker/repo_info_worker/__init__.py +++ b/workers/release_worker/__init__.py @@ -1,4 +1,4 @@ -"""gh_repo_info_worker - Augur Worker that collects GitHub Repo Info data""" +"""gh_release_worker - Augur Worker that collects GitHub Repo Info data""" __version__ = '0.0.0' __author__ = 'Augur Team <[email protected]>' diff --git a/workers/release_worker/release_worker.py b/workers/release_worker/release_worker.py new file mode 100644 --- /dev/null +++ b/workers/release_worker/release_worker.py @@ -0,0 +1,157 @@ +import logging, os, sys, time, requests, json +from datetime import datetime +from multiprocessing import Process, Queue +from urllib.parse import urlparse +import pandas as pd +import sqlalchemy as s +from sqlalchemy import MetaData +from sqlalchemy.ext.automap import automap_base +from workers.worker_base import Worker + +#TODO - fully edit to match releases +class ReleaseWorker(Worker): + def __init__(self, config={}): + + worker_type = "release_worker" + + # Define what this worker can be given and know how to interpret + given = [['github_url']] + models = ['releases'] + + # Define the tables 
needed to insert, update, or delete on + data_tables = ['releases'] + operations_tables = ['worker_history', 'worker_job'] + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Define data collection info + self.tool_source = 'Release Worker' + self.tool_version = '1.0.0' + self.data_source = 'GitHub API' + + def insert_release(self, repo_id, owner, release): + author = release['author']['name']+'_'+release['author']['company'] + # Put all data together in format of the table + self.logger.info(f'Inserting release for repo with id:{repo_id}, owner:{owner}, release name:{release["name"]}\n') + release_inf = { + 'release_id': release['id'], + 'repo_id': repo_id, + 'release_name': release['name'], + 'release_description': release['description'], + 'release_author': release['author'], + 'release_created_at': release['createdAt'], + 'release_published_at': release['publishedAt'], + 'release_updated_at': release['updatedAt'], + 'release_is_draft': release['isDraft'], + 'release_is_prerelease': release['isPrerelease'], + 'release_tag_name': release['tagName'], + 'release_url': release['url'], + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source + } + + result = self.db.execute(self.releases_table.insert().values(release_inf)) + self.logger.info(f"Primary Key inserted into releases table: {result.inserted_primary_key}\n") + self.results_counter += 1 + + self.logger.info(f"Inserted info for {owner}/{repo}/{release['name']}\n") + + #Register this task as completed + self.register_task_completion(task, release_id, "releases") + return + + def releases_model(self, task, repo_id): + + github_url = task['given']['github_url'] + + self.logger.info("Beginning filling the releases model for repo: " + github_url + "\n") + + owner, repo = self.get_owner_repo(github_url) + + url = 'https://api.github.com/graphql' + + query = """ + { + repository(owner:"%s", name:"%s"){ + id + releases(orderBy: {field: CREATED_AT, direction: ASC}, last: %d) { + edges { + node { + name + publishedAt + createdAt + description + id + isDraft + isPrerelease + tagName + url + updatedAt + author { + name + company + } + } + } + } + } + } + """ % (owner, repo, 10) + + # Hit the graphql endpoint and retry 3 times in case of failure + num_attempts = 0 + success = False + while num_attempts < 3: + self.logger.info("Hitting endpoint: {} ...\n".format(url)) + r = requests.post(url, json={'query': query}, headers=self.headers) + self.update_gh_rate_limit(r) + + try: + data = r.json() + except: + data = json.loads(json.dumps(r.text)) + + if 'errors' in data: + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': + self.update_gh_rate_limit(r) + continue + + if 'data' in data: + success = True + data = data['data']['repository'] + break + else: + self.logger.info("Request returned a non-data dict: {}\n".format(data)) + if data['message'] == 'Not Found': + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + break + if data['message'] == 'You have triggered an abuse detection mechanism. 
Please wait a few minutes before you try again.': + self.update_gh_rate_limit(r, temporarily_disable=True) + continue + if data['message'] == 'Bad credentials': + self.update_gh_rate_limit(r, bad_credentials=True) + continue + num_attempts += 1 + if not success: + self.register_task_failure(task, repo_id, "Failed to hit endpoint: {}".format(url)) + return + + self.logger.info("repository value is: {}\n".format(data)) + + if 'releases' in data: + if 'edges' in data['releases']: + for n in data['releases']['edges']: + if 'node' in n: + release = n['node'] + self.insert_release(self, repo_id, owner, release) + else: + self.logger.info("There's no release to insert. Current node is not available in releases: {}\n".format(n)) + else: + self.logger.info("There are no releases to insert for current repository: {}\n".format(data)) + else: + self.logger.info("Graphql response does not contain repository: {}\n".format(data)) + + diff --git a/workers/release_worker/runtime.py b/workers/release_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/release_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.release_worker.release_worker import ReleaseWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ReleaseWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/metric_status_worker/setup.py b/workers/release_worker/setup.py similarity index 83% rename from workers/metric_status_worker/setup.py rename to workers/release_worker/setup.py --- a/workers/metric_status_worker/setup.py +++ b/workers/release_worker/setup.py @@ -5,22 +5,20 @@ from setuptools import find_packages from setuptools import setup - def read(filename): filename = os.path.join(os.path.dirname(__file__), filename) text_type = type(u"") with io.open(filename, mode="r", encoding='utf-8') as fd: return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) - setup( - name="metric_status_worker", - version="0.1.0", + name="release_worker", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", author_email="[email protected]", - description="Augur Worker that collects GitHub data", + description="Augur Worker that collects data about GitHub releases", packages=find_packages(exclude=('tests',)), install_requires=[ 'flask', @@ -30,7 +28,7 @@ def read(filename): ], entry_points={ 'console_scripts': [ - 'metric_status_worker_start=metric_status_worker.runtime:main', + 'release_worker_start=workers.release_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/repo_info_worker/__init__.py b/workers/repo_info_worker/__init__.py new file mode 100644 diff --git a/workers/repo_info_worker/repo_info_worker/worker.py b/workers/repo_info_worker/repo_info_worker.py similarity index 56% rename from workers/repo_info_worker/repo_info_worker/worker.py rename to workers/repo_info_worker/repo_info_worker.py --- a/workers/repo_info_worker/repo_info_worker/worker.py +++ b/workers/repo_info_worker/repo_info_worker.py @@ -1,37 +1,44 @@ import 
logging, os, sys, time, requests, json from datetime import datetime from multiprocessing import Process, Queue -from urllib.parse import urlparse import pandas as pd import sqlalchemy as s -from sqlalchemy import MetaData -from sqlalchemy.ext.automap import automap_base from workers.worker_base import Worker +# NOTE: This worker primarily inserts rows into the REPO_INFO table, which serves the primary purposes of +# 1. Displaying discrete metadata like "number of forks" and how they change over time +# 2. Validating other workers, like those related to pull requests, issues, and commits. Our totals should be at or very near the totals in the repo_info table. + +# This table also updates the REPO table in 2 cases: +# 1. Recognizing when a repository is a forked repository by updating the "forked_from" field and +# 2. Recognizing when a repository is archived, and recording the data we observed the change in status. + class RepoInfoWorker(Worker): - def __init__(self, config): + def __init__(self, config={}): + + worker_type = "repo_info_worker" # Define what this worker can be given and know how to interpret given = [['github_url']] models = ['repo_info'] # Define the tables needed to insert, update, or delete on - data_tables = ['repo_info'] + data_tables = ['repo_info', 'repo'] operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) # Define data collection info self.tool_source = 'Repo Info Worker' - self.tool_version = '0.0.1' + self.tool_version = '1.0.0' self.data_source = 'GitHub API' def repo_info_model(self, task, repo_id): github_url = task['given']['github_url'] - logging.info("Beginning filling the repo_info model for repo: " + github_url + "\n") + self.logger.info("Beginning filling the repo_info model for repo: " + github_url + "\n") owner, repo = self.get_owner_repo(github_url) @@ -99,8 +106,9 @@ def repo_info_model(self, task, repo_id): # Hit the graphql endpoint and retry 3 times in case of failure num_attempts = 0 success = False + data = None while num_attempts < 3: - logging.info("Hitting endpoint: {} ...\n".format(url)) + self.logger.info("Hitting endpoint: {} ...\n".format(url)) r = requests.post(url, json={'query': query}, headers=self.headers) self.update_gh_rate_limit(r) @@ -110,8 +118,8 @@ def repo_info_model(self, task, repo_id): data = json.loads(json.dumps(r.text)) if 'errors' in data: - logging.info("Error!: {}".format(data['errors'])) - if data['errors']['message'] == 'API rate limit exceeded': + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': self.update_gh_rate_limit(r) continue @@ -120,9 +128,9 @@ def repo_info_model(self, task, repo_id): data = data['data']['repository'] break else: - logging.info("Request returned a non-data dict: {}\n".format(data)) + self.logger.info("Request returned a non-data dict: {}\n".format(data)) if data['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) break if data['message'] == 'You have triggered an abuse detection mechanism. 
Please wait a few minutes before you try again.': self.update_gh_rate_limit(r, temporarily_disable=True) @@ -132,14 +140,23 @@ def repo_info_model(self, task, repo_id): continue num_attempts += 1 if not success: - self.register_task_failure(task, repo_id, "Failed to hit endpoint: {}".format(url)) + self.logger.error('Cannot hit endpoint after 3 attempts. \"Completing\" task.\n') + self.register_task_completion(self.task, repo_id, 'repo_info') + return + + # Just checking that the data is accessible (would not be if repo no longer exists) + try: + data['updatedAt'] + except Exception as e: + self.logger.error('Cannot access repo_info data: {}\nError: {}. \"Completing\" task.'.format(data, e)) + self.register_task_completion(self.task, repo_id, 'repo_info') return # Get committers count info that requires seperate endpoint committers_count = self.query_committers_count(owner, repo) # Put all data together in format of the table - logging.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') + self.logger.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n') rep_inf = { 'repo_id': repo_id, 'last_updated': data['updatedAt'] if 'updatedAt' in data else None, @@ -177,16 +194,34 @@ def repo_info_model(self, task, repo_id): } result = self.db.execute(self.repo_info_table.insert().values(rep_inf)) - logging.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") + self.logger.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n") self.results_counter += 1 - logging.info(f"Inserted info for {owner}/{repo}\n") + # Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table. 
+ forked = self.is_forked(owner, repo) + archived = self.is_archived(owner, repo) + archived_date_collected = None + if archived is not False: + archived_date_collected = archived + archived = 1 + else: + archived = 0 - #Register this task as completed - self.register_task_completion(task, repo_id, "repo_info") + rep_additional_data = { + 'forked_from': forked, + 'repo_archived': archived, + 'repo_archived_date_collected': archived_date_collected + } + result = self.db.execute(self.repo_table.update().where( + self.repo_table.c.repo_id==repo_id).values(rep_additional_data)) + + self.logger.info(f"Inserted info for {owner}/{repo}\n") + + # Register this task as completed + self.register_task_completion(self.task, repo_id, "repo_info") def query_committers_count(self, owner, repo): - logging.info('Querying committers count\n') + self.logger.info('Querying committers count\n') url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100' committers = 0 @@ -201,7 +236,67 @@ def query_committers_count(self, owner, repo): else: url = r.links['next']['url'] except Exception: - logging.exception('An error occured while querying contributor count\n') + self.logger.exception('An error occured while querying contributor count\n') return committers + def is_forked(self, owner, repo): #/repos/:owner/:repo parent + self.logger.info('Querying parent info to verify if the repo is forked\n') + url = f'https://api.github.com/repos/{owner}/{repo}' + + r = requests.get(url, headers=self.headers) + self.update_gh_rate_limit(r) + + data = self.get_repo_data(url, r) + + if 'fork' in data: + if 'parent' in data: + return data['parent']['full_name'] + return 'Parent not available' + + return False + + def is_archived(self, owner, repo): + self.logger.info('Querying committers count\n') + url = f'https://api.github.com/repos/{owner}/{repo}' + + r = requests.get(url, headers=self.headers) + self.update_gh_rate_limit(r) + + data = self.get_repo_data(url, r) + + if 'archived' in data: + if data['archived']: + if 'updated_at' in data: + return data['updated_at'] + return 'Date not available' + return False + + return False + + def get_repo_data(self, url, response): + success = False + try: + data = response.json() + except: + data = json.loads(json.dumps(response.text)) + + if 'errors' in data: + self.logger.info("Error!: {}".format(data['errors'])) + if data['errors'][0]['message'] == 'API rate limit exceeded': + self.update_gh_rate_limit(response) + + if 'id' in data: + success = True + else: + self.logger.info("Request returned a non-data dict: {}\n".format(data)) + if data['message'] == 'Not Found': + self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + if data['message'] == 'You have triggered an abuse detection mechanism. 
Please wait a few minutes before you try again.': + self.update_gh_rate_limit(r, temporarily_disable=True) + if data['message'] == 'Bad credentials': + self.update_gh_rate_limit(r, bad_credentials=True) + if not success: + self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url)) + + return data diff --git a/workers/repo_info_worker/repo_info_worker/runtime.py b/workers/repo_info_worker/repo_info_worker/runtime.py deleted file mode 100644 --- a/workers/repo_info_worker/repo_info_worker/runtime.py +++ /dev/null @@ -1,55 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from repo_info_worker.worker import RepoInfoWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'repo_info_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.repo_info_worker.{}".format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - 'gh_api_key': read_config('Database', 'key', 'AUGUR_GITHUB_API_KEY', 'key') - } - - #create instance of the worker - app.worker = RepoInfoWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - if app.worker._child is not None: - app.worker._child.terminate() - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/repo_info_worker/runtime.py b/workers/repo_info_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/repo_info_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.repo_info_worker.repo_info_worker import RepoInfoWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = RepoInfoWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": 
config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/repo_info_worker/setup.py b/workers/repo_info_worker/setup.py --- a/workers/repo_info_worker/setup.py +++ b/workers/repo_info_worker/setup.py @@ -13,22 +13,21 @@ def read(filename): setup( name="repo_info_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', author="Augurlabs", author_email="[email protected]", description="Augur Worker that collects general data about a repo on GitHub", - packages=find_packages(exclude=('tests',)), + packages=find_packages(), install_requires=[ 'flask', 'requests', - 'psycopg2-binary', - 'click' + 'psycopg2-binary' ], entry_points={ 'console_scripts': [ - 'repo_info_worker_start=repo_info_worker.runtime:main', + 'repo_info_worker_start=workers.repo_info_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/standard_methods.py b/workers/standard_methods.py deleted file mode 100644 --- a/workers/standard_methods.py +++ /dev/null @@ -1,712 +0,0 @@ -""" Helper methods constant across all workers """ -import requests, datetime, time, traceback, json, os, sys, math -import sqlalchemy as s -import pandas as pd -import os -import sys, logging -from urllib.parse import urlparse - -def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): - """ map objects => { *our db col* : *gh json key*} """ - need_insertion_count = 0 - need_update_count = 0 - for i, obj in enumerate(new_data): - if type(obj) != dict: - logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) - continue - - obj['flag'] = 'none' # default of no action needed - existing_tuple = None - for db_dupe_key in list(duplicate_col_map.keys()): - - if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): - if table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'): - existing_tuple = table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] - continue - - logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) - obj['flag'] = 'need_insertion' - need_insertion_count += 1 - break - - if obj['flag'] == 'need_insertion': - logging.info('Already determined that current tuple needs insertion, skipping checking updates. ' - 'Moving to next tuple.\n') - continue - - # If we need to check the values of the existing tuple to determine if an update is needed - for augur_col, value_check in value_update_col_map.items(): - not_nan_check = not (pd.isna(value_check) and pd.isna(existing_tuple[augur_col])) if value_check is not None else True - if existing_tuple[augur_col] != value_check and not_nan_check: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - if obj['flag'] == 'need_update': - logging.info('Already determined that current tuple needs update, skipping checking further updates. 
' - 'Moving to next tuple.\n') - continue - - # Now check the existing tuple's values against the response values to determine if an update is needed - for col in update_col_map.keys(): - if update_col_map[col] not in obj: - continue - if obj[update_col_map[col]] == existing_tuple[col]: - continue - logging.info("Found a tuple that needs an update for column: {}\n".format(col)) - obj['flag'] = 'need_update' - obj['pkey'] = existing_tuple[table_pkey] - need_update_count += 1 - - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + - "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count)) - return new_data - -def check_duplicates(new_data, table_values, key): - need_insertion = [] - for obj in new_data: - if type(obj) == dict: - if not table_values.isin([obj[key]]).any().any(): - need_insertion.append(obj) - # else: - # logging.info("Tuple with github's {} key value already".format(key) + - # "exists in our db: {}\n".format(str(obj[key]))) - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + - "was reduced to {} tuples.\n".format(str(len(need_insertion)))) - return need_insertion - -def connect_to_broker(self): - connected = False - for i in range(5): - try: - logging.info("attempt {}\n".format(i)) - if i > 0: - time.sleep(10) - requests.post('http://{}:{}/api/unstable/workers'.format( - self.config['broker_host'],self.config['broker_port']), json=self.specs) - logging.info("Connection to the broker was successful\n") - connected = True - break - except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker. Trying again...\n') - if not connected: - sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n') - -def dump_queue(queue): - """ - Empties all pending items in a queue and returns them in a list. 
- """ - result = [] - queue.put("STOP") - for i in iter(queue.get, 'STOP'): - result.append(i) - # time.sleep(.1) - return result - -def find_id_from_login(self, login): - idSQL = s.sql.text(""" - SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' - """.format(login)) - rs = pd.read_sql(idSQL, self.db, params={}) - data_list = [list(row) for row in rs.itertuples(index=False)] - try: - return data_list[0][0] - except: - logging.info("contributor needs to be added...") - - cntrb_url = ("https://api.github.com/users/" + login) - logging.info("Hitting endpoint: {} ...\n".format(cntrb_url)) - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'] if 'login' in contributor else None, - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, - "cntrb_canonical": None, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) - self.results_counter += 1 - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - return find_id_from_login(self, login) - -def get_owner_repo(github_url): - split = github_url.split('/') - - owner = split[-2] - repo = split[-1] - - if '.git' in repo: - repo = repo[:-4] - - return owner, repo - -def get_max_id(self, table, column, default=25150, operations_table=False): - maxIdSQL = s.sql.text(""" - SELECT max({0}.{1}) AS {1} - FROM {0} - """.format(table, column)) - db = self.db if not operations_table else self.helper_db - rs = pd.read_sql(maxIdSQL, db, params={}) - if rs.iloc[0][column] is not None: - max_id = int(rs.iloc[0][column]) + 1 - logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) - else: - max_id = default - logging.info("Could not find max id for {} column in the {} table... 
using default set to: \ - {}\n".format(column, table, max_id)) - return max_id - -def get_table_values(self, cols, tables, where_clause=""): - table_str = tables[0] - del tables[0] - - col_str = cols[0] - del cols[0] - - for table in tables: - table_str += ", " + table - for col in cols: - col_str += ", " + col - - tableValuesSQL = s.sql.text(""" - SELECT {} FROM {} {} - """.format(col_str, table_str, where_clause)) - logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL)) - values = pd.read_sql(tableValuesSQL, self.db, params={}) - return values - -def init_oauths(self): - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauthSQL = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(self.config['key'])) - for oauth in [{'oauth_id': 0, 'access_token': self.config['key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}\n".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - -def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}): - # Paginate backwards through all the tuples but get first page in order - # to determine if there are multiple pages and if the 1st page covers all - update_keys = list(update_col_map.keys()) if update_col_map else [] - update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] - cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] - table_values = get_table_values(self, cols_query, [table], where_clause) - - i = 1 - multiple_pages = False - tuples = [] - while True: - num_attempts = 0 - success = False - while num_attempts < 3: - logging.info("Hitting endpoint: " + url.format(i) + " ...\n") - r = requests.get(url=url.format(i), headers=self.headers) - update_gh_rate_limit(self, r) - logging.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) - - try: - j = r.json() - except: - j = json.loads(json.dumps(r.text)) - - if type(j) != dict and type(j) != str: - success = True - break - elif type(j) == dict: - logging.info("Request returned a dict: {}\n".format(j)) - if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) - break - if j['message'] == 'You have 
triggered an abuse detection mechanism. Please wait a few minutes before you try again.': - num_attempts -= 1 - update_gh_rate_limit(self, r, temporarily_disable=True) - if j['message'] == 'Bad credentials': - update_gh_rate_limit(self, r, bad_credentials=True) - elif type(j) == str: - logging.info("J was string: {}\n".format(j)) - if '<!DOCTYPE html>' in j: - logging.info("HTML was returned, trying again...\n") - elif len(j) == 0: - logging.info("Empty string, trying again...\n") - else: - try: - j = json.loads(j) - success = True - break - except: - pass - num_attempts += 1 - if not success: - break - - # Find last page so we can decrement from there - if 'last' in r.links and not multiple_pages and not self.finishing_task: - param = r.links['last']['url'][-6:] - i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") - multiple_pages = True - elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") - elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ..." - " excess rate limit requests will be made\n") - - if len(j) == 0: - logging.info("Response was empty, breaking from pagination.\n") - break - - # Checking contents of requests with what we already have in the db - j = assign_tuple_action(self, j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) - if not j: - logging.info("Assigning tuple action failed, moving to next page.\n") - i = i + 1 if self.finishing_task else i - 1 - continue - try: - to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none'] - except Exception as e: - logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) - i = i + 1 if self.finishing_task else i - 1 - continue - if len(to_add) == 0 and multiple_pages and 'last' in r.links: - logging.info("{}".format(r.links['last'])) - if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown tuples, breaking from pagination.\n") - break - tuples += to_add - - i = i + 1 if self.finishing_task else i - 1 - - # Since we already wouldve checked the first page... 
break - if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") - break - - return tuples - -def query_github_contributors(self, entry_info, repo_id): - - """ Data collection function - Query the GitHub API for contributors - """ - logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n") - - github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] - - # Extract owner/repo from the url for the endpoint - path = urlparse(github_url) - split = path[2].split('/') - - owner = split[1] - name = split[2] - - # Handles git url case by removing the extension - if ".git" in name: - name = name[:-4] - - # Set the base of the url and place to hold contributors to insert - contributors_url = ("https://api.github.com/repos/" + owner + "/" + - name + "/contributors?per_page=100&page={}") - - # Get contributors that we already have stored - # Set our duplicate and update column map keys (something other than PK) to - # check dupicates/needed column updates with - table = 'contributors' - table_pkey = 'cntrb_id' - update_col_map = {'cntrb_email': 'email'} - duplicate_col_map = {'cntrb_login': 'login'} - - #list to hold contributors needing insertion or update - contributors = paginate(self, contributors_url, duplicate_col_map, update_col_map, table, table_pkey) - - logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") - - for repo_contributor in contributors: - try: - # Need to hit this single contributor endpoint to get extra data including... - # `created at` - # i think that's it - cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") - r = requests.get(url=cntrb_url, headers=self.headers) - update_gh_rate_limit(self, r) - contributor = r.json() - - company = None - location = None - email = None - if 'company' in contributor: - company = contributor['company'] - if 'location' in contributor: - location = contributor['location'] - if 'email' in contributor: - email = contributor['email'] - canonical_email = contributor['email'] - - cntrb = { - "cntrb_login": contributor['login'], - "cntrb_created_at": contributor['created_at'], - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - # "cntrb_type": , dont have a use for this as of now ... 
let it default to null - "cntrb_canonical": canonical_email, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } - - # Commit insertion to table - if repo_contributor['flag'] == 'need_update': - result = self.db.execute(self.contributors_table.update().where( - self.history_table.c.cntrb_email==email).values(cntrb)) - logging.info("Updated tuple in the contributors table with existing email: {}".format(email)) - self.cntrb_id_inc = repo_contributor['pkey'] - elif repo_contributor['flag'] == 'need_insertion': - result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) - self.results_counter += 1 - - logging.info("Inserted contributor: " + contributor['login'] + "\n") - - # Increment our global track of the cntrb id for the possibility of it being used as a FK - self.cntrb_id_inc = int(result.inserted_primary_key[0]) - - except Exception as e: - logging.info("Caught exception: {}".format(e)) - logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) - continue - -def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): - """ - Read a variable in specified section of the config file, unless provided an environment variable - - :param section: location of given variable - :param name: name of variable - """ - config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path) - _config_file_name = 'augur.config.json' - _config_bad = False - _already_exported = {} - _runtime_location = 'runtime/' - _default_config = {} - _config_file = None - - try: - _config_file = open(config_file_path, 'r+') - except: - print('Couldn\'t open {}'.format(_config_file_name)) - - # Load the config file - try: - config_text = _config_file.read() - _config = json.loads(config_text) - except json.decoder.JSONDecodeError as e: - if not _config_bad: - _using_config_file = False - print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: {}'.format(config_file_path, str(e))) - _config = _default_config - - value = None - if environment_variable is not None: - value = os.getenv(environment_variable) - if value is None: - try: - if name is not None: - value = _config[section][name] - else: - value = _config[section] - except Exception as e: - value = default - if not section in _config: - _config[section] = {} - - return value - - -def record_model_process(self, repo_id, model): - - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Stopped", - "total_results": self.results_counter - } - if self.finishing_task: - result = self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - self.history_id += 1 - else: - result = self.helper_db.execute(self.history_table.insert().values(task_history)) - logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) - self.history_id = int(result.inserted_primary_key[0]) - -def register_task_completion(self, task, repo_id, model): - # Task to send back to broker - task_completed = { - 'worker_id': self.config['id'], - 'job_type': "MAINTAIN", - 'repo_id': repo_id, - 'job_model': model - } - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] if 'git_url' in task['given'] else "INVALID_GIVEN" - if key == 'INVALID_GIVEN': - register_task_failure(self, task, repo_id, "INVALID_GIVEN: not github nor git url") - return - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": model, - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Success", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where( - self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job completion for: " + str(task_completed) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where( - self.job_table.c.job_model==model).values(updated_job)) - logging.info("Updated job process for model: " + model + "\n") - - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") - - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['broker_host'],self.config['broker_port']), json=task_completed) - - # Reset results counter for next task - self.results_counter = 0 - -def register_task_failure(self, task, repo_id, e): - - logging.info("Worker ran into an error for task: {}\n".format(task)) - logging.info("Printing traceback...\n") - tb = traceback.format_exc() - logging.info(tb) - - logging.info(f'This task inserted {self.results_counter} tuples before failure.\n') - logging.info("Notifying broker and logging task failure in database...\n") - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - url = task['given'][key] - - """ Query 
all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(url)) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - task['worker_id'] = self.config['id'] - try: - requests.post("http://{}:{}/api/unstable/task_error".format( - self.config['broker_host'],self.config['broker_port']), json=task) - except requests.exceptions.ConnectionError: - logging.error('Could not send task failure message to the broker\n') - except Exception: - logging.exception('An error occured while informing broker about task failure\n') - - # Add to history table - task_history = { - "repo_id": repo_id, - "worker": self.config['id'], - "job_model": task['models'][0], - "oauth_id": self.oauths[0]['oauth_id'], - "timestamp": datetime.datetime.now(), - "status": "Error", - "total_results": self.results_counter - } - self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history)) - - logging.info("Recorded job error in the history table for: " + str(task) + "\n") - - # Update job process table - updated_job = { - "since_id_str": repo_id, - "last_count": self.results_counter, - "last_run": datetime.datetime.now(), - "analysis_state": 0 - } - self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==task['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + task['models'][0] + "\n") - - # Reset results counter for next task - self.results_counter = 0 - -def retrieve_tuple(self, key_values, tables): - table_str = tables[0] - del tables[0] - - key_values_items = list(key_values.items()) - for col, value in [key_values_items[0]]: - where_str = col + " = '" + value + "'" - del key_values_items[0] - - for col, value in key_values_items: - where_str += ' AND ' + col + " = '" + value + "'" - for table in tables: - table_str += ", " + table - - retrieveTupleSQL = s.sql.text(""" - SELECT * FROM {} WHERE {} - """.format(table_str, where_str)) - values = json.loads(pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")) - return values - -def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): - # Try to get rate limit from request headers, sometimes it does not work (GH's issue) - # In that case we just decrement from last recieved header count - if bad_credentials and len(self.oauths) > 1: - logging.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) - del self.oauths[0] - - if temporarily_disable: - logging.info("Github thinks we are abusing their api. 
Preventing use of this key until it resets...\n") - self.oauths[0]['rate_limit'] = 0 - else: - try: - self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - logging.info("Recieved rate limit from headers\n") - except: - self.oauths[0]['rate_limit'] -= 1 - logging.info("Headers did not work, had to decrement\n") - logging.info("Updated rate limit, you have: " + - str(self.oauths[0]['rate_limit']) + " requests remaining.\n") - if self.oauths[0]['rate_limit'] <= 0: - try: - reset_time = response.headers['X-RateLimit-Reset'] - except Exception as e: - logging.info("Could not get reset time from headers because of error: {}".format(e)) - logging.info('Headers: {}'.format(response.headers)) - reset_time = 3600 - time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() - logging.info("Rate limit exceeded, checking for other available keys to use.\n") - - # We will be finding oauth with the highest rate limit left out of our list of oauths - new_oauth = self.oauths[0] - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] - for oauth in other_oauths: - logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - response = requests.get(url=url, headers=self.headers) - oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() - - # Update oauth to switch to if a higher limit is found - if oauth['rate_limit'] > new_oauth['rate_limit']: - logging.info("Higher rate limit found in oauth: {}\n".format(oauth)) - new_oauth = oauth - elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: - logging.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) - new_oauth = oauth - - if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: - logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) - time.sleep(new_oauth['seconds_to_reset']) - - # Make new oauth the 0th element in self.oauths so we know which one is in use - index = self.oauths.index(new_oauth) - self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] - logging.info("Using oauth: {}\n".format(self.oauths[0])) - - # Change headers to be using the new oauth's key - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} diff --git a/workers/template_worker/__init__.py b/workers/template_worker/__init__.py new file mode 100644 diff --git a/workers/template_worker/runtime.py b/workers/template_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/template_worker/runtime.py @@ -0,0 +1,23 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.template_worker.template_worker import TemplateWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ + Creates the Flask app and data collection worker, then starts the Gunicorn server + """ + app = Flask(__name__) + app.worker = TemplateWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + 
app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) diff --git a/workers/template_worker/setup.py b/workers/template_worker/setup.py --- a/workers/template_worker/setup.py +++ b/workers/template_worker/setup.py @@ -19,16 +19,15 @@ def read(filename): author="Augur Team", author_email="[email protected]", description="Template worker to be used as an example", - packages=find_packages(exclude=('tests',)), + packages=find_packages(), install_requires=[ 'flask', 'requests', - 'psycopg2-binary', - 'click' + 'psycopg2-binary' ], entry_points={ 'console_scripts': [ - 'template_worker_start=template_worker.runtime:main', + 'template_worker_start=workers.template_worker.runtime:main', ], }, classifiers=[ diff --git a/workers/template_worker/template_worker/worker.py b/workers/template_worker/template_worker.py similarity index 76% rename from workers/template_worker/template_worker/worker.py rename to workers/template_worker/template_worker.py --- a/workers/template_worker/template_worker/worker.py +++ b/workers/template_worker/template_worker.py @@ -6,12 +6,16 @@ from workers.worker_base import Worker class TemplateWorker(Worker): - def __init__(self, config): + def __init__(self, config={}): - # Define what this worker can be given and know how to interpret + # Define the worker's type, which will be used for self identification. + # Should be unique among all workers and is the same key used to define + # this worker's settings in the configuration file. + worker_type = "template_worker" + # Define what this worker can be given and know how to interpret # given is usually either [['github_url']] or [['git_url']] (depending if your - # worker is exclusive to repos that are on the GitHub platform) + # worker is exclusive to repos that are on the GitHub platform) given = [[]] # The name the housekeeper/broker use to distinguish the data model this worker can fill @@ -28,7 +32,14 @@ def __init__(self, config): operations_tables = ['worker_history', 'worker_job'] # Run the general worker initialization - super().__init__(config, given, models, data_tables, operations_tables) + super().__init__(worker_type, config, given, models, data_tables, operations_tables) + + # Do any additional configuration after the general initialization has been run + self.config.update(config) + + # If you need to do some preliminary interactions with the database, these MUST go + # in the model method. The database connection is instantiated only inside of each + # data collection process # Define data collection info self.tool_source = 'Fake Template Worker' @@ -54,8 +65,11 @@ def fake_data_model(self, task, repo_id): } :param repo_id: the collect() method queries the repo_id given the git/github url and passes it along to make things easier. An int such as: 27869 + """ + # Any initial database instructions, like finding the last tuple inserted or generate the next ID value + # Collection and insertion of data happens here # ... 
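To make the template above concrete, here is a minimal sketch of how the "# Collection and insertion of data happens here" section of fake_data_model could be filled in. It is illustrative only and not part of this patch: it assumes the worker_base.py helpers shown elsewhere in this diff (self.db, self.logger, self.results_counter, self.register_task_completion, and the tool_source/tool_version/data_source attributes), and a hypothetical 'fake_data' table listed in data_tables so that self.fake_data_table is reflected onto the worker.

# Illustrative sketch only -- not part of this patch. Names such as 'fake_data',
# 'fake_data_id', and the collected rows are hypothetical placeholders.
import datetime
import sqlalchemy as s

def fake_data_model(self, task, repo_id):
    # 1) Preliminary database work belongs here, inside the model method,
    #    because the database connection is only created in the collection process.
    max_id_sql = s.sql.text("""
        SELECT COALESCE(MAX(fake_data_id), 0) FROM fake_data
    """)
    start_id = int(self.db.execute(max_id_sql).fetchone()[0]) + 1  # hypothetical table/column

    # 2) Collect data from the source (API, git, etc.) -- stubbed out here.
    collected = [{'value': 42, 'label': 'example'}]

    # 3) Insert rows, tagging each with the worker's tool/data source metadata.
    for row in collected:
        record = {
            'repo_id': repo_id,
            'value': row['value'],
            'label': row['label'],
            'tool_source': self.tool_source,
            'tool_version': self.tool_version,
            'data_source': self.data_source,
            'data_collection_date': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        }
        result = self.db.execute(self.fake_data_table.insert().values(record))
        self.logger.info("Inserted fake_data row: {}".format(result.inserted_primary_key))
        self.results_counter += 1

    # 4) Tell the broker the task finished so the next task can be assigned.
    self.register_task_completion(task, repo_id, 'fake_data')

In an actual worker this body would live inside the TemplateWorker class (replacing the commented placeholder above) rather than at module level, and the imports would already be present at the top of the worker module.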
diff --git a/workers/template_worker/template_worker/runtime.py b/workers/template_worker/template_worker/runtime.py deleted file mode 100644 --- a/workers/template_worker/template_worker/runtime.py +++ /dev/null @@ -1,58 +0,0 @@ -from flask import Flask, jsonify, request, Response -import click, os, json, requests, logging -from template_worker.worker import TemplateWorker -from workers.util import read_config, create_server - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51237, help='Port') -def main(augur_url, host, port): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'repo_info_worker', None, None) - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - 'id': 'com.augurlabs.core.template_worker.{}'.format(worker_port), - 'location': 'http://{}:{}'.format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port) - } - - #create instance of the worker - app.template_worker = TemplateWorker(config) # declares the worker that will be running on this server with specified config - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - app.run(debug=app.debug, host=host, port=worker_port) - - if app.template_worker._child is not None: - app.template_worker._child.terminate() - - try: - requests.post('http://{}:{}/api/unstable/workers/remove'.format(server['host'],server['port']), json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - - - diff --git a/workers/util.py b/workers/util.py --- a/workers/util.py +++ b/workers/util.py @@ -1,5 +1,6 @@ import os, json, requests, logging from flask import Flask, Response, jsonify, request +import gunicorn.app.base def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): """ @@ -47,7 +48,7 @@ def read_config(section, name=None, environment_variable=None, default=None, con return value -def create_server(app, worker): +def create_server(app, worker=None): """ Consists of AUGWOP endpoints for the broker to communicate to this worker Can post a new task to be added to the workers queue Can retrieve current status of the worker @@ -83,4 +84,28 @@ def heartbeat(): def augwop_config(): """ Retrieve worker's config """ - return app.worker.config \ No newline at end of file + return app.worker.config + +class WorkerGunicornApplication(gunicorn.app.base.BaseApplication): + + def __init__(self, app): + self.options = { + 'bind': '%s:%s' % (app.worker.config["host"], app.worker.config["port"]), + 'workers': 1, + 'errorlog': app.worker.config['server_logfile'], + 'accesslog': 
app.worker.config['server_logfile'], + 'loglevel': app.worker.config['log_level'], + 'capture_output': app.worker.config['capture_output'] + } + + self.application = app + super().__init__() + + def load_config(self): + config = {key: value for key, value in self.options.items() + if key in self.cfg.settings and value is not None} + for key, value in config.items(): + self.cfg.set(key.lower(), value) + + def load(self): + return self.application diff --git a/workers/value_worker/__init__.py b/workers/value_worker/__init__.py new file mode 100644 diff --git a/workers/value_worker/runtime.py b/workers/value_worker/runtime.py new file mode 100644 --- /dev/null +++ b/workers/value_worker/runtime.py @@ -0,0 +1,22 @@ +from flask import Flask, jsonify, request, Response +import click, os, json, requests, logging +from workers.value_worker.value_worker import ValueWorker +from workers.util import create_server, WorkerGunicornApplication + +def main(): + """ Declares singular worker and creates the server and flask app that it will be running on + """ + app = Flask(__name__) + app.worker = ValueWorker() + + create_server(app) + WorkerGunicornApplication(app).run() + + if app.worker._child is not None: + app.worker._child.terminate() + try: + requests.post('http://{}:{}/api/unstable/workers/remove'.format(broker_host, broker_port), json={"id": config['id']}) + except: + pass + + os.kill(os.getpid(), 9) \ No newline at end of file diff --git a/workers/value_worker/setup.py b/workers/value_worker/setup.py --- a/workers/value_worker/setup.py +++ b/workers/value_worker/setup.py @@ -5,33 +5,23 @@ from setuptools import find_packages from setuptools import setup - -def read(filename): - filename = os.path.join(os.path.dirname(__file__), filename) - text_type = type(u"") - with io.open(filename, mode="r", encoding='utf-8') as fd: - return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read()) - - setup( name="value_worker", - version="0.1.0", + version="1.0.0", url="https://github.com/chaoss/augur", license='MIT', - author="Augurlabs", author_email="[email protected]", - description="Augur Worker that gathers value data", - long_description=read("README.md"), - packages=find_packages(exclude=('tests',)), - - install_requires=['flask', 'requests', 'psycopg2-binary', 'click'], - + install_requires=[ + 'flask', + 'requests', + 'psycopg2-binary', + ], entry_points={ 'console_scripts': [ - 'value_worker_start=value_worker.runtime:main', + 'value_worker_start=workers.value_worker.runtime:main', ], }, diff --git a/workers/value_worker/value_worker.py b/workers/value_worker/value_worker.py new file mode 100644 --- /dev/null +++ b/workers/value_worker/value_worker.py @@ -0,0 +1,94 @@ +import os, subprocess +from datetime import datetime +import logging +import requests +import json +from urllib.parse import quote +from multiprocessing import Process, Queue + +import pandas as pd +import sqlalchemy as s +from sqlalchemy.ext.automap import automap_base +from sqlalchemy import MetaData +from workers.worker_base import Worker + +class ValueWorker(Worker): + def __init__(self, config={}): + + worker_type = "value_worker" + + # Define what this worker can be given and know how to interpret + given = [['git_url']] + models = ['value'] + + # Define the tables needed to insert, update, or delete on + data_tables = ['repo_labor'] + operations_tables = ['worker_history', 'worker_job'] + + + # Run the general worker initialization + super().__init__(worker_type, config, given, models, data_tables, 
operations_tables) + + self.config.update({ + 'repo_directory': self.augur_config.get_value('Workers', 'facade_worker')['repo_directory'] + }) + + self.tool_source = 'Value Worker' + self.tool_version = '1.0.0' + self.data_source = 'SCC' + + def value_model(self, entry_info, repo_id): + """ Data collection and storage method + """ + self.logger.info(entry_info) + self.logger.info(repo_id) + + repo_path_sql = s.sql.text(""" + SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path + FROM repo + WHERE repo_id = :repo_id + """) + + relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1] + absolute_repo_path = self.config['repo_directory'] + relative_repo_path + + try: + self.generate_value_data(repo_id, absolute_repo_path) + except Exception as e: + self.logger.error(e) + + self.register_task_completion(entry_info, repo_id, "value") + + def generate_value_data(self, repo_id, path): + """Runs scc on repo and stores data in database + + :param repo_id: Repository ID + :param path: Absolute path of the Repostiory + """ + self.logger.info('Running `scc`....') + self.logger.info(f'Repo ID: {repo_id}, Path: {path}') + + output = subprocess.check_output([self.config['scc_bin'], '-f', 'json', path]) + records = json.loads(output.decode('utf8')) + + for record in records: + for file in record['Files']: + repo_labor = { + 'repo_id': repo_id, + 'rl_analysis_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'), + 'programming_language': file['Language'], + 'file_path': file['Location'], + 'file_name': file['Filename'], + 'total_lines': file['Lines'], + 'code_lines': file['Code'], + 'comment_lines': file['Comment'], + 'blank_lines': file['Blank'], + 'code_complexity': file['Complexity'], + 'tool_source': self.tool_source, + 'tool_version': self.tool_version, + 'data_source': self.data_source, + 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') + } + + result = self.db.execute(self.repo_labor_table.insert().values(repo_labor)) + self.logger.info(f"Added Repo Labor Data: {result.inserted_primary_key}") diff --git a/workers/value_worker/value_worker/__init__.py b/workers/value_worker/value_worker/__init__.py deleted file mode 100644 --- a/workers/value_worker/value_worker/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""value_worker - Augur Worker that collects value data""" - -__tool_source__ = 'Value Worker' -__tool_version__ = '0.1.0' -__data_source__ = 'SCC' - -__author__ = 'Augur Team <[email protected]>' -__all__ = [] - diff --git a/workers/value_worker/value_worker/runtime.py b/workers/value_worker/value_worker/runtime.py deleted file mode 100644 --- a/workers/value_worker/value_worker/runtime.py +++ /dev/null @@ -1,122 +0,0 @@ -import json -import logging -import os -import subprocess -import sys - -import click -import requests -from flask import Flask, Response, jsonify, request - -from value_worker.worker import ValueWorker - -from workers.standard_methods import read_config - -def create_server(app, gw): - """ Consists of AUGWOP endpoints for the broker to communicate to this worker - Can post a new task to be added to the workers queue - Can retrieve current status of the worker - Can retrieve the workers config object - """ - - @app.route("/AUGWOP/task", methods=['POST', 'GET']) - def augwop_task(): - """ AUGWOP endpoint that gets hit to add a task to the workers queue or is used to get the heartbeat/status of worker - """ - # POST a task to be added to the queue - if request.method == 'POST': - logging.info("Sending to 
work on task: {}".format(str(request.json))) - app.value_worker.task = request.json - return Response(response=request.json, - status=200, - mimetype="application/json") - if request.method == 'GET': #will retrieve the current tasks/status of the worker - return jsonify({ - "status": "not implemented" - }) - return Response(response=request.json, - status=200, - mimetype="application/json") - - @app.route("/AUGWOP/heartbeat", methods=['GET']) - def heartbeat(): - if request.method == 'GET': - return jsonify({ - "status": "alive" - }) - - @app.route("/AUGWOP/config") - def augwop_config(): - """ Retrieve worker's config - """ - return app.value_worker.config - [email protected]() [email protected]('--augur-url', default='http://localhost:5000/', help='Augur URL') [email protected]('--host', default='localhost', help='Host') [email protected]('--port', default=51239, help='Port') [email protected]('--scc-bin', default=f'{os.environ["HOME"]}/go/bin/scc', help='scc binary') -def main(augur_url, host, port, scc_bin): - """ Declares singular worker and creates the server and flask app that it will be running on - """ - - app = Flask(__name__) - - #load credentials - broker_host = read_config("Server", "host", "AUGUR_HOST", "0.0.0.0") - broker_port = read_config("Server", "port", "AUGUR_PORT", 5000) - database_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'host') - worker_info = read_config('Workers', 'value_worker', None, - { - "port": 37300, - "scc_bin": "/home/sean/go/bin/scc" - }) - - - worker_port = worker_info['port'] if 'port' in worker_info else port - - while True: - try: - r = requests.get("http://{}:{}/AUGWOP/heartbeat".format(host, worker_port)).json() - if 'status' in r: - if r['status'] == 'alive': - worker_port += 1 - except: - break - - logging.basicConfig(filename='worker_{}.log'.format(worker_port), filemode='w', level=logging.INFO) - - config = { - "id": "com.augurlabs.core.value_worker.{}".format(worker_port), - "broker_port": broker_port, - "broker_host": broker_host, - "location": "http://{}:{}".format(read_config('Server', 'host', 'AUGUR_HOST', 'localhost'),worker_port), - "host": database_host, - "key": read_config("Database", "key", "AUGUR_GITHUB_API_KEY", "key"), - "password": read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password'), - "port": read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - "user": read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - "database": read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - "endpoint": "https://bestpractices.coreinfrastructure.org/projects.json", - 'scc_bin': worker_info['scc_bin'], - 'repo_directory': read_config('Workers', 'facade_worker', None, None)['repo_directory'], - } - - # Create the worker that will be running on this server with specified config - app.value_worker = ValueWorker(config) - - create_server(app, None) - logging.info("Starting Flask App with pid: " + str(os.getpid()) + "...") - - - app.run(debug=app.debug, host=host, port=worker_port) - if app.value_worker._child is not None: - app.value_worker._child.terminate() - try: - requests.post(f'http://{server["host"]}:{server["port"]}/api/unstable/workers/remove', json={"id": config['id']}) - except: - pass - - logging.info("Killing Flask App: " + str(os.getpid())) - os.kill(os.getpid(), 9) - diff --git a/workers/value_worker/value_worker/worker.py b/workers/value_worker/value_worker/worker.py deleted file mode 100644 --- a/workers/value_worker/value_worker/worker.py +++ /dev/null @@ -1,267 +0,0 @@ -import os, 
subprocess -from datetime import datetime -import logging -import requests -import json -from urllib.parse import quote -from multiprocessing import Process, Queue - -from value_worker import __data_source__, __tool_source__, __tool_version__ -import pandas as pd -import sqlalchemy as s -from sqlalchemy.ext.automap import automap_base -from sqlalchemy import MetaData -from workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process - -class CollectorTask: - """ Worker's perception of a task in its queue - Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry - and the github_url given that it will be collecting data for - """ - def __init__(self, message_type='TASK', entry_info=None): - self.type = message_type - self.entry_info = entry_info - -class ValueWorker: - def __init__(self, config, task=None): - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) - self.config = config - - self.db = None - self.value_table = None - - self._task = task - self._queue = Queue() - self._child = None - - self.history_id = None - self.finishing_task = False - self.working_on = None - self.results_counter = 0 - - self.specs = { - "id": self.config['id'], - "location": self.config['location'], - "qualifications": [ - { - "given": [["git_url"]], - "models":["value"] - } - ], - "config": [self.config] - } - - self._db_str = 'postgresql://{}:{}@{}:{}/{}'.format( - self.config['user'], - self.config['password'], - self.config['host'], - self.config['port'], - self.config['database'] - ) - - dbschema = 'augur_data' - self.db = s.create_engine(self._db_str, poolclass=s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(dbschema)}) - - helper_schema = 'augur_operations' - self.helper_db = s.create_engine(self._db_str, poolclass = s.pool.NullPool, - connect_args={'options': '-csearch_path={}'.format(helper_schema)}) - logging.info("Database connection established...") - - metadata = MetaData() - helper_metadata = MetaData() - - metadata.reflect(self.db, only=['repo_labor']) - helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth']) - - Base = automap_base(metadata=metadata) - HelperBase = automap_base(metadata=helper_metadata) - - Base.prepare() - HelperBase.prepare() - - self.repo_labor_table = Base.classes.repo_labor.__table__ - self.history_table = HelperBase.classes.worker_history.__table__ - self.job_table = HelperBase.classes.worker_job.__table__ - - logging.info("ORM setup complete...") - - # Organize different api keys/oauths available - self.oauths = [] - self.headers = None - - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - - # Make a list of api key in the config combined w keys stored in the database - oauth_sql = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(0)) - - for oauth in [{'oauth_id': 0, 'access_token': 0}] + json.loads(pd.read_sql(oauth_sql, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}".format(oauth)) - response = requests.get(url=url, headers=self.headers) - self.oauths.append({ - 'oauth_id': oauth['oauth_id'], - 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': 
(datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) \ - - datetime.now()).total_seconds() - }) - logging.info("Found OAuth available for use: {}".format(self.oauths[-1])) - - if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") - - # First key to be used will be the one specified in the config (first element in - # self.oauths array will always be the key in use) - self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} - - # Send broker hello message - connect_to_broker(self) - logging.info("Connected to the broker...\n") - - def update_config(self, config): - """ Method to update config and set a default - """ - self.config = { - "key": "", - "display_name": "", - "description": "", - "required": 1, - "type": "string" - } - self.config.update(config) - self.API_KEY = self.config['github_api_key'] - - @property - def task(self): - """ Property that is returned when the worker's current task is referenced - """ - return self._task - - @task.setter - def task(self, value): - """ entry point for the broker to add a task to the queue - Adds this task to the queue, and calls method to process queue - """ - - if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN": - self._queue.put(value) - - if 'focused_task' in value: - if value['focused_task'] == 1: - logging.info("Focused task is ON\n") - self.finishing_task = True - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - else: - self.finishing_task = False - logging.info("Focused task is OFF\n") - - self._task = value - self.run() - - - def cancel(self): - """ Delete/cancel current task - """ - self._task = None - - def value_model(self, entry_info, repo_id): - """ Data collection and storage method - """ - logging.info(entry_info) - logging.info(repo_id) - - repo_path_sql = s.sql.text(""" - SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path - FROM repo - WHERE repo_id = :repo_id - """) - - relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1] - absolute_repo_path = self.config['repo_directory'] + relative_repo_path - - try: - self.generate_value_data(repo_id, absolute_repo_path) - except Exception as e: - logging.error(e) - - register_task_completion(self, entry_info, repo_id, "value") - - def generate_value_data(self, repo_id, path): - """Runs scc on repo and stores data in database - - :param repo_id: Repository ID - :param path: Absolute path of the Repostiory - """ - logging.info('Running `scc`....') - logging.info(f'Repo ID: {repo_id}, Path: {path}') - - output = subprocess.check_output([self.config['scc_bin'], '-f', 'json', path]) - records = json.loads(output.decode('utf8')) - - for record in records: - for file in record['Files']: - repo_labor = { - 'repo_id': repo_id, - 'rl_analysis_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'), - 'programming_language': file['Language'], - 'file_path': file['Location'], - 'file_name': file['Filename'], - 'total_lines': file['Lines'], - 'code_lines': file['Code'], - 'comment_lines': file['Comment'], - 'blank_lines': file['Blank'], - 'code_complexity': file['Complexity'], - 'tool_source': __tool_source__, - 'tool_version': __tool_version__, - 'data_source': __data_source__, - 'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') - } - - result = self.db.execute(self.repo_labor_table.insert().values(repo_labor)) - 
logging.info(f"Added Repo Labor Data: {result.inserted_primary_key}") - - def collect(self): - """ Function to process each entry in the worker's task queue - Determines what action to take based off the message type - """ - while True: - if not self._queue.empty(): - message = self._queue.get() - self.working_on = message['job_type'] - else: - break - logging.info("Popped off message: {}\n".format(str(message))) - - if message['job_type'] == 'STOP': - break - - if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE': - raise ValueError('{} is not a recognized task type'.format(message['job_type'])) - pass - - """ Query all repos with repo url of given task """ - repoUrlSQL = s.sql.text(""" - SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' - """.format(message['given']['git_url'])) - repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - - try: - if message['models'][0] == 'value': - self.value_model(message, repo_id) - except Exception as e: - register_task_failure(self, message, repo_id, e) - pass - - def run(self): - """ Kicks off the processing of the queue if it is not already being processed - Gets run whenever a new task is added - """ - logging.info("Running...\n") - self._child = Process(target=self.collect, args=()) - self._child.start() diff --git a/workers/worker_base.py b/workers/worker_base.py --- a/workers/worker_base.py +++ b/workers/worker_base.py @@ -1,47 +1,92 @@ """ Helper methods constant across all workers """ -import requests, datetime, time, traceback, json, os, sys, math +import requests, datetime, time, traceback, json, os, sys, math, logging +from logging import FileHandler, Formatter, StreamHandler from multiprocessing import Process, Queue import sqlalchemy as s import pandas as pd -import os -import sys, logging -from urllib.parse import urlparse -from workers.util import read_config +from pathlib import Path +from urllib.parse import urlparse, quote from sqlalchemy import MetaData from sqlalchemy.ext.automap import automap_base +from augur.config import AugurConfig +from augur.logging import AugurLogging class Worker(): - def __init__(self, config={}, given=[], models=[], data_tables=[], operations_tables=[]): + ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[], platform="github"): + + self.worker_type = worker_type self._task = None # task currently being worked on (dict) self._child = None # process of currently running task (multiprocessing process) self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes) + self.data_tables = data_tables + self.operations_tables = operations_tables + self._root_augur_dir = Worker.ROOT_AUGUR_DIR + self.platform = platform # count of tuples inserted in the database (to store stats for each task in op tables) self.results_counter = 0 - # if we are finishing a previous task, certain operations work differenty + # if we are finishing a previous task, certain operations work differently self.finishing_task = False - # Update config with options that are general and not specific to any worker - self.config = config + self.augur_config = AugurConfig(self._root_augur_dir) + + self.config = { + 'worker_type': self.worker_type, + 'host': self.augur_config.get_value("Server", "host"), + 'gh_api_key': self.augur_config.get_value('Database', 'key'), + 'offline_mode': False + } + 
self.config.update(self.augur_config.get_section("Logging")) + + try: + worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']] + self.config.update(worker_defaults) + except KeyError as e: + logging.warn('Could not get default configuration for {}'.format(self.config['worker_type'])) + + worker_info = self.augur_config.get_value('Workers', self.config['worker_type']) + self.config.update(worker_info) + + worker_port = self.config['port'] + while True: + try: + r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format( + self.config['host'], worker_port)).json() + if 'status' in r: + if r['status'] == 'alive': + worker_port += 1 + except: + break + self.config.update({ - 'port_broker': read_config('Server', 'port', 'AUGUR_PORT', 5000), - 'host_broker': read_config('Server', 'host', 'AUGUR_HOST', '0.0.0.0'), - 'host_database': read_config('Database', 'host', 'AUGUR_DB_HOST', 'host'), - 'port_database': read_config('Database', 'port', 'AUGUR_DB_PORT', 'port'), - 'user_database': read_config('Database', 'user', 'AUGUR_DB_USER', 'user'), - 'name_database': read_config('Database', 'name', 'AUGUR_DB_NAME', 'database'), - 'password_database': read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'password') + "port": worker_port, + "id": "workers.{}.{}".format(self.worker_type, worker_port), + "capture_output": False, + 'location': 'http://{}:{}'.format(self.config["host"], worker_port), + 'port_broker': self.augur_config.get_value('Server', 'port'), + 'host_broker': self.augur_config.get_value('Server', 'host'), + 'host_database': self.augur_config.get_value('Database', 'host'), + 'port_database': self.augur_config.get_value('Database', 'port'), + 'user_database': self.augur_config.get_value('Database', 'user'), + 'name_database': self.augur_config.get_value('Database', 'name'), + 'password_database': self.augur_config.get_value('Database', 'password') }) + self.config.update(config) - # Format the port the worker is running on to the name of the - # log file so we can tell multiple instances apart - logging.basicConfig(filename='worker_{}.log'.format( - self.config['id'].split('.')[len(self.config['id'].split('.')) - 1] - ), filemode='w', level=logging.INFO) - logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) + # Initialize logging in the main process + self.initialize_logging() + + # Clear log contents from previous runs + open(self.config["server_logfile"], "w").close() + open(self.config["collection_logfile"], "w").close() + + # Get configured collection logger + self.logger = logging.getLogger(self.config["id"]) + self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid()))) self.given = given self.models = models @@ -56,28 +101,100 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta ], 'config': self.config } - + + # Send broker hello message + if self.config["offline_mode"] is False: + self.connect_to_broker() + + try: + self.tool_source + self.tool_version + self.data_source + except: + self.tool_source = 'Augur Worker Testing' + self.tool_version = '0.0.0' + self.data_source = 'Augur Worker Testing' + + def __repr__(self): + return f"{self.config['id']}" + + def initialize_logging(self): + self.config["log_level"] = self.config["log_level"].upper() + if self.config["debug"]: + self.config["log_level"] = "DEBUG" + + if self.config["verbose"]: + format_string = AugurLogging.verbose_format_string + else: + format_string = AugurLogging.simple_format_string + + formatter = 
Formatter(fmt=format_string) + error_formatter = Formatter(fmt=AugurLogging.error_format_string) + + worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/" + Path(worker_dir).mkdir(exist_ok=True) + logfile_dir = worker_dir + f"/{self.worker_type}/" + Path(logfile_dir).mkdir(exist_ok=True) + + server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"]) + collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"]) + collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"]) + self.config.update({ + "logfile_dir": logfile_dir, + "server_logfile": server_logfile, + "collection_logfile": collection_logfile, + "collection_errorfile": collection_errorfile + }) + + collection_file_handler = FileHandler(filename=self.config["collection_logfile"], mode="a") + collection_file_handler.setFormatter(formatter) + collection_file_handler.setLevel(self.config["log_level"]) + + collection_errorfile_handler = FileHandler(filename=self.config["collection_errorfile"], mode="a") + collection_errorfile_handler.setFormatter(error_formatter) + collection_errorfile_handler.setLevel(logging.WARNING) + + logger = logging.getLogger(self.config["id"]) + logger.handlers = [] + logger.addHandler(collection_file_handler) + logger.addHandler(collection_errorfile_handler) + logger.setLevel(self.config["log_level"]) + logger.propagate = False + + if self.config["debug"]: + self.config["log_level"] = "DEBUG" + console_handler = StreamHandler() + console_handler.setFormatter(formatter) + console_handler.setLevel(self.config["log_level"]) + logger.addHandler(console_handler) + + if self.config["quiet"]: + logger.disabled = True + + self.logger = logger + + def initialize_database_connections(self): DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format( self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database'] ) # Create an sqlalchemy engine for both database schemas - logging.info("Making database connections... 
{}".format(DB_STR)) + self.logger.info("Making database connections") db_schema = 'augur_data' - self.db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, + self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(db_schema)}) helper_schema = 'augur_operations' - self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool, + self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool, connect_args={'options': '-csearch_path={}'.format(helper_schema)}) metadata = MetaData() helper_metadata = MetaData() # Reflect only the tables we will use for each schema's metadata object - metadata.reflect(self.db, only=data_tables) - helper_metadata.reflect(self.helper_db, only=operations_tables) + metadata.reflect(self.db, only=self.data_tables) + helper_metadata.reflect(self.helper_db, only=self.operations_tables) Base = automap_base(metadata=metadata) HelperBase = automap_base(metadata=helper_metadata) @@ -86,28 +203,27 @@ def __init__(self, config={}, given=[], models=[], data_tables=[], operations_ta HelperBase.prepare() # So we can access all our tables when inserting, updating, etc - for table in data_tables: + for table in self.data_tables: setattr(self, '{}_table'.format(table), Base.classes[table].__table__) try: - logging.info(HelperBase.classes.keys()) + self.logger.info(HelperBase.classes.keys()) except: pass - for table in operations_tables: + for table in self.operations_tables: try: setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__) except Exception as e: - logging.info("Error setting attribute for table: {} : {}".format(table, e)) + self.logger.error("Error setting attribute for table: {} : {}".format(table, e)) # Increment so we are ready to insert the 'next one' of each of these most recent ids self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1 # Organize different api keys/oauths available - if 'gh_api_key' in self.config: - self.init_oauths() - - # Send broker hello message - self.connect_to_broker() + if 'gh_api_key' in self.config or 'gitlab_api_key' in self.config: + self.init_oauths(self.platform) + else: + self.oauths = [{'oauth_id': 0}] @property def task(self): @@ -128,7 +244,7 @@ def task(self, value): # This setting is set by the housekeeper and is attached to the task before it gets sent here if 'focused_task' in value: if value['focused_task'] == 1: - logging.info("Focused task is ON\n") + self.logger.debug("Focused task is ON\n") self.finishing_task = True self._task = value @@ -143,21 +259,23 @@ def run(self): """ Kicks off the processing of the queue if it is not already being processed Gets run whenever a new task is added """ - logging.info("Running...\n") # Spawn a subprocess to handle message reading and performing the tasks self._child = Process(target=self.collect, args=()) self._child.start() - + def collect(self): """ Function to process each entry in the worker's task queue Determines what action to take based off the message type """ + self.initialize_logging() # need to initialize logging again in child process cause multiprocessing + self.logger.info("Starting data collection process\n") + self.initialize_database_connections() while True: if not self._queue.empty(): message = self._queue.get() # Get the task off our MP queue else: break - logging.info("Popped off message: {}\n".format(str(message))) + self.logger.info("Popped off message: {}\n".format(str(message))) if message['job_type'] == 'STOP': break @@ -172,13 
+290,13 @@ def collect(self): SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}' """.format(message['given'][self.given[0][0]])) repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id']) - + self.logger.info("repo_id for which data collection is being initiated: {}".format(str(repo_id))) # Call method corresponding to model sent in task try: model_method = getattr(self, '{}_model'.format(message['models'][0])) self.record_model_process(repo_id, 'repo_info') except Exception as e: - logging.info('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) + + self.logger.error('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) + 'must have name of {}_model'.format(message['models'][0])) self.register_task_failure(message, repo_id, e) break @@ -186,51 +304,95 @@ def collect(self): # Model method calls wrapped in try/except so that any unexpected error that occurs can be caught # and worker can move onto the next task without stopping try: + self.logger.info("Calling model method {}_models".format(message['models'][0])) model_method(message, repo_id) - except Exception as e: + except Exception as e: # this could be a custom exception, might make things easier self.register_task_failure(message, repo_id, e) - pass + break + + self.logger.debug('Closing database connections\n') + self.db.dispose() + self.helper_db.dispose() + self.logger.info("Collection process finished") def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}): - """ map objects => { *our db col* : *gh json key*} """ + """ Include an extra key-value pair on each element of new_data that represents + the action that should be taken with this element (i.e. 'need_insertion') + + :param new_data: List of dictionaries, data to be assigned an action to + :param table_values: Pandas DataFrame, existing data in the database to check + what action should be taken on the new_data depending on the presence of + each element in this DataFrame + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. + Example: {'id': 'gh_issue_id'} + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. Example: {'id': 'gh_issue_id'} + :param table_pkey: String, the field name of the primary key of the table in + the database that we are checking the table_values for. + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. 
+ Example: {'cntrb_id': None} + :return: List of dictionaries, contains all the same elements of new_data, except + each element now has an extra key-value pair with the key being 'flag', and + the value being 'need_insertion', 'need_update', or 'none' + """ need_insertion_count = 0 need_update_count = 0 for i, obj in enumerate(new_data): if type(obj) != dict: - logging.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) + self.logger.info('Moving to next tuple, tuple is not dict: {}'.format(obj)) continue obj['flag'] = 'none' # default of no action needed + existing_tuple = None for db_dupe_key in list(duplicate_col_map.keys()): if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any(): + if table_values[table_values[db_dupe_key].isin( + [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'): + + existing_tuple = table_values[table_values[db_dupe_key].isin( + [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] continue - logging.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) + self.logger.info('Found a tuple that needs insertion based on dupe key: {}\n'.format(db_dupe_key)) obj['flag'] = 'need_insertion' need_insertion_count += 1 break if obj['flag'] == 'need_insertion': - logging.info('Already determined that current tuple needs insertion, skipping checking updates. ' + self.logger.info('Already determined that current tuple needs insertion, skipping checking updates. ' 'Moving to next tuple.\n') continue - existing_tuple = table_values[table_values[db_dupe_key].isin( - [obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0] + if not existing_tuple: + self.logger.info('An existing tuple was not found for this data ' + + 'point and we have reached the check-updates portion of assigning ' + + 'tuple action, so we will now move to next data point\n') + continue # If we need to check the values of the existing tuple to determine if an update is needed for augur_col, value_check in value_update_col_map.items(): not_nan_check = not (math.isnan(value_check) and math.isnan(existing_tuple[augur_col])) if value_check is not None else True if existing_tuple[augur_col] != value_check and not_nan_check: continue - logging.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) + self.logger.info("Found a tuple that needs an update for column: {}\n".format(augur_col)) obj['flag'] = 'need_update' obj['pkey'] = existing_tuple[table_pkey] need_update_count += 1 if obj['flag'] == 'need_update': - logging.info('Already determined that current tuple needs update, skipping checking further updates. ' + self.logger.info('Already determined that current tuple needs update, skipping checking further updates. 
' 'Moving to next tuple.\n') continue @@ -240,25 +402,34 @@ def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_ continue if obj[update_col_map[col]] == existing_tuple[col]: continue - logging.info("Found a tuple that needs an update for column: {}\n".format(col)) + self.logger.info("Found a tuple that needs an update for column: {}\n".format(col)) obj['flag'] = 'need_update' obj['pkey'] = existing_tuple[table_pkey] need_update_count += 1 - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + + self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) + "was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count)) return new_data - def check_duplicates(new_data, table_values, key): + def check_duplicates(self, new_data, table_values, key): + """ Filters what items of the new_data json (list of dictionaries) that are not + present in the table_values df + + :param new_data: List of dictionaries, new data to filter duplicates out of + :param table_values: Pandas DataFrame, existing data to check what data is already + present in the database + :param key: String, key of each dict in new_data whose value we are checking + duplicates with + :return: List of dictionaries, contains elements of new_data that are not already + present in the database + """ need_insertion = [] for obj in new_data: - if type(obj) == dict: - if not table_values.isin([obj[key]]).any().any(): - need_insertion.append(obj) - # else: - # logging.info("Tuple with github's {} key value already".format(key) + - # "exists in our db: {}\n".format(str(obj[key]))) - logging.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + + if type(obj) != dict: + continue + if not table_values.isin([obj[key]]).any().any(): + need_insertion.append(obj) + self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) + "was reduced to {} tuples.\n".format(str(len(need_insertion)))) return need_insertion @@ -266,16 +437,16 @@ def connect_to_broker(self): connected = False for i in range(5): try: - logging.info("attempt {}\n".format(i)) + self.logger.debug("Connecting to broker, attempt {}\n".format(i)) if i > 0: time.sleep(10) requests.post('http://{}:{}/api/unstable/workers'.format( self.config['host_broker'],self.config['port_broker']), json=self.specs) - logging.info("Connection to the broker was successful\n") + self.logger.info("Connection to the broker was successful\n") connected = True break except requests.exceptions.ConnectionError: - logging.error('Cannot connect to the broker. Trying again...\n') + self.logger.error('Cannot connect to the broker. Trying again...\n') if not connected: sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n') @@ -290,23 +461,39 @@ def dump_queue(queue): # time.sleep(.1) return result - def find_id_from_login(self, login): + def find_id_from_login(self, login, platform='github'): + """ + Retrieves our contributor table primary key value for the contributor with + the given GitHub login credentials, if this contributor is not there, then + they get inserted. 
+ + :param login: String, the GitHub login username to find the primary key id for + :return: Integer, the id of the row in our database with the matching GitHub login + """ idSQL = s.sql.text(""" - SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' - """.format(login)) + SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' \ + AND LOWER(data_source) = '{} api' + """.format(login, platform)) + + self.logger.info(idSQL) + rs = pd.read_sql(idSQL, self.db, params={}) data_list = [list(row) for row in rs.itertuples(index=False)] try: return data_list[0][0] except: - logging.info("contributor needs to be added...") + self.logger.info('contributor needs to be added...') - cntrb_url = ("https://api.github.com/users/" + login) - logging.info("Hitting endpoint: {} ...\n".format(cntrb_url)) + if platform == 'github': + cntrb_url = ("https://api.github.com/users/" + login) + elif platform == 'gitlab': + cntrb_url = ("https://gitlab.com/api/v4/users?username=" + login ) + self.logger.info("Hitting endpoint: {} ...\n".format(cntrb_url)) r = requests.get(url=cntrb_url, headers=self.headers) - self.update_gh_rate_limit(r) + self.update_rate_limit(r) contributor = r.json() + company = None location = None email = None @@ -316,57 +503,108 @@ def find_id_from_login(self, login): location = contributor['location'] if 'email' in contributor: email = contributor['email'] + - cntrb = { - "cntrb_login": contributor['login'] if 'login' in contributor else None, - "cntrb_email": email, - "cntrb_company": company, - "cntrb_location": location, - "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, - "cntrb_canonical": None, - "gh_user_id": contributor['id'], - "gh_login": contributor['login'], - "gh_url": contributor['url'], - "gh_html_url": contributor['html_url'], - "gh_node_id": contributor['node_id'], - "gh_avatar_url": contributor['avatar_url'], - "gh_gravatar_id": contributor['gravatar_id'], - "gh_followers_url": contributor['followers_url'], - "gh_following_url": contributor['following_url'], - "gh_gists_url": contributor['gists_url'], - "gh_starred_url": contributor['starred_url'], - "gh_subscriptions_url": contributor['subscriptions_url'], - "gh_organizations_url": contributor['organizations_url'], - "gh_repos_url": contributor['repos_url'], - "gh_events_url": contributor['events_url'], - "gh_received_events_url": contributor['received_events_url'], - "gh_type": contributor['type'], - "gh_site_admin": contributor['site_admin'], - "tool_source": self.tool_source, - "tool_version": self.tool_version, - "data_source": self.data_source - } + if platform == 'github': + cntrb = { + "cntrb_login": contributor['login'] if 'login' in contributor else None, + "cntrb_email": contributor['email'] if 'email' in contributor else None, + "cntrb_company": contributor['company'] if 'company' in contributor else None, + "cntrb_location": contributor['location'] if 'location' in contributor else None, + "cntrb_created_at": contributor['created_at'] if 'created_at' in contributor else None, + "cntrb_canonical": None, + "gh_user_id": contributor['id'] if 'id' in contributor else None, + "gh_login": contributor['login'] if 'login' in contributor else None, + "gh_url": contributor['url'] if 'url' in contributor else None, + "gh_html_url": contributor['html_url'] if 'html_url' in contributor else None, + "gh_node_id": contributor['node_id'] if 'node_id' in contributor else None, + "gh_avatar_url": contributor['avatar_url'] if 'avatar_url' in contributor else None, + 
"gh_gravatar_id": contributor['gravatar_id'] if 'gravatar_id' in contributor else None, + "gh_followers_url": contributor['followers_url'] if 'followers_url' in contributor else None, + "gh_following_url": contributor['following_url'] if 'following_url' in contributor else None, + "gh_gists_url": contributor['gists_url'] if 'gists_url' in contributor else None, + "gh_starred_url": contributor['starred_url'] if 'starred_url' in contributor else None, + "gh_subscriptions_url": contributor['subscriptions_url'] if 'subscriptions_url' in contributor else None, + "gh_organizations_url": contributor['organizations_url'] if 'organizations_url' in contributor else None, + "gh_repos_url": contributor['repos_url'] if 'repos_url' in contributor else None, + "gh_events_url": contributor['events_url'] if 'events_url' in contributor else None, + "gh_received_events_url": contributor['received_events_url'] if 'received_events_url' in contributor else None, + "gh_type": contributor['type'] if 'type' in contributor else None, + "gh_site_admin": contributor['site_admin'] if 'site_admin' in contributor else None, + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + + elif platform == 'gitlab': + cntrb = { + "cntrb_login": contributor[0]['username'] if 'username' in contributor[0] else None, + "cntrb_email": email, + "cntrb_company": company, + "cntrb_location": location, + "cntrb_created_at": contributor[0]['created_at'] if 'created_at' in contributor[0] else None, + "cntrb_canonical": None, + "gh_user_id": contributor[0]['id'], + "gh_login": contributor[0]['username'], + "gh_url": contributor[0]['web_url'], + "gh_html_url": None, + "gh_node_id": None, + "gh_avatar_url": contributor[0]['avatar_url'], + "gh_gravatar_id": None, + "gh_followers_url": None, + "gh_following_url": None, + "gh_gists_url": None, + "gh_starred_url": None, + "gh_subscriptions_url": None, + "gh_organizations_url": None, + "gh_repos_url": None, + "gh_events_url": None, + "gh_received_events_url": None, + "gh_type": None, + "gh_site_admin": None, + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key)) self.results_counter += 1 self.cntrb_id_inc = int(result.inserted_primary_key[0]) - logging.info("Inserted contributor: " + contributor['login'] + "\n") + self.logger.info("Inserted contributor: " + cntrb['cntrb_login'] + "\n") - return self.find_id_from_login(login) + return self.find_id_from_login(login, platform) - def get_owner_repo(self, github_url): - split = github_url.split('/') + def get_owner_repo(self, git_url): + """ Gets the owner and repository names of a repository from a git url + + :param git_url: String, the git url of a repository + :return: Tuple, includes the owner and repository names in that order + """ + split = git_url.split('/') owner = split[-2] repo = split[-1] - if '.git' in repo: + if '.git' == repo[-4:]: repo = repo[:-4] return owner, repo def get_max_id(self, table, column, default=25150, operations_table=False): + """ Gets the max value (usually used for id/pk's) of any Integer column + of any table + + :param table: String, the table that consists of the column you want to + query a max value for + :param column: 
String, the column that you want to query the max value for + :param default: Integer, if there are no values in the + specified column, the value of this parameter will be returned + :param operations_table: Boolean, if True, this signifies that the table/column + that is wanted to be queried is in the augur_operations schema rather than + the augur_data schema. Default False + :return: Integer, the max value of the specified column/table + """ maxIdSQL = s.sql.text(""" SELECT max({0}.{1}) AS {1} FROM {0} @@ -375,14 +613,24 @@ def get_max_id(self, table, column, default=25150, operations_table=False): rs = pd.read_sql(maxIdSQL, db, params={}) if rs.iloc[0][column] is not None: max_id = int(rs.iloc[0][column]) + 1 - logging.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) + self.logger.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id)) else: max_id = default - logging.info("Could not find max id for {} column in the {} table... using default set to: \ - {}\n".format(column, table, max_id)) + self.logger.warning('Could not find max id for {} column in the {} table... ' + + 'using default set to: {}\n'.format(column, table, max_id)) return max_id def get_table_values(self, cols, tables, where_clause=""): + """ Can query all values of any column(s) from any table(s) + with an optional where clause + + :param cols: List of Strings, column(s) that user wants to query + :param tables: List of Strings, table(s) that user wants to query + :param where_clause: String, optional where clause to filter the values + queried + :return: Pandas DataFrame, contains all values queried in the columns, tables, and + optional where clause provided + """ table_str = tables[0] del tables[0] @@ -394,46 +642,103 @@ def get_table_values(self, cols, tables, where_clause=""): for col in cols: col_str += ", " + col - tableValuesSQL = s.sql.text(""" + table_values_sql = s.sql.text(""" SELECT {} FROM {} {} """.format(col_str, table_str, where_clause)) - logging.info("Getting table values with the following PSQL query: \n{}\n".format(tableValuesSQL)) - values = pd.read_sql(tableValuesSQL, self.db, params={}) + self.logger.info('Getting table values with the following PSQL query: \n{}\n'.format( + table_values_sql)) + values = pd.read_sql(table_values_sql, self.db, params={}) return values - def init_oauths(self): + def init_oauths(self, platform="github"): self.oauths = [] self.headers = None - # Endpoint to hit solely to retrieve rate limit information from headers of the response - url = "https://api.github.com/users/gabe-heim" - # Make a list of api key in the config combined w keys stored in the database - oauthSQL = s.sql.text(""" - SELECT * FROM worker_oauth WHERE access_token <> '{}' - """.format(self.config['gh_api_key'])) - for oauth in [{'oauth_id': 0, 'access_token': self.config['gh_api_key']}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): - self.headers = {'Authorization': 'token %s' % oauth['access_token']} - logging.info("Getting rate limit info for oauth: {}\n".format(oauth)) + # Select endpoint to hit solely to retrieve rate limit information from headers of the response + # Adjust header keys needed to fetch rate limit information from the API responses + if platform == "github": + url = "https://api.github.com/users/gabe-heim" + oauthSQL = s.sql.text(""" + SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'github' + """.format(self.config['gh_api_key'])) + key_name = 
"gh_api_key" + rate_limit_header_key = "X-RateLimit-Remaining" + rate_limit_reset_header_key = "X-RateLimit-Reset" + elif platform == "gitlab": + url = "https://gitlab.com/api/v4/version" + oauthSQL = s.sql.text(""" + SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'gitlab' + """.format(self.config['gitlab_api_key'])) + key_name = "gitlab_api_key" + rate_limit_header_key = "ratelimit-remaining" + rate_limit_reset_header_key = "ratelimit-reset" + + for oauth in [{'oauth_id': 0, 'access_token': self.config[key_name]}] + json.loads(pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")): + if platform == "github": + self.headers = {'Authorization': 'token %s' % oauth['access_token']} + elif platform == "gitlab": + self.headers = {'Authorization': 'Bearer %s' % oauth['access_token']} + self.logger.info("Getting rate limit info for oauth: {}\n".format(oauth)) response = requests.get(url=url, headers=self.headers) self.oauths.append({ 'oauth_id': oauth['oauth_id'], 'access_token': oauth['access_token'], - 'rate_limit': int(response.headers['X-RateLimit-Remaining']), - 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() + 'rate_limit': int(response.headers[rate_limit_header_key]), + 'seconds_to_reset': (datetime.datetime.fromtimestamp(int(response.headers[rate_limit_reset_header_key])) - datetime.datetime.now()).total_seconds() }) - logging.info("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) + self.logger.debug("Found OAuth available for use: {}\n\n".format(self.oauths[-1])) if len(self.oauths) == 0: - logging.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") + self.logger.info("No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\n") # First key to be used will be the one specified in the config (first element in # self.oauths array will always be the key in use) + if platform == "github": + self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + elif platform == "gitlab": + self.headers = {'Authorization': 'Bearer %s' % self.oauths[0]['access_token']} + self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + self.logger.info("OAuth initialized") + + def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}, platform="github"): + """ Paginate either backwards or forwards (depending on the value of the worker's + finishing_task attribute) through all the GitHub or GitLab api endpoint pages. + + :param url: String, the url of the API endpoint we are paginating through, expects + a curly brace string formatter within the string to format the Integer + representing the page number that is wanted to be returned + :param duplicate_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + duplicates (if source data value == value in existing database row, then this + element is a duplicate and would not need an insertion). Key is source data + column name, value is database field name. 
Example: {'id': 'gh_issue_id'} + :param update_col_map: Dictionary, maps the column names of the source data + to the field names in our database for columns that should be checked for + updates (if source data value != value in existing database row, then an + update is needed). Key is source data column name, value is database field name. + Example: {'id': 'gh_issue_id'} + :param table: String, the name of the table that holds the values to check for + duplicates/updates against + :param table_pkey: String, the field name of the primary key of the table in + the database that we are getting the values for to cross-reference to check + for duplicates. + :param where_clause: String, optional where clause to filter the values + that are queried when preparing the values that will be cross-referenced + for duplicates/updates + :param value_update_col_map: Dictionary, sometimes we add a new field to a table, + and we want to trigger an update of that row in the database even if all of the + data values are the same and would not need an update ordinarily. Checking for + a specific existing value in the database field allows us to do this. The key is the + name of the field in the database we are checking for a specific value to trigger + an update, the value is the value we are checking for equality to trigger an update. + Example: {'cntrb_id': None} + :return: List of dictionaries, all data points from the pages of the specified API endpoint + each with a 'flag' key-value pair representing the required action to take with that + data point (i.e. 'need_insertion', 'need_update', 'none') + """ - def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}): - # Paginate backwards through all the tuples but get first page in order - # to determine if there are multiple pages and if the 1st page covers all update_keys = list(update_col_map.keys()) if update_col_map else [] update_keys += list(value_update_col_map.keys()) if value_update_col_map else [] cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey] @@ -446,10 +751,18 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh num_attempts = 0 success = False while num_attempts < 3: - logging.info("Hitting endpoint: " + url.format(i) + " ...\n") + self.logger.info(f'Hitting endpoint: {url.format(i)}...\n') r = requests.get(url=url.format(i), headers=self.headers) - self.update_gh_rate_limit(r) - logging.info("Analyzing page {} of {}\n".format(i, int(r.links['last']['url'][-6:].split('=')[1]) + 1 if 'last' in r.links else '*last page not known*')) + + self.update_rate_limit(r, platform=platform) + if 'last' not in r.links: + last_page = None + else: + if platform == "github": + last_page = r.links['last']['url'][-6:].split('=')[1] + elif platform == "gitlab": + last_page = r.links['last']['url'].split('&')[2].split("=")[1] + self.logger.info("Analyzing page {} of {}\n".format(i, int(last_page) + 1 if last_page is not None else '*last page not known*')) try: j = r.json() @@ -460,21 +773,23 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh success = True break elif type(j) == dict: - logging.info("Request returned a dict: {}\n".format(j)) + self.logger.info("Request returned a dict: {}\n".format(j)) if j['message'] == 'Not Found': - logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(url)) + self.logger.warning("Github repo was not found or does not exist for endpoint: 
{}\n".format(url)) break if j['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.': num_attempts -= 1 - self.update_gh_rate_limit(r, temporarily_disable=True) + self.logger.info("rate limit update code goes here") + self.update_rate_limit(r, temporarily_disable=True,platform=platform) if j['message'] == 'Bad credentials': - self.update_gh_rate_limit(r, bad_credentials=True) + self.logger.info("rate limit update code goes here") + self.update_rate_limit(r, bad_credentials=True, platform=platform) elif type(j) == str: - logging.info("J was string: {}\n".format(j)) + self.logger.info(f'J was string: {j}\n') if '<!DOCTYPE html>' in j: - logging.info("HTML was returned, trying again...\n") + self.logger.info('HTML was returned, trying again...\n') elif len(j) == 0: - logging.info("Empty string, trying again...\n") + self.logger.warning('Empty string, trying again...\n') else: try: j = json.loads(j) @@ -488,44 +803,52 @@ def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, wh # Find last page so we can decrement from there if 'last' in r.links and not multiple_pages and not self.finishing_task: - param = r.links['last']['url'][-6:] - i = int(param.split('=')[1]) + 1 - logging.info("Multiple pages of request, last page is " + str(i - 1) + "\n") + if platform == "github": + param = r.links['last']['url'][-6:] + i = int(param.split('=')[1]) + 1 + elif platform == "gitlab": + i = int(r.links['last']['url'].split('&')[2].split("=")[1]) + 1 + self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n") multiple_pages = True elif not multiple_pages and not self.finishing_task: - logging.info("Only 1 page of request\n") + self.logger.info("Only 1 page of request\n") elif self.finishing_task: - logging.info("Finishing a previous task, paginating forwards ..." + self.logger.info("Finishing a previous task, paginating forwards ..." " excess rate limit requests will be made\n") - + if len(j) == 0: - logging.info("Response was empty, breaking from pagination.\n") + self.logger.info("Response was empty, breaking from pagination.\n") break - + # Checking contents of requests with what we already have in the db j = self.assign_tuple_action(j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map) if not j: - logging.info("Assigning tuple action failed, moving to next page.\n") + self.logger.error("Assigning tuple action failed, moving to next page.\n") i = i + 1 if self.finishing_task else i - 1 continue try: - to_add = [obj for obj in j if obj not in tuples and obj['flag'] != 'none'] + to_add = [obj for obj in j if obj not in tuples and (obj['flag'] != 'none')] except Exception as e: - logging.info("Failure accessing data of page: {}. Moving to next page.\n".format(e)) + self.logger.error("Failure accessing data of page: {}. 
Moving to next page.\n".format(e)) i = i + 1 if self.finishing_task else i - 1 continue if len(to_add) == 0 and multiple_pages and 'last' in r.links: - logging.info("{}".format(r.links['last'])) - if i - 1 != int(r.links['last']['url'][-6:].split('=')[1]): - logging.info("No more pages with unknown tuples, breaking from pagination.\n") + self.logger.info("{}".format(r.links['last'])) + if platform == "github": + page_number = int(r.links['last']['url'][-6:].split('=')[1]) + elif platform == "gitlab": + page_number = int(r.links['last']['url'].split('&')[2].split("=")[1]) + if i - 1 != page_number: + self.logger.info("No more pages with unknown tuples, breaking from pagination.\n") break + tuples += to_add i = i + 1 if self.finishing_task else i - 1 # Since we already wouldve checked the first page... break if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0: - logging.info("No more pages to check, breaking from pagination.\n") + self.logger.info("No more pages to check, breaking from pagination.\n") break return tuples @@ -535,24 +858,16 @@ def query_github_contributors(self, entry_info, repo_id): """ Data collection function Query the GitHub API for contributors """ - logging.info("Querying contributors with given entry info: " + str(entry_info) + "\n") + self.logger.info(f'Querying contributors with given entry info: {entry_info}\n') github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url'] # Extract owner/repo from the url for the endpoint - path = urlparse(github_url) - split = path[2].split('/') - - owner = split[1] - name = split[2] - - # Handles git url case by removing the extension - if ".git" in name: - name = name[:-4] + owner, name = self.get_owner_repo(github_url) # Set the base of the url and place to hold contributors to insert - contributors_url = ("https://api.github.com/repos/" + owner + "/" + - name + "/contributors?per_page=100&page={}") + contributors_url = (f'https://api.github.com/repos/{owner}/{name}/' + + 'contributors?per_page=100&page={}') # Get contributors that we already have stored # Set our duplicate and update column map keys (something other than PK) to @@ -565,7 +880,7 @@ def query_github_contributors(self, entry_info, repo_id): #list to hold contributors needing insertion or update contributors = self.paginate(contributors_url, duplicate_col_map, update_col_map, table, table_pkey) - logging.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") + self.logger.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n") for repo_contributor in contributors: try: @@ -573,7 +888,7 @@ def query_github_contributors(self, entry_info, repo_id): # `created at` # i think that's it cntrb_url = ("https://api.github.com/users/" + repo_contributor['login']) - logging.info("Hitting endpoint: " + cntrb_url + " ...\n") + self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n") r = requests.get(url=cntrb_url, headers=self.headers) self.update_gh_rate_limit(r) contributor = r.json() @@ -624,69 +939,118 @@ def query_github_contributors(self, entry_info, repo_id): if repo_contributor['flag'] == 'need_update': result = self.db.execute(self.contributors_table.update().where( self.worker_history_table.c.cntrb_email==email).values(cntrb)) - logging.info("Updated tuple in the contributors table with existing email: {}".format(email)) + self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email)) 
self.cntrb_id_inc = repo_contributor['pkey'] elif repo_contributor['flag'] == 'need_insertion': result = self.db.execute(self.contributors_table.insert().values(cntrb)) - logging.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) self.results_counter += 1 - logging.info("Inserted contributor: " + contributor['login'] + "\n") + self.logger.info("Inserted contributor: " + contributor['login'] + "\n") # Increment our global track of the cntrb id for the possibility of it being used as a FK self.cntrb_id_inc = int(result.inserted_primary_key[0]) except Exception as e: - logging.info("Caught exception: {}".format(e)) - logging.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) + self.logger.error("Caught exception: {}".format(e)) + self.logger.error("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) continue - def read_config(section, name=None, environment_variable=None, default=None, config_file_path='../../augur.config.json', no_config_file=0, use_main_config=0): - """ - Read a variable in specified section of the config file, unless provided an environment variable + def query_gitlab_contribtutors(self, entry_info, repo_id): - :param section: location of given variable - :param name: name of variable - """ - config_file_path = os.getenv("AUGUR_CONFIG_FILE", config_file_path) - _config_file_name = 'augur.config.json' - _config_bad = False - _already_exported = {} - _runtime_location = 'runtime/' - _default_config = {} - _config_file = None + gitlab_url = entry_info['given']['gitlab_url'] if 'gitlab_url' in entry_info['given'] else entry_info['given']['git_url'] - try: - _config_file = open(config_file_path, 'r+') - except: - print('Couldn\'t open {}'.format(_config_file_name)) + self.logger.info("Querying contributors with given entry info: " + str(entry_info) + "\n") - # Load the config file - try: - config_text = _config_file.read() - _config = json.loads(config_text) - except json.decoder.JSONDecodeError as e: - if not _config_bad: - _using_config_file = False - print('{} could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. 
Error: {}'.format(config_file_path, str(e))) - _config = _default_config - - value = None - if environment_variable is not None: - value = os.getenv(environment_variable) - if value is None: + path = urlparse(gitlab_url) + split = path[2].split('/') + + owner = split[1] + name = split[2] + + # Handles git url case by removing the extension + if ".git" in name: + name = name[:-4] + + url_encoded_format = quote(owner + '/' + name, safe='') + + table = 'contributors' + table_pkey = 'cntrb_id' + update_col_map = {'cntrb_email': 'email'} + duplicate_col_map = {'cntrb_login': 'email'} + + # list to hold contributors needing insertion or update + contributors = self.paginate("https://gitlab.com/api/v4/projects/" + url_encoded_format + "/repository/contributors?per_page=100&page={}", duplicate_col_map, update_col_map, table, table_pkey, platform='gitlab') + + for repo_contributor in contributors: try: - if name is not None: - value = _config[section][name] - else: - value = _config[section] - except Exception as e: - value = default - if not section in _config: - _config[section] = {} + cntrb_compressed_url = ("https://gitlab.com/api/v4/users?search=" + repo_contributor['email']) + self.logger.info("Hitting endpoint: " + cntrb_compressed_url + " ...\n") + r = requests.get(url=cntrb_compressed_url, headers=self.headers) + contributor_compressed = r.json() - return value + email = repo_contributor['email'] + if len(contributor_compressed) == 0 or "id" not in contributor_compressed[0]: + continue + self.logger.info("Fetching for user: " + str(contributor_compressed[0]["id"])) + + cntrb_url = ("https://gitlab.com/api/v4/users/" + str(contributor_compressed[0]["id"])) + self.logger.info("Hitting end point to get complete contributor info now: " + cntrb_url + "...\n") + r = requests.get(url=cntrb_url, headers=self.headers) + contributor = r.json() + + cntrb = { + "cntrb_login": contributor.get('username', None), + "cntrb_created_at": contributor.get('created_at', None), + "cntrb_email": email, + "cntrb_company": contributor.get('organization', None), + "cntrb_location": contributor.get('location', None), + # "cntrb_type": , dont have a use for this as of now ... 
let it default to null + "cntrb_canonical": contributor.get('public_email', None), + "gh_user_id": contributor.get('id', None), + "gh_login": contributor.get('username', None), + "gh_url": contributor.get('web_url', None), + "gh_html_url": contributor.get('web_url', None), + "gh_node_id": None, + "gh_avatar_url": contributor.get('avatar_url', None), + "gh_gravatar_id": None, + "gh_followers_url": None, + "gh_following_url": None, + "gh_gists_url": None, + "gh_starred_url": None, + "gh_subscriptions_url": None, + "gh_organizations_url": None, + "gh_repos_url": None, + "gh_events_url": None, + "gh_received_events_url": None, + "gh_type": None, + "gh_site_admin": None, + "tool_source": self.tool_source, + "tool_version": self.tool_version, + "data_source": self.data_source + } + + # Commit insertion to table + if repo_contributor['flag'] == 'need_update': + result = self.db.execute(self.contributors_table.update().where( + self.worker_history_table.c.cntrb_email == email).values(cntrb)) + self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email)) + self.cntrb_id_inc = repo_contributor['pkey'] + elif repo_contributor['flag'] == 'need_insertion': + result = self.db.execute(self.contributors_table.insert().values(cntrb)) + self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key)) + self.results_counter += 1 + + self.logger.info("Inserted contributor: " + contributor['username'] + "\n") + + # Increment our global track of the cntrb id for the possibility of it being used as a FK + self.cntrb_id_inc = int(result.inserted_primary_key[0]) + + except Exception as e: + self.logger.info("Caught exception: {}".format(e)) + self.logger.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url)) + continue def record_model_process(self, repo_id, model): @@ -705,7 +1069,7 @@ def record_model_process(self, repo_id, model): self.history_id += 1 else: result = self.helper_db.execute(self.worker_history_table.insert().values(task_history)) - logging.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) + self.logger.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key)) self.history_id = int(result.inserted_primary_key[0]) def register_task_completion(self, task, repo_id, model): @@ -716,10 +1080,12 @@ def register_task_completion(self, task, repo_id, model): 'repo_id': repo_id, 'job_model': model } - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" - task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] if 'git_url' in task['given'] else "INVALID_GIVEN" + key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \ + 'gitlab_url' if 'gitlab_url' in task['given'] else 'INVALID_GIVEN' + task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] \ + if 'git_url' in task['given'] else task['given']['gitlab_url'] if 'gitlab_url' in task['given'] else 'INVALID_GIVEN' if key == 'INVALID_GIVEN': - self.register_task_failure(task, repo_id, "INVALID_GIVEN: not github nor git url") + self.register_task_failure(task, repo_id, "INVALID_GIVEN: Not a github/gitlab/git url.") return # Add to history table @@ -735,7 +1101,7 @@ def register_task_completion(self, task, repo_id, model): 
self.helper_db.execute(self.worker_history_table.update().where( self.worker_history_table.c.history_id==self.history_id).values(task_history)) - logging.info("Recorded job completion for: " + str(task_completed) + "\n") + self.logger.info("Recorded job completion for: " + str(task_completed) + "\n") # Update job process table updated_job = { @@ -746,28 +1112,31 @@ def register_task_completion(self, task, repo_id, model): } self.helper_db.execute(self.worker_job_table.update().where( self.worker_job_table.c.job_model==model).values(updated_job)) - logging.info("Updated job process for model: " + model + "\n") + self.logger.info("Updated job process for model: " + model + "\n") - # Notify broker of completion - logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + - "This task inserted: " + str(self.results_counter) + " tuples.\n\n") + if self.config["offline_mode"] is False: + + # Notify broker of completion + self.logger.info("Telling broker we completed task: " + str(task_completed) + "\n\n" + + "This task inserted: " + str(self.results_counter) + " tuples.\n") - requests.post('http://{}:{}/api/unstable/completed_task'.format( - self.config['host_broker'],self.config['port_broker']), json=task_completed) + requests.post('http://{}:{}/api/unstable/completed_task'.format( + self.config['host_broker'],self.config['port_broker']), json=task_completed) # Reset results counter for next task self.results_counter = 0 def register_task_failure(self, task, repo_id, e): - logging.info("Worker ran into an error for task: {}\n".format(task)) - logging.info("Printing traceback...\n") + self.logger.error("Worker ran into an error for task: {}\n".format(task)) + self.logger.error("Printing traceback...\n") tb = traceback.format_exc() - logging.info(tb) + self.logger.error(tb) - logging.info(f'This task inserted {self.results_counter} tuples before failure.\n') - logging.info("Notifying broker and logging task failure in database...\n") - key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else "INVALID_GIVEN" + self.logger.info(f'This task inserted {self.results_counter} tuples before failure.\n') + self.logger.info("Notifying broker and logging task failure in database...\n") + key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \ + 'gitlab_url' if 'gitlab_url' in task['given'] else 'INVALID_GIVEN' url = task['given'][key] """ Query all repos with repo url of given task """ @@ -781,9 +1150,11 @@ def register_task_failure(self, task, repo_id, e): requests.post("http://{}:{}/api/unstable/task_error".format( self.config['host_broker'],self.config['port_broker']), json=task) except requests.exceptions.ConnectionError: - logging.error('Could not send task failure message to the broker\n') + self.logger.error('Could not send task failure message to the broker\n') + self.logger.error(e) except Exception: - logging.exception('An error occured while informing broker about task failure\n') + self.logger.error('An error occured while informing broker about task failure\n') + self.logger.error(e) # Add to history table task_history = { @@ -797,7 +1168,7 @@ def register_task_failure(self, task, repo_id, e): } self.helper_db.execute(self.worker_history_table.update().where(self.worker_history_table.c.history_id==self.history_id).values(task_history)) - logging.info("Recorded job error in the history table for: " + str(task) + "\n") + self.logger.error("Recorded job error in the history table 
for: " + str(task) + "\n") # Update job process table updated_job = { @@ -807,7 +1178,7 @@ def register_task_failure(self, task, repo_id, e): "analysis_state": 0 } self.helper_db.execute(self.worker_job_table.update().where(self.worker_job_table.c.job_model==task['models'][0]).values(updated_job)) - logging.info("Updated job process for model: " + task['models'][0] + "\n") + self.logger.info("Updated job process for model: " + task['models'][0] + "\n") # Reset results counter for next task self.results_counter = 0 @@ -830,35 +1201,97 @@ def retrieve_tuple(self, key_values, tables): SELECT * FROM {} WHERE {} """.format(table_str, where_str)) values = json.loads(pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")) - return values + return values + + def update_gitlab_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): + # Try to get rate limit from request headers, sometimes it does not work (GH's issue) + # In that case we just decrement from last recieved header count + if bad_credentials and len(self.oauths) > 1: + self.logger.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) + del self.oauths[0] + + if temporarily_disable: + self.logger.info("Gitlab rate limit reached. Temp. disabling...\n") + self.oauths[0]['rate_limit'] = 0 + else: + try: + self.oauths[0]['rate_limit'] = int(response.headers['RateLimit-Remaining']) + self.logger.info("Recieved rate limit from headers\n") + except: + self.oauths[0]['rate_limit'] -= 1 + self.logger.info("Headers did not work, had to decrement\n") + self.logger.info("Updated rate limit, you have: " + + str(self.oauths[0]['rate_limit']) + " requests remaining.\n") + if self.oauths[0]['rate_limit'] <= 0: + try: + reset_time = response.headers['RateLimit-Reset'] + except Exception as e: + self.logger.info("Could not get reset time from headers because of error: {}".format(e)) + reset_time = 3600 + time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() + self.logger.info("Rate limit exceeded, checking for other available keys to use.\n") + + # We will be finding oauth with the highest rate limit left out of our list of oauths + new_oauth = self.oauths[0] + # Endpoint to hit solely to retrieve rate limit information from headers of the response + url = "https://gitlab.com/api/v4/version" + + other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] + for oauth in other_oauths: + self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) + self.headers = {"PRIVATE-TOKEN" : oauth['access_token']} + response = requests.get(url=url, headers=self.headers) + oauth['rate_limit'] = int(response.headers['RateLimit-Remaining']) + oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() + + # Update oauth to switch to if a higher limit is found + if oauth['rate_limit'] > new_oauth['rate_limit']: + self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth)) + new_oauth = oauth + elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: + self.logger.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) + new_oauth = oauth + + if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: + self.logger.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) + 
time.sleep(new_oauth['seconds_to_reset']) + + # Make new oauth the 0th element in self.oauths so we know which one is in use + index = self.oauths.index(new_oauth) + self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] + self.logger.info("Using oauth: {}\n".format(self.oauths[0])) + + # Change headers to be using the new oauth's key + self.headers = {"PRIVATE-TOKEN" : self.oauths[0]['access_token']} + def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False): # Try to get rate limit from request headers, sometimes it does not work (GH's issue) # In that case we just decrement from last recieved header count if bad_credentials and len(self.oauths) > 1: - logging.info("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) + self.logger.warning("Removing oauth with bad credentials from consideration: {}".format(self.oauths[0])) del self.oauths[0] if temporarily_disable: - logging.info("Github thinks we are abusing their api. Preventing use of this key until it resets...\n") + self.logger.debug("Github thinks we are abusing their api. Preventing use of this key until it resets...\n") self.oauths[0]['rate_limit'] = 0 else: try: self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - logging.info("Recieved rate limit from headers\n") + self.logger.info("Recieved rate limit from headers\n") except: self.oauths[0]['rate_limit'] -= 1 - logging.info("Headers did not work, had to decrement\n") - logging.info("Updated rate limit, you have: " + + self.logger.info("Headers did not work, had to decrement\n") + self.logger.info("Updated rate limit, you have: " + str(self.oauths[0]['rate_limit']) + " requests remaining.\n") if self.oauths[0]['rate_limit'] <= 0: try: reset_time = response.headers['X-RateLimit-Reset'] except Exception as e: - logging.info("Could not get reset time from headers because of error: {}".format(error)) + self.logger.error("Could not get reset time from headers because of error: {}".format(e)) reset_time = 3600 time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now() - logging.info("Rate limit exceeded, checking for other available keys to use.\n") + self.logger.info("Rate limit exceeded, checking for other available keys to use.\n") # We will be finding oauth with the highest rate limit left out of our list of oauths new_oauth = self.oauths[0] @@ -867,28 +1300,48 @@ def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disa other_oauths = self.oauths[0:] if len(self.oauths) > 1 else [] for oauth in other_oauths: - logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) + self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth)) self.headers = {'Authorization': 'token %s' % oauth['access_token']} - response = requests.get(url=url, headers=self.headers) - oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) - oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() + + attempts = 3 + success = False + while attempts > 0 and not success: + response = requests.get(url=url, headers=self.headers) + try: + oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining']) + oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds() + success = True + except Exception as e: + self.logger.info(f'oath 
method ran into error getting info from headers: {e}\n') + self.logger.info(f'{self.headers}\n{url}\n') + attempts -= 1 + if not success: + continue # Update oauth to switch to if a higher limit is found if oauth['rate_limit'] > new_oauth['rate_limit']: - logging.info("Higher rate limit found in oauth: {}\n".format(oauth)) + self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth)) new_oauth = oauth elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']: - logging.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) + self.logger.info("Lower wait time found in oauth with same rate limit: {}\n".format(oauth)) new_oauth = oauth if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0: - logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) + self.logger.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}\n".format(new_oauth)) time.sleep(new_oauth['seconds_to_reset']) # Make new oauth the 0th element in self.oauths so we know which one is in use index = self.oauths.index(new_oauth) self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0] - logging.info("Using oauth: {}\n".format(self.oauths[0])) + self.logger.info("Using oauth: {}\n".format(self.oauths[0])) # Change headers to be using the new oauth's key self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']} + + def update_rate_limit(self, response, bad_credentials=False, temporarily_disable=False, platform="gitlab"): + if platform == 'gitlab': + return self.update_gitlab_rate_limit(response, bad_credentials=bad_credentials, + temporarily_disable=temporarily_disable) + elif platform == 'github': + return self.update_gh_rate_limit(response, bad_credentials=bad_credentials, + temporarily_disable=temporarily_disable) \ No newline at end of file
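The hunk above ends the worker base class, whose rate-limit handling rotates through the available OAuth keys by re-reading the `X-RateLimit-Remaining` / `X-RateLimit-Reset` response headers and promoting whichever key has the most requests left (breaking ties by the shortest time until reset). A minimal standalone sketch of that selection step is below; it assumes a plain, non-empty list of token strings and probes GitHub's `/rate_limit` endpoint rather than the user endpoint the worker itself hits, so treat the URL and the `keys` argument as illustrative placeholders, not values taken from the worker's config.

```python
import datetime
import time
import requests

RATE_CHECK_URL = "https://api.github.com/rate_limit"  # probe endpoint (assumption, not from the worker config)

def pick_best_key(keys):
    """Return info for the token with the most remaining requests, breaking
    ties by the shortest time until its limit resets (mirrors the worker's
    oauth-rotation logic). Assumes `keys` is a non-empty list of token strings."""
    best = None
    for key in keys:
        headers = {"Authorization": "token %s" % key}
        resp = requests.get(RATE_CHECK_URL, headers=headers)
        remaining = int(resp.headers.get("X-RateLimit-Remaining", "0"))
        reset_at = int(resp.headers.get("X-RateLimit-Reset", str(int(time.time()))))
        seconds_to_reset = (datetime.datetime.fromtimestamp(reset_at)
                            - datetime.datetime.now()).total_seconds()
        candidate = {"token": key,
                     "rate_limit": remaining,
                     "seconds_to_reset": seconds_to_reset}
        # Prefer a higher remaining limit; on a tie, prefer the sooner reset
        if (best is None
                or candidate["rate_limit"] > best["rate_limit"]
                or (candidate["rate_limit"] == best["rate_limit"]
                    and candidate["seconds_to_reset"] < best["seconds_to_reset"])):
            best = candidate
    if best["rate_limit"] <= 0 and best["seconds_to_reset"] > 0:
        # Every key is exhausted: wait out the shortest reset window, as the worker does
        time.sleep(best["seconds_to_reset"])
    return best
```

In the worker itself the chosen key is additionally swapped into position 0 of `self.oauths` and `self.headers` is rebuilt from it, so that all subsequent requests in the same task use the freshest key.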
diff --git a/test/api/test_experimental_routes.py b/test/api/test_experimental_routes.py deleted file mode 100644 --- a/test/api/test_experimental_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_insight_routes.py b/test/api/test_insight_routes.py deleted file mode 100644 --- a/test/api/test_insight_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_message_routes.py b/test/api/test_message_routes.py deleted file mode 100644 --- a/test/api/test_message_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_platform_routes.py b/test/api/test_platform_routes.py deleted file mode 100644 --- a/test/api/test_platform_routes.py +++ /dev/null @@ -1,7 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="session") -def metrics(): - pass diff --git a/test/api/test_util_routes.py b/test/api/test_util_routes.py deleted file mode 100644 --- a/test/api/test_util_routes.py +++ /dev/null @@ -1,31 +0,0 @@ -import requests -import pytest - [email protected](scope="session") -def metrics(): - pass - -def test_common(endpoint="http://localhost:5000/api/unstable/repos"): - response = requests.get(endpoint) - data = response.json() - assert response.status_code == 200 - assert len(data) >= 1 - -def test_get_all_repos(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repos') - -def test_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_repos_in_repo_groups(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups') - -def test_get_repo_for_dosocs(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/dosocs/repos') - -def test_aggregate_summary_by_repo(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/repos/25430/aggregate-summary') - -def test_aggregate_summary_by_group(metrics): - return test_common(endpoint='http://localhost:5000/api/unstable/repo-groups/10/aggregate-summary') - diff --git a/test/metrics/test_experimental_metrics.py b/test/metrics/test_experimental_metrics.py deleted file mode 100644 --- a/test/metrics/test_experimental_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_insight_metrics.py b/test/metrics/test_insight_metrics.py deleted file mode 100644 --- a/test/metrics/test_insight_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_message_metrics.py b/test/metrics/test_message_metrics.py deleted file mode 100644 --- a/test/metrics/test_message_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_platform_metrics.py 
b/test/metrics/test_platform_metrics.py deleted file mode 100644 --- a/test/metrics/test_platform_metrics.py +++ /dev/null @@ -1,9 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics diff --git a/test/metrics/test_util_metrics.py b/test/metrics/test_util_metrics.py deleted file mode 100644 --- a/test/metrics/test_util_metrics.py +++ /dev/null @@ -1,14 +0,0 @@ -#SPDX-License-Identifier: MIT - -import pytest - [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - -# def test_get_repos_for_dosocs(metrics): -# assert metrics.get_repos_for_dosocs().isin( -# ['/home/sean/git-repos/25430/github.com/rails/rails-dom-testing']).any().any() - diff --git a/augur/housekeeper/__init__.py b/tests/__init__.py similarity index 100% rename from augur/housekeeper/__init__.py rename to tests/__init__.py diff --git a/tests/test_application.py b/tests/test_application.py new file mode 100644 --- /dev/null +++ b/tests/test_application.py @@ -0,0 +1,20 @@ +import pytest +import augur.application +import sqlalchemy as s +import json + +from augur.application import Application + +def test_init_augur_regular(): + augur_app = Application(disable_logs=True) + assert augur_app is not None + +def test_connect_to_database(monkeypatch): + def mock_fail_connection(self): + raise(s.exc.OperationalError("fake", "error", "message")) + + monkeypatch.setattr(s.engine.Engine, "connect", mock_fail_connection) + monkeypatch.setenv("AUGUR_LOG_QUIET", "1") + + with pytest.raises(s.exc.OperationalError): + augur_app = Application(disable_logs=True) diff --git a/test/metrics/test_commit_metrics.py b/tests/test_metrics/test_commit_metrics.py similarity index 90% rename from test/metrics/test_commit_metrics.py rename to tests/test_metrics/test_commit_metrics.py --- a/test/metrics/test_commit_metrics.py +++ b/tests/test_metrics/test_commit_metrics.py @@ -2,12 +2,6 @@ import pytest [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_annual_commit_count_ranked_by_repo_in_repo_group(metrics): assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10).iloc[0].net > 0 assert metrics.annual_commit_count_ranked_by_repo_in_repo_group(10, 25430).iloc[0].net > 0 diff --git a/test/metrics/test_contributor_metrics.py b/tests/test_metrics/test_contributor_metrics.py similarity index 91% rename from test/metrics/test_contributor_metrics.py rename to tests/test_metrics/test_contributor_metrics.py --- a/test/metrics/test_contributor_metrics.py +++ b/tests/test_metrics/test_contributor_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_contributors(metrics): # repo group assert metrics.contributors(20).iloc[0]['total'] > 0 diff --git a/test/metrics/test_issue_metrics.py b/tests/test_metrics/test_issue_metrics.py similarity index 97% rename from test/metrics/test_issue_metrics.py rename to tests/test_metrics/test_issue_metrics.py --- a/test/metrics/test_issue_metrics.py +++ b/tests/test_metrics/test_issue_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def 
test_issues_new(metrics): #repo_id assert metrics.issues_new(10, 25430, period='year').iloc[0]['issues'] > 0 diff --git a/test/metrics/test_pull_request_metrics.py b/tests/test_metrics/test_pull_request_metrics.py similarity index 91% rename from test/metrics/test_pull_request_metrics.py rename to tests/test_metrics/test_pull_request_metrics.py --- a/test/metrics/test_pull_request_metrics.py +++ b/tests/test_metrics/test_pull_request_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_pull_requests_merge_contributor_new(metrics): # repo id assert metrics.pull_requests_merge_contributor_new(10, repo_id=25430, period='year').isin( diff --git a/test/metrics/test_repo_meta_metrics.py b/tests/test_metrics/test_repo_meta_metrics.py similarity index 96% rename from test/metrics/test_repo_meta_metrics.py rename to tests/test_metrics/test_repo_meta_metrics.py --- a/test/metrics/test_repo_meta_metrics.py +++ b/tests/test_metrics/test_repo_meta_metrics.py @@ -3,12 +3,6 @@ import pytest import pandas as pd [email protected](scope="module") -def metrics(): - import augur - augur_app = augur.Application() - return augur_app.metrics - def test_code_changes(metrics): #repo_id assert metrics.code_changes(10, 25430, period='year').isin([pd.Timestamp('2019-01-01T00:00:00+00:00'), 2]).any().any() diff --git a/test/api/runner.py b/tests/test_routes/runner.py similarity index 84% rename from test/api/runner.py rename to tests/test_routes/runner.py --- a/test/api/runner.py +++ b/tests/test_routes/runner.py @@ -9,9 +9,10 @@ start = subprocess.Popen(["augur", "run", "--disable-housekeeper", "--skip-cleanup"], stdout=FNULL, stderr=subprocess.STDOUT) print("Waiting for the server to start...") time.sleep(5) -process = subprocess.run(["pytest", "-ra", "--tb=short", "-x", "test/metrics"]) + +process = subprocess.run(["pytest", "tests/test_routes/"]) time.sleep(2) + subprocess.Popen(["augur", "util", "kill"], stdout=FNULL, stderr=subprocess.STDOUT) print("Server successfully shutdown.") - sys.exit(process.returncode) diff --git a/test/api/test_commit_routes.py b/tests/test_routes/test_commit_routes.py similarity index 97% rename from test/api/test_commit_routes.py rename to tests/test_routes/test_commit_routes.py --- a/test/api/test_commit_routes.py +++ b/tests/test_routes/test_commit_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_annual_commit_count_ranked_by_new_repo_in_repo_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/annual-commit-count-ranked-by-new-repo-in-repo-group/') data = response.json() diff --git a/test/api/test_contributor_routes.py b/tests/test_routes/test_contributor_routes.py similarity index 95% rename from test/api/test_contributor_routes.py rename to tests/test_routes/test_contributor_routes.py --- a/test/api/test_contributor_routes.py +++ b/tests/test_routes/test_contributor_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_contributors_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/contributors') data = response.json() diff --git a/test/api/test_issue_routes.py b/tests/test_routes/test_issue_routes.py similarity index 99% rename from test/api/test_issue_routes.py rename to 
tests/test_routes/test_issue_routes.py --- a/test/api/test_issue_routes.py +++ b/tests/test_routes/test_issue_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_issues_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/issues-new') data = response.json() diff --git a/test/api/test_pull_request_routes.py b/tests/test_routes/test_pull_request_routes.py similarity index 94% rename from test/api/test_pull_request_routes.py rename to tests/test_routes/test_pull_request_routes.py --- a/test/api/test_pull_request_routes.py +++ b/tests/test_routes/test_pull_request_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_pull_requests_merge_contributor_new_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/pull-requests-merge-contributor-new') data = response.json() diff --git a/test/api/test_repo_meta_routes.py b/tests/test_routes/test_repo_meta_routes.py similarity index 98% rename from test/api/test_repo_meta_routes.py rename to tests/test_routes/test_repo_meta_routes.py --- a/test/api/test_repo_meta_routes.py +++ b/tests/test_routes/test_repo_meta_routes.py @@ -1,10 +1,6 @@ import requests import pytest [email protected](scope="session") -def metrics(): - pass - def test_code_changes_by_group(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/code-changes') data = response.json() @@ -51,7 +47,6 @@ def test_sub_projects_by_repo(metrics): def test_cii_best_practices_badge_by_repo(metrics): response = requests.get('http://localhost:5000/api/unstable/repo-groups/10/repos/25430/cii-best-practices-badge') - print(response) data = response.json() assert response.status_code == 200 assert len(data) >= 1 diff --git a/tests/test_routes/test_util_routes.py b/tests/test_routes/test_util_routes.py new file mode 100644 --- /dev/null +++ b/tests/test_routes/test_util_routes.py @@ -0,0 +1,20 @@ +import requests +import pytest + +from conftest import create_full_routes + +util_routes = [\ +"repos",\ +"repo-groups",\ +"repo-groups",\ +"dosocs/repos",\ +"repo-groups/<default_repo_group_id>/aggregate-summary",\ +"repo-groups/<default_repo_group_id>/repos/<default_repo_id>/aggregate-summary",\ +] + [email protected]("endpoint", create_full_routes(util_routes)) +def test_base_test(client, endpoint): + response = client.get(endpoint) + data = response.get_json() + assert response.status_code == 200 + assert len(data) >= 1 diff --git a/tests/test_workers/test_repo_info_worker.py b/tests/test_workers/test_repo_info_worker.py new file mode 100644 --- /dev/null +++ b/tests/test_workers/test_repo_info_worker.py @@ -0,0 +1,29 @@ +import pytest +from time import sleep + +from workers.repo_info_worker.repo_info_worker import RepoInfoWorker + [email protected] +def test_task(): + return { + "given": { + "github_url": "https://github.com/chaoss/augur.git" + }, + "models": ["repo_info"], + "job_type": "MAINTAIN", + "display_name": "repo_info model for url: https://github.com/chaoss/augur.git", + "focused_task": 1 + } + [email protected] +def repo_info_worker(): + config = { + "offline_mode": True, + "quiet": True + } + + repo_info_worker = RepoInfoWorker(config=config) + return repo_info_worker + +def test_repo_info_worker(repo_info_worker, test_task): + assert repo_info_worker is not None diff --git a/test/__init__.py b/workers/contributor_worker/__init__.py 
similarity index 100% rename from test/__init__.py rename to workers/contributor_worker/__init__.py diff --git a/test/test_model.py b/workers/github_worker/__init__.py similarity index 100% rename from test/test_model.py rename to workers/github_worker/__init__.py diff --git a/workers/metric_status_worker/tests/tests_worker.py b/workers/metric_status_worker/tests/tests_worker.py deleted file mode 100644 --- a/workers/metric_status_worker/tests/tests_worker.py +++ /dev/null @@ -1,16 +0,0 @@ -import requests -import pytest - -from metric_status_worker.worker import MetricsStatus - -def test_get_metric_index_in_table_row(): - row = "metric |sTatuS|TestString" - metric_status = MetricsStatus("api.github.com") - result = metric_status.get_metric_index_in_table_row(row) - print(result) - assert result == (0, 3) - -def test_is_has_link(): - metric_status = MetricsStatus("api.github.com") - re_result = metric_status.is_has_link(" [oss](augur" , None) - assert re_result == ('oss', 'augur') diff --git a/workers/tests/test_standard_methods.py b/workers/tests/test_standard_methods.py deleted file mode 100644 --- a/workers/tests/test_standard_methods.py +++ /dev/null @@ -1,28 +0,0 @@ -# Sample Test passing with nose and pytest -import pandas as pd -import pytest -from workers.standard_methods import check_duplicates, dump_queue, read_config -from queue import Queue - - -def test_check_duplicates(): - obj = {"website":["walmart.com"]} - new_data = [obj] - table_values = pd.read_csv("augur/data/companies.csv") - assert check_duplicates(new_data, table_values, "website") == [obj] - -def test_dump_queues(): - sample_queue = Queue() - list_sample = ["[email protected]", "[email protected]", "[email protected]"] - for list_item in list_sample: - sample_queue.put(list_item) - queue_to_list = dump_queue(sample_queue) - assert queue_to_list == ["[email protected]", "[email protected]", "[email protected]"] - -def test_read_config_no_exception(): - db_name = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur',config_file_path="augur.config.json") - assert db_name == "augur" - -def test_read_config_exception(): - with pytest.raises(AttributeError): - db_name = read_config('Server', 'username')
repo_info worker: dev/test branch Please help us help you by filling out the following sections as thoroughly as you can. **Description:** Looks like the new Fork information collection has some kind of mismatch between the method and parameters passed: ``` INFO:root:Worker ran into an error for task: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}} INFO:root:Printing traceback... INFO:root:Traceback (most recent call last): File "/mnt/md0/github/augur-census/workers/worker_base.py", line 189, in collect model_method(message, repo_id) File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 149, in repo_info_model forked = self.is_forked(owner, repo) File "/mnt/md0/github/augur-census/workers/repo_info_worker/repo_info_worker/worker.py", line 232, in is_forked data = self.get_repo_data(self, url, r) TypeError: get_repo_data() takes 3 positional arguments but 4 were given INFO:root:This task inserted 0 tuples before failure. INFO:root:Notifying broker and logging task failure in database... INFO:werkzeug:127.0.0.1 - - [31/May/2020 18:48:58] "GET /AUGWOP/heartbeat HTTP/1.1" 200 - INFO:root:Recorded job error in the history table for: {'job_type': 'MAINTAIN', 'models': ['repo_info'], 'display_name': 'repo_info model for url: https://github.com/davepacheco/node-verror.git', 'given': {'github_url': 'https://github.com/davepacheco/node-verror.git'}, 'worker_id': 'com.augurlabs.core.repo_info_worker.50700'} INFO:root:Updated job process for model: repo_info ``` If the log does not provide enough info, let me know
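The traceback narrows this down to a call-shape bug: `is_forked` invokes the bound method `get_repo_data` with an explicit `self`, so Python receives four positional arguments instead of three. Below is a minimal, hypothetical sketch of that failure mode and the likely one-line fix; the class and method bodies are stand-ins, and only the call shape is taken from the log above.

```python
# Hypothetical stand-in for the worker class; only the call shape mirrors the traceback.
class RepoInfoWorker:
    def get_repo_data(self, url, response):
        # Placeholder body; the real worker parses the GitHub API response here.
        return {"url": url, "response": response}

    def is_forked_buggy(self, url, response):
        # The bound method already receives self, so this passes 4 positional args:
        # TypeError: get_repo_data() takes 3 positional arguments but 4 were given
        return self.get_repo_data(self, url, response)

    def is_forked_fixed(self, url, response):
        # Likely fix: drop the explicit self.
        return self.get_repo_data(url, response)


worker = RepoInfoWorker()
print(worker.is_forked_fixed("https://github.com/davepacheco/node-verror.git", 200))
```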
2020-08-19T23:57:52Z
[]
[]
ros/ros_comm
433
ros__ros_comm-433
[ "424" ]
c5236aa6d3be30e5902768dae8c4e90f4122f69f
diff --git a/utilities/message_filters/src/message_filters/__init__.py b/utilities/message_filters/src/message_filters/__init__.py --- a/utilities/message_filters/src/message_filters/__init__.py +++ b/utilities/message_filters/src/message_filters/__init__.py @@ -30,6 +30,7 @@ ====================== """ +import itertools import threading import rospy @@ -143,3 +144,33 @@ def add(self, msg, my_queue): for q in self.queues: del q[t] self.lock.release() + +class ApproximateTimeSynchronizer(TimeSynchronizer): + + """ + Approximately synchronizes messages by their timestamps. + + :class:`ApproximateTimeSynchronizer` synchronizes incoming message filters by the + timestamps contained in their messages' headers. The API is the same as TimeSynchronizer + except for an extra `slop` parameter in the constructor that defines the delay (in seconds) + with which messages can be synchronized + """ + + def __init__(self, fs, queue_size, slop): + TimeSynchronizer.__init__(self, fs, queue_size) + self.slop = rospy.Duration.from_sec(slop) + + def add(self, msg, my_queue): + self.lock.acquire() + my_queue[msg.header.stamp] = msg + while len(my_queue) > self.queue_size: + del my_queue[min(my_queue)] + for vv in itertools.product(*[list(q.keys()) for q in self.queues]): + qt = list(zip(self.queues, vv)) + if ( ((max(vv) - min(vv)) < self.slop) and + (len([1 for q,t in qt if t not in q]) == 0) ): + msgs = [q[t] for q,t in qt] + self.signalMessage(*msgs) + for q,t in qt: + del q[t] + self.lock.release()
diff --git a/utilities/message_filters/test/test_approxsync.py b/utilities/message_filters/test/test_approxsync.py new file mode 100644 --- /dev/null +++ b/utilities/message_filters/test/test_approxsync.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# +# Software License Agreement (BSD License) +# +# Copyright (c) 2009, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of the Willow Garage nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import rostest +import rospy +import unittest +import random + +import message_filters +from message_filters import ApproximateTimeSynchronizer + +class MockHeader: + pass + +class MockMessage: + def __init__(self, stamp, data): + self.header = MockHeader() + self.header.stamp = stamp + self.data = data + +class MockFilter(message_filters.SimpleFilter): + pass + +class TestApproxSync(unittest.TestCase): + + def cb_collector_2msg(self, msg1, msg2): + self.collector.append((msg1, msg2)) + + def test_approx(self): + m0 = MockFilter() + m1 = MockFilter() + ts = ApproximateTimeSynchronizer([m0, m1], 1, 0.1) + ts.registerCallback(self.cb_collector_2msg) + + if 0: + # Simple case, pairs of messages, make sure that they get combined + for t in range(10): + self.collector = [] + msg0 = MockMessage(t, 33) + msg1 = MockMessage(t, 34) + m0.signalMessage(msg0) + self.assertEqual(self.collector, []) + m1.signalMessage(msg1) + self.assertEqual(self.collector, [(msg0, msg1)]) + + # Scramble sequences of length N. Make sure that TimeSequencer recombines them. 
+ random.seed(0) + for N in range(1, 10): + m0 = MockFilter() + m1 = MockFilter() + seq0 = [MockMessage(rospy.Time(t), random.random()) for t in range(N)] + seq1 = [MockMessage(rospy.Time(t), random.random()) for t in range(N)] + # random.shuffle(seq0) + ts = ApproximateTimeSynchronizer([m0, m1], N, 0.1) + ts.registerCallback(self.cb_collector_2msg) + self.collector = [] + for msg in random.sample(seq0, N): + m0.signalMessage(msg) + self.assertEqual(self.collector, []) + for msg in random.sample(seq1, N): + m1.signalMessage(msg) + self.assertEqual(set(self.collector), set(zip(seq0, seq1))) + +if __name__ == '__main__': + if 1: + rostest.unitrun('camera_calibration', 'testapproxsync', TestApproxSync) + else: + suite = unittest.TestSuite() + suite.addTest(TestApproxSync('test_approx')) + unittest.TextTestRunner(verbosity=2).run(suite)
message_filters: Incorporate python approximate time synchronizer https://github.com/ros-perception/image_pipeline/issues/76
Please consider providing a pull request for this feature and also contributing to the documentation for it. Well, it is already here :) https://github.com/ros/ros_comm/blob/indigo-devel/utilities/message_filters/src/message_filters/__init__.py#L101 So you can close this one. Sorry, I missed it: this is approximate. Should I create a new class or just add a slop parameter like done in the camera_calibration file? (That would default to infinity, I guess.) A parameter seems reasonable, though I'd make the slop default to zero ;-) For ease of use it might be worth implementing it in the regular synchronizer using a semi-protected method, and then have a separate wrapper class which invokes that code path and provides the clear interface.
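Based on the patch above, `ApproximateTimeSynchronizer` was added as a separate class that keeps the `TimeSynchronizer` constructor plus an extra `slop` argument in seconds. A minimal usage sketch follows; the topic names and message types are illustrative assumptions, not part of the original discussion.

```python
import rospy
import message_filters
from sensor_msgs.msg import CameraInfo, Image


def callback(image, camera_info):
    # Invoked with one message per topic whose stamps lie within `slop` seconds of each other.
    rospy.loginfo("synced pair: %s / %s", image.header.stamp, camera_info.header.stamp)


rospy.init_node('approx_sync_example')
image_sub = message_filters.Subscriber('image', Image)
info_sub = message_filters.Subscriber('camera_info', CameraInfo)

# Same API as TimeSynchronizer, plus the slop (allowed stamp difference, in seconds).
ts = message_filters.ApproximateTimeSynchronizer([image_sub, info_sub], 10, 0.1)
ts.registerCallback(callback)
rospy.spin()
```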
2014-06-07T12:22:42Z
[]
[]
ros/ros_comm
492
ros__ros_comm-492
[ "485" ]
2c4af852eafd5335dc2dff7446a1879519a8e2b6
diff --git a/clients/rospy/src/rospy/exceptions.py b/clients/rospy/src/rospy/exceptions.py --- a/clients/rospy/src/rospy/exceptions.py +++ b/clients/rospy/src/rospy/exceptions.py @@ -61,6 +61,15 @@ class ROSInterruptException(ROSException, KeyboardInterrupt): """ pass +class ROSTimeMovedBackwardsException(ROSInterruptException): + """ + Exception if time moved backwards + """ + def __init__(self, time): + self.time = time + """The amount of time in seconds.""" + super(ROSTimeMovedBackwardsException, self).__init__("ROS time moved backwards") + class ROSInternalException(Exception): """ Base class for exceptions that are internal to the ROS system diff --git a/clients/rospy/src/rospy/timer.py b/clients/rospy/src/rospy/timer.py --- a/clients/rospy/src/rospy/timer.py +++ b/clients/rospy/src/rospy/timer.py @@ -65,8 +65,10 @@ def sleep(self): account the time elapsed since the last successful sleep(). - @raise ROSInterruptException: if ROS time is set backwards or if - ROS shutdown occurs before sleep completes + @raise ROSInterruptException: if ROS shutdown occurs before + sleep completes + @raise ROSTimeMovedBackwardsException: if ROS time is set + backwards """ curr_time = rospy.rostime.get_rostime() # detect time jumping backwards @@ -83,7 +85,6 @@ def sleep(self): if curr_time - self.last_time > self.sleep_dur * 2: self.last_time = curr_time -# TODO: may want more specific exceptions for sleep def sleep(duration): """ sleep for the specified duration in ROS time. If duration @@ -91,8 +92,10 @@ def sleep(duration): @param duration: seconds (or rospy.Duration) to sleep @type duration: float or Duration - @raise ROSInterruptException: if ROS time is set backwards or if - ROS shutdown occurs before sleep completes + @raise ROSInterruptException: if ROS shutdown occurs before sleep + completes + @raise ROSTimeMovedBackwardsException: if ROS time is set + backwards """ if rospy.rostime.is_wallclock(): if isinstance(duration, genpy.Duration): @@ -128,7 +131,9 @@ def sleep(duration): rostime_cond.wait(0.5) if rospy.rostime.get_rostime() < initial_rostime: - raise rospy.exceptions.ROSInterruptException("ROS time moved backwards") + time_jump = (initial_rostime - rospy.rostime.get_rostime()).to_sec() + rospy.core.logerr("ROS time moved backwards: %ss", time_jump) + raise rospy.exceptions.ROSTimeMovedBackwardsException(time_jump) if rospy.core.is_shutdown(): raise rospy.exceptions.ROSInterruptException("ROS shutdown request")
diff --git a/test/test_rospy/test/unit/test_rospy_exceptions.py b/test/test_rospy/test/unit/test_rospy_exceptions.py --- a/test/test_rospy/test/unit/test_rospy_exceptions.py +++ b/test/test_rospy/test/unit/test_rospy_exceptions.py @@ -62,3 +62,16 @@ def test_ROSInterruptException(self): raise ROSInterruptException("test") except KeyboardInterrupt: pass + + def test_ROSTimeMovedBackwardsException(self): + from rospy.exceptions import ROSTimeMovedBackwardsException, ROSInterruptException + try: + raise ROSTimeMovedBackwardsException(1.0) + except ROSInterruptException as e: + # ensure the message is not changed, because old code may check it + self.assertEqual("ROS time moved backwards", e.message) + try: + time = 1.0 + raise ROSTimeMovedBackwardsException(time) + except ROSTimeMovedBackwardsException as e: + self.assertEqual(time, e.time)
rospy.sleep(): request for specific exception if time moved backwards I kindly request that the TODO of rospy.sleep() in [rospy.timer.py](https://github.com/ros/ros_comm/blob/hydro-devel/clients/rospy/src/rospy/timer.py) be implemented so that a specific exception is raised if ROS time moved backwards. I had trouble finding the cause of a Python node shutting down when using 'rosbag play -l --clock bagfile'. When the implementation follows the ROS tutorials on Python, the ROSInterruptException is always caught: ``` python if __name__ == '__main__': try: talker() except rospy.ROSInterruptException: pass ``` And there is no error message telling you that time moved backwards.
Since it is not very likely that this will get implemented by the maintainer, I have marked the issue with the milestone `untargeted`. Please consider providing a pull request for the proposed feature. One comment though: a newly introduced exception would anyway need to inherit from `rospy.ROSInterruptException` in order not to change the API in terms of raising a new, unrelated exception. I made pull request #491 for the proposed feature. In addition to the exception inheriting from rospy.ROSInterruptException, a logerr message is produced to inform the user. I hope this is ok. I looked into [rospy.topics.py](https://github.com/ros/ros_comm/blob/hydro-devel/clients/rospy/src/rospy/topics.py), which produces log messages, too.
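With the change above, downstream code can tell a time jump (for example, a looping `rosbag play --clock`) apart from a real shutdown. Here is a hedged sketch of the tutorial-style main loop, assuming the `talker()` placeholder from the report; note that `ROSTimeMovedBackwardsException` subclasses `ROSInterruptException`, so it must be caught first.

```python
import rospy
from rospy.exceptions import ROSTimeMovedBackwardsException


def talker():
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        # ... publish messages here ...
        rate.sleep()


if __name__ == '__main__':
    rospy.init_node('talker')
    while not rospy.is_shutdown():
        try:
            talker()
        except ROSTimeMovedBackwardsException as e:
            # e.time holds the size of the backwards jump in seconds (see the patch above).
            rospy.logwarn("ROS time moved backwards by %.3fs, restarting loop", e.time)
        except rospy.ROSInterruptException:
            # Genuine shutdown request; exit quietly as in the tutorials.
            break
```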
2014-08-19T15:09:19Z
[]
[]
ros/ros_comm
831
ros__ros_comm-831
[ "107" ]
fb49cec8460bc6b402d8f31988b8aee7d9eb77cb
diff --git a/clients/rospy/src/rospy/impl/registration.py b/clients/rospy/src/rospy/impl/registration.py --- a/clients/rospy/src/rospy/impl/registration.py +++ b/clients/rospy/src/rospy/impl/registration.py @@ -306,6 +306,8 @@ def run(self): if cond is not None: cond.release() + get_topic_manager().check_all() + #call _connect_topic on all URIs as it can check to see whether #or not a connection exists. if uris and not self.handler.done: diff --git a/clients/rospy/src/rospy/topics.py b/clients/rospy/src/rospy/topics.py --- a/clients/rospy/src/rospy/topics.py +++ b/clients/rospy/src/rospy/topics.py @@ -109,6 +109,10 @@ def isstring(s): # for interfacing with topics, while _TopicImpl implements the # underlying connection details. +if not hasattr(select, 'EPOLLRDHUP'): + select.EPOLLRDHUP = 0x2000 + + class Topic(object): """Base class of L{Publisher} and L{Subscriber}""" @@ -188,25 +192,28 @@ class Poller(object): on multiple platforms. NOT thread-safe. """ def __init__(self): - try: + if hasattr(select, 'epoll'): + self.poller = select.epoll() + self.add_fd = self.add_epoll + self.remove_fd = self.remove_epoll + self.error_iter = self.error_epoll_iter + elif hasattr(select, 'poll'): self.poller = select.poll() self.add_fd = self.add_poll self.remove_fd = self.remove_poll self.error_iter = self.error_poll_iter - except: - try: - # poll() not available, try kqueue - self.poller = select.kqueue() - self.add_fd = self.add_kqueue - self.remove_fd = self.remove_kqueue - self.error_iter = self.error_kqueue_iter - self.kevents = [] - except: - #TODO: non-Noop impl for Windows - self.poller = self.noop - self.add_fd = self.noop - self.remove_fd = self.noop - self.error_iter = self.noop_iter + elif hasattr(select, 'kqueue'): + self.poller = select.kqueue() + self.add_fd = self.add_kqueue + self.remove_fd = self.remove_kqueue + self.error_iter = self.error_kqueue_iter + self.kevents = [] + else: + #TODO: non-Noop impl for Windows + self.poller = self.noop + self.add_fd = self.noop + self.remove_fd = self.noop + self.error_iter = self.noop_iter def noop(self, *args): pass @@ -228,6 +235,18 @@ def error_poll_iter(self): if event & (select.POLLHUP | select.POLLERR): yield fd + def add_epoll(self, fd): + self.poller.register(fd, select.EPOLLHUP|select.EPOLLERR|select.EPOLLRDHUP) + + def remove_epoll(self, fd): + self.poller.unregister(fd) + + def error_epoll_iter(self): + events = self.poller.poll(0) + for fd, event in events: + if event & (select.EPOLLHUP | select.EPOLLERR | select.EPOLLRDHUP): + yield fd + def add_kqueue(self, fd): self.kevents.append(select.kevent(fd)) @@ -439,6 +458,17 @@ def cleanup_cb_wrapper(s): return True + def check(self): + fds_to_remove = list(self.connection_poll.error_iter()) + if fds_to_remove: + with self.c_lock: + new_connections = self.connections[:] + to_remove = [x for x in new_connections if x.fileno() in fds_to_remove] + for x in to_remove: + rospydebug("removing connection to %s, connection error detected"%(x.endpoint_id)) + self._remove_connection(new_connections, x) + self.connections = new_connections + def remove_connection(self, c): """ Remove connection from topic. @@ -1136,6 +1166,15 @@ def close_all(self): t.close() self.pubs.clear() self.subs.clear() + + + def check_all(self): + """ + Check all registered publication and subscriptions. + """ + with self.lock: + for t in chain(iter(self.pubs.values()), iter(self.subs.values())): + t.check() def _add(self, ps, rmap, reg_type): """
diff --git a/test/test_rospy/CMakeLists.txt b/test/test_rospy/CMakeLists.txt --- a/test/test_rospy/CMakeLists.txt +++ b/test/test_rospy/CMakeLists.txt @@ -57,4 +57,5 @@ if(CATKIN_ENABLE_TESTING) add_rostest(test/rostest/latch.test) add_rostest(test/rostest/on_shutdown.test) add_rostest(test/rostest/sub_to_multiple_pubs.test) + add_rostest(test/rostest/latch_unsubscribe.test) endif() diff --git a/test/test_rospy/nodes/listener_once.py b/test/test_rospy/nodes/listener_once.py new file mode 100755 --- /dev/null +++ b/test/test_rospy/nodes/listener_once.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# Software License Agreement (BSD License) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Willow Garage, Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +## Simple talker demo that listens to std_msgs/Strings published +## to the 'chatter' topic and shuts down afterwards + +from __future__ import print_function + +import rospy +from std_msgs.msg import String + + +def callback(data): + print(rospy.get_caller_id(), "I heard %s" % data.data) + rospy.signal_shutdown("Received %s, exiting now" % data.data) + + +def listener(): + rospy.init_node('listener', anonymous=True) + rospy.sleep(rospy.get_param('delay', 0.0)) + rospy.Subscriber("chatter", String, callback) + rospy.spin() + + +if __name__ == '__main__': + listener() diff --git a/test/test_rospy/package.xml b/test/test_rospy/package.xml --- a/test/test_rospy/package.xml +++ b/test/test_rospy/package.xml @@ -18,6 +18,7 @@ <build_depend>test_rosmaster</build_depend> <test_depend>python-numpy</test_depend> + <test_depend>python-psutil</test_depend> <test_depend>rosbuild</test_depend> <test_depend>rosgraph</test_depend> <test_depend>rospy</test_depend> diff --git a/test/test_rospy/test/rostest/latch_unsubscribe.test b/test/test_rospy/test/rostest/latch_unsubscribe.test new file mode 100644 --- /dev/null +++ b/test/test_rospy/test/rostest/latch_unsubscribe.test @@ -0,0 +1,7 @@ +<launch> + <node name="listener_once_1" pkg="test_rospy" type="listener_once.py" output="screen" /> + <node name="listener_once_2" pkg="test_rospy" type="listener_once.py" output="screen"> + <param name="delay" value="1.0" type="double" /> + </node> + <test test-name="test_latch_unsubscribe" pkg="test_rospy" type="test_latch_unsubscribe.py" /> +</launch> diff --git a/test/test_rospy/test/rostest/test_latch_unsubscribe.py b/test/test_rospy/test/rostest/test_latch_unsubscribe.py new file mode 100755 --- /dev/null +++ b/test/test_rospy/test/rostest/test_latch_unsubscribe.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# Software License Agreement (BSD License) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Willow Garage, Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +PKG = 'test_rospy' +NAME = 'test_latch_unsubscribe' + +import os +import sys +import unittest + +import psutil + +from std_msgs.msg import String + + +def _get_connections(process_info): + if hasattr(process_info, 'connections'): # new naming + return process_info.connections() + elif hasattr(process_info, 'get_connections'): # old naming + return process_info.get_connections() + raise AttributeError('Wrong psutil version?') + + +def _get_connection_statii(process_info): + return (conn.status for conn in _get_connections(process_info)) + + +class TestLatch(unittest.TestCase): + + def setUp(self): + pass + + def test_latch(self): + import rospy + proc_info = psutil.Process(os.getpid()) + self.assertNotIn('CLOSE_WAIT', _get_connection_statii(proc_info), + 'CLOSE_WAIT sockets already before the test. This ' + 'should not happen at all.') + + rospy.init_node(NAME) + pub = rospy.Publisher('chatter', String, latch=True) + pub.publish(String("hello")) + rospy.sleep(0.5) + self.assertNotIn('CLOSE_WAIT', _get_connection_statii(proc_info), + 'CLOSE_WAIT sockets after the subscriber exited. ' + '(#107)') + rospy.sleep(1.5) + # also check for a second subscriber + self.assertNotIn('CLOSE_WAIT', _get_connection_statii(proc_info), + 'CLOSE_WAIT sockets after the second subscriber ' + 'exited. (#107)') + + +if __name__ == '__main__': + import rostest + rostest.run(PKG, NAME, TestLatch, sys.argv)
rospy: a single rospy latching publication continues to track dead nodes (ros ticket #3038) If you create a node which publishes once on a latching topic, and then bring a subscriber up and down, the list of connections in the publisher seems to just keep growing. Terminal 1: {{{ $ rostopic pub chatter std_msgs/String foo }}} Terminal 2: {{{ $ rosrun rospy_tutorials listener.py ^C $ rosrun rospy_tutorials listener.py ^C $ rosrun rospy_tutorials listener.py ^C $ rosnode info /rostopic_27665_1285637405612 ... Connections: - topic: /chatter - to: /listener_27682_1285637409689 - direction: outbound - transport: TCPROS - topic: /chatter - to: /listener_27706_1285637411687 - direction: outbound - transport: TCPROS - topic: /chatter - to: /listener_27729_1285637413320 - direction: outbound - transport: TCPROS }}} The connection list seems to get reset on publication. trac data: - Owner: **kwc** - Reporter: **leibs** - Reported at: **Mon Sep 27 18:35:07 2010** - URL: https://code.ros.org/trac/ros/ticket/3038
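A hedged Python repro of the report: publish once on a latched topic and watch the publisher's connection count while subscribers start and stop, as in the terminal session above. The topic name mirrors the example; without a connection-error check like the one added in the patch, the count only ever grows.

```python
#!/usr/bin/env python
import rospy
from std_msgs.msg import String

rospy.init_node('latching_talker')
pub = rospy.Publisher('chatter', String, latch=True, queue_size=1)
pub.publish(String(data='foo'))  # single latched publication

rate = rospy.Rate(1)
while not rospy.is_shutdown():
    # Start and stop `rosrun rospy_tutorials listener.py` a few times:
    # before the fix this count (and the `rosnode info` output) keeps growing.
    rospy.loginfo("outbound connections: %d", pub.get_num_connections())
    rate.sleep()
```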
2016-07-13T15:29:32Z
[]
[]
ros/ros_comm
1,159
ros__ros_comm-1159
[ "1158" ]
44bbbd1a7c383f1b1901c024010992ad7b17b0c6
diff --git a/tools/roslaunch/src/roslaunch/xmlloader.py b/tools/roslaunch/src/roslaunch/xmlloader.py --- a/tools/roslaunch/src/roslaunch/xmlloader.py +++ b/tools/roslaunch/src/roslaunch/xmlloader.py @@ -370,6 +370,7 @@ def _node_tag(self, tag, context, ros_config, default_machine, is_test=False, ve child_ns = self._ns_clear_params_attr('node', tag, context, ros_config, node_name=name) param_ns = child_ns.child(name) + param_ns.params = [] # This is necessary because child() does not make a copy of the param list. # required attributes pkg, node_type = self.reqd_attrs(tag, context, ('pkg', 'type')) @@ -649,6 +650,7 @@ def _recurse_load(self, ros_config, tags, context, default_machine, is_core, ver if ifunless_test(self, tag, context): self._check_attrs(tag, context, ros_config, XmlLoader.GROUP_ATTRS) child_ns = self._ns_clear_params_attr(name, tag, context, ros_config) + child_ns.params = list(child_ns.params) # copy is needed here to enclose new params default_machine = \ self._recurse_load(ros_config, tag.childNodes, child_ns, \ default_machine, is_core, verbose)
diff --git a/tools/roslaunch/test/unit/test_roslaunch_dump_params.py b/tools/roslaunch/test/unit/test_roslaunch_dump_params.py --- a/tools/roslaunch/test/unit/test_roslaunch_dump_params.py +++ b/tools/roslaunch/test/unit/test_roslaunch_dump_params.py @@ -77,6 +77,10 @@ def test_roslaunch(self): '/node_rosparam/dict1/shoulders': 2, '/node_rosparam/dict1/knees': 3, '/node_rosparam/dict1/toes': 4, + '/node_rosparam/tilde1': 'foo', + '/node_rosparam/local_param': 'baz', + + '/node_rosparam2/tilde1': 'foo', '/inline_str': 'value1', '/inline_list': [1, 2, 3, 4], @@ -99,3 +103,6 @@ def test_roslaunch(self): elif v != output_val[k]: self.fail("key [%s] value [%s] does not match output: %s"%(k, v, output_val[k])) self.assertEquals(val, output_val) + for k in ('/node_rosparam/tilde2', '/node_rosparam2/tilde2', '/node_rosparam2/local_param'): + if k in output_val: + self.fail("key [%s] should not be in output: %s"%(k, output_val)) diff --git a/tools/roslaunch/test/xml/test-dump-rosparam.launch b/tools/roslaunch/test/xml/test-dump-rosparam.launch --- a/tools/roslaunch/test/xml/test-dump-rosparam.launch +++ b/tools/roslaunch/test/xml/test-dump-rosparam.launch @@ -1,15 +1,21 @@ <launch> + <param name="~tilde1" value="foo" /> <rosparam file="$(find roslaunch)/test/dump-params.yaml" command="load" /> <group ns="rosparam"> + <param name="~tilde2" value="bar" /> <rosparam file="$(find roslaunch)/test/dump-params.yaml" command="load" /> </group> <node pkg="package" type="test_base" name="node_rosparam"> + <param name="local_param" value="baz" /> <rosparam file="$(find roslaunch)/test/dump-params.yaml" command="load" /> </node> + <node pkg="package" type="test_base" name="node_rosparam2"> + </node> + <rosparam param="inline_str">value1</rosparam> <rosparam param="inline_list">[1, 2, 3, 4]</rosparam> <rosparam param="inline_dict">{key1: value1, key2: value2}</rosparam>
Roslaunch Creates Unexpected Private Parameters I came across a bug on the implementation of roslaunch. I have tested it both in Indigo and Kinetic, and it is also present in `lunar-devel` from what I can see. Consider the following example launch, ```xml <launch> <node name="driver" pkg="drivers" type="driver"> <param name="param1" value="1" /> <param name="~param2" value="2" /> </node> <node name="controller" pkg="controllers" type="controller" /> </launch> ``` As you would expect, roslaunch creates two parameters: - `/driver/param1` - `/driver/param2` Consider yet another simple example. ```xml <launch> <param name="~param" value="1" /> <node name="driver" pkg="drivers" type="driver" /> <node name="controller" pkg="controllers" type="controller" /> </launch> ``` Again, two parameters, as expected. - `/driver/param` - `/controller/param` If we combine both features, the second node inherits the first node's private parameters, when it should not. ```xml <launch> <param name="~param1" value="1" /> <node name="driver" pkg="drivers" type="driver"> <param name="param2" value="2" /> <param name="~param3" value="3" /> </node> <node name="controller" pkg="controllers" type="controller" /> </launch> ``` This results in ***six*** parameters, instead of four. - `/driver/param1` - `/driver/param2` - `/driver/param3` - `/controller/param1` - `/controller/param2` - `/controller/param3` I have tried variations of isolating either the first or second node in `<group>`, but the result is the same. From my understanding of the code, the problem lies in [line 172 of `loader.py`](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/loader.py#L172) and [line 320](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/loader.py#L320) of the same file. When parsing the first private parameter, the parameter is [added to the root context](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/xmlloader.py#L270). ```python if is_private(name) or force_local: p = Param(name, value) context.add_param(p) ``` When parsing the node tag right after, a child context is created, and this is where the problem lies. The child context does not start with its own list of parameters, it simply [reuses the same list](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/loader.py#L320), over and over. ```python def child(self, ns): ... return LoaderContext(..., params=self.params, ...) ``` The result is that `param1` is added to the list, and `param2` and `param3` are added to the same list as well, because the child context is using the same reference as its parent context. When parsing the second (third, fourth, ...) `<node>`, it will create not only `param1`, but the other parameters as well, since they were wrongly appended to the root list. Why does this work as intended if the first parameter is not present? It works because the list is empty when the first `<node>` is parsed. When creating the child context, an empty list (the root list) is passed to the `LoaderContext` constructor. As seen in [line 172](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/loader.py#L172), if the received list is empty, it does not pass the logical `or`, and a **new** parameter list is created. Since a new list is created, it isolates the parameters properly. 
```python self.params = params or [] ``` I have a few ideas to solve this issue, but I am not sure of the impact of each, and what is the intended behaviour. If one of the following is the clear solution, I can submit a pull request. 1. Change `LoaderContext` constructor. Simply create a new empty list for `self.params` every time. 2. Change the `child` method to pass down either an empty list or a *copy* of the current list. 3. Change `XmlLoader` to overwrite `param_ns.params` with an empty list ([l. 372](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/xmlloader.py#L372)).
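The reporter's diagnosis can be condensed into a small self-contained sketch of the aliasing pitfall: `params or []` only creates a fresh list while the parent's list is empty, so once a launch-scope private parameter exists, every child context appends into the same shared list. The `Context` class below is a simplified stand-in for roslaunch's `LoaderContext`, and `copy_on_child=True` mirrors option 3, which is essentially what the merged fix applies for node scopes (plus a list copy for group scopes).

```python
# Simplified stand-in for roslaunch's LoaderContext; not the real class.
class Context:
    def __init__(self, ns, params=None):
        self.ns = ns
        # A non-empty parent list is reused as-is, so children alias it.
        self.params = params or []

    def child(self, ns):
        # Passes the parent's list by reference; this is where siblings get coupled.
        return Context(ns, params=self.params)


def demo(copy_on_child):
    root = Context('/')
    root.params.append('~param1')          # <param name="~param1"/> at launch scope

    driver = root.child('driver')
    if copy_on_child:
        driver.params = []                 # option 3: fresh list for the <node> scope
    driver.params.append('~param3')        # private param declared inside the first <node>

    controller = root.child('controller')  # second <node>, declares nothing itself
    return controller.params


print(demo(copy_on_child=False))  # ['~param1', '~param3']: param3 leaks to the controller
print(demo(copy_on_child=True))   # ['~param1']: only the launch-scope parameter remains
```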
Thanks for the detailed report! Looks like this is an issue with the tilde param functionality, since the following: ``` <launch> <param name="param1" value="1" /> <node name="driver" pkg="drivers" type="driver"> <param name="param2" value="2" /> <param name="param3" value="3" /> </node> <node name="controller" pkg="controllers" type="controller" /> </launch> ``` Gives the expected result: ``` $ roslaunch test.launch --dump-params {/driver/param2: 2, /driver/param3: 3, /param1: 1} ``` The [documentation](http://wiki.ros.org/roslaunch/XML/param) says: > You can also set private parameter across a group of nodes by using the ~param syntax (see ROS names) in a <param> tag. The declared parameter will be set as a local parameter in the <node> tags that follow that are in the same scope (i.e. group or ns tag). So I agree that this behaviour is absolutely a bug.
2017-09-07T12:36:16Z
[]
[]
ros/ros_comm
1205
ros__ros_comm-1205
[ "1158" ]
247459207e20c1da109fc306e58b84d15c4107bd
diff --git a/clients/rospy/src/rospy/core.py b/clients/rospy/src/rospy/core.py --- a/clients/rospy/src/rospy/core.py +++ b/clients/rospy/src/rospy/core.py @@ -181,38 +181,37 @@ def __call__(self, caller_id, logging_func, period, msg): _logging_throttle = LoggingThrottle() -def _frame_record_to_caller_id(frame_record): - frame, _, lineno, _, code, _ = frame_record +def _frame_to_caller_id(frame): caller_id = ( inspect.getabsfile(frame), - lineno, + frame.f_lineno, frame.f_lasti, ) return pickle.dumps(caller_id) def logdebug_throttle(period, msg): - caller_id = _frame_record_to_caller_id(inspect.stack()[1]) + caller_id = _frame_to_caller_id(inspect.currentframe().f_back) _logging_throttle(caller_id, logdebug, period, msg) def loginfo_throttle(period, msg): - caller_id = _frame_record_to_caller_id(inspect.stack()[1]) + caller_id = _frame_to_caller_id(inspect.currentframe().f_back) _logging_throttle(caller_id, loginfo, period, msg) def logwarn_throttle(period, msg): - caller_id = _frame_record_to_caller_id(inspect.stack()[1]) + caller_id = _frame_to_caller_id(inspect.currentframe().f_back) _logging_throttle(caller_id, logwarn, period, msg) def logerr_throttle(period, msg): - caller_id = _frame_record_to_caller_id(inspect.stack()[1]) + caller_id = _frame_to_caller_id(inspect.currentframe().f_back) _logging_throttle(caller_id, logerr, period, msg) def logfatal_throttle(period, msg): - caller_id = _frame_record_to_caller_id(inspect.stack()[1]) + caller_id = _frame_to_caller_id(inspect.currentframe().f_back) _logging_throttle(caller_id, logfatal, period, msg) diff --git a/clients/rospy/src/rospy/impl/masterslave.py b/clients/rospy/src/rospy/impl/masterslave.py --- a/clients/rospy/src/rospy/impl/masterslave.py +++ b/clients/rospy/src/rospy/impl/masterslave.py @@ -59,6 +59,7 @@ import threading import traceback import time +import errno try: #py3k @@ -441,13 +442,19 @@ def _connect_topic(self, topic, pub_uri): interval = 0.5 # seconds # while the ROS node is not shutdown try to get the topic information # and retry on connections problems after some wait + # Abort the retry if the we get a Connection Refused since at that point + # we know for sure the URI is invalid while not success and not is_shutdown(): try: code, msg, result = \ xmlrpcapi(pub_uri).requestTopic(caller_id, topic, protocols) success = True except Exception as e: - if not is_shutdown(): + if getattr(e, 'errno', None) == errno.ECONNREFUSED: + code = -errno.ECONNREFUSED + msg = str(e) + break + elif not is_shutdown(): _logger.debug("Retrying for %s" % topic) if interval < 30.0: # exponential backoff (maximum 32 seconds) diff --git a/clients/rospy/src/rospy/impl/tcpros_service.py b/clients/rospy/src/rospy/impl/tcpros_service.py --- a/clients/rospy/src/rospy/impl/tcpros_service.py +++ b/clients/rospy/src/rospy/impl/tcpros_service.py @@ -128,7 +128,6 @@ def contact_service(resolved_name, timeout=10.0): try: if contact_service(resolved_name, timeout_t-time.time()): return - time.sleep(0.3) except KeyboardInterrupt: # re-raise rospy.core.logdebug("wait_for_service: received keyboard interrupt, assuming signals disabled and re-raising") @@ -137,6 +136,7 @@ def contact_service(resolved_name, timeout=10.0): if first: first = False rospy.core.logerr("wait_for_service(%s): failed to contact [%s], will keep trying"%(resolved_name, uri)) + time.sleep(0.3) if rospy.core.is_shutdown(): raise ROSInterruptException("rospy shutdown") else: @@ -146,7 +146,6 @@ def contact_service(resolved_name, timeout=10.0): try: if contact_service(resolved_name): 
return - time.sleep(0.3) except KeyboardInterrupt: # re-raise rospy.core.logdebug("wait_for_service: received keyboard interrupt, assuming signals disabled and re-raising") @@ -155,6 +154,7 @@ def contact_service(resolved_name, timeout=10.0): if first: first = False rospy.core.logerr("wait_for_service(%s): failed to contact [%s], will keep trying"%(resolved_name, uri)) + time.sleep(0.3) if rospy.core.is_shutdown(): raise ROSInterruptException("rospy shutdown") diff --git a/clients/rospy/src/rospy/timer.py b/clients/rospy/src/rospy/timer.py --- a/clients/rospy/src/rospy/timer.py +++ b/clients/rospy/src/rospy/timer.py @@ -49,15 +49,18 @@ class Rate(object): Convenience class for sleeping in a loop at a specified rate """ - def __init__(self, hz): + def __init__(self, hz, reset=False): """ Constructor. @param hz: hz rate to determine sleeping - @type hz: int + @type hz: float + @param reset: if True, timer is reset when rostime moved backward. [default: False] + @type reset: bool """ # #1403 self.last_time = rospy.rostime.get_rostime() self.sleep_dur = rospy.rostime.Duration(0, int(1e9/hz)) + self._reset = reset def _remaining(self, curr_time): """ @@ -96,7 +99,13 @@ def sleep(self): backwards """ curr_time = rospy.rostime.get_rostime() - sleep(self._remaining(curr_time)) + try: + sleep(self._remaining(curr_time)) + except rospy.exceptions.ROSTimeMovedBackwardsException: + if not self._reset: + raise + self.last_time = rospy.rostime.get_rostime() + return self.last_time = self.last_time + self.sleep_dur # detect time jumping forwards, as well as loops that are @@ -181,7 +190,7 @@ class Timer(threading.Thread): Convenience class for calling a callback at a specified rate """ - def __init__(self, period, callback, oneshot=False): + def __init__(self, period, callback, oneshot=False, reset=False): """ Constructor. @param period: desired period between callbacks @@ -190,11 +199,14 @@ def __init__(self, period, callback, oneshot=False): @type callback: function taking rospy.TimerEvent @param oneshot: if True, fire only once, otherwise fire continuously until shutdown is called [default: False] @type oneshot: bool + @param reset: if True, timer is reset when rostime moved backward. 
[default: False] + @type reset: bool """ super(Timer, self).__init__() self._period = period self._callback = callback self._oneshot = oneshot + self._reset = reset self._shutdown = False self.setDaemon(True) self.start() @@ -206,7 +218,7 @@ def shutdown(self): self._shutdown = True def run(self): - r = Rate(1.0 / self._period.to_sec()) + r = Rate(1.0 / self._period.to_sec(), reset=self._reset) current_expected = rospy.rostime.get_rostime() + self._period last_expected, last_real, last_duration = None, None, None while not rospy.core.is_shutdown() and not self._shutdown: diff --git a/tools/rosbag/src/rosbag/bag.py b/tools/rosbag/src/rosbag/bag.py --- a/tools/rosbag/src/rosbag/bag.py +++ b/tools/rosbag/src/rosbag/bag.py @@ -496,7 +496,8 @@ def get_start_time(self): else: if not self._connection_indexes: raise ROSBagException('Bag contains no message') - start_stamp = min([index[0].time.to_sec() for index in self._connection_indexes.values()]) + start_stamps = [index[0].time.to_sec() for index in self._connection_indexes.values() if index] + start_stamp = min(start_stamps) if start_stamps else 0 return start_stamp @@ -512,7 +513,8 @@ def get_end_time(self): else: if not self._connection_indexes: raise ROSBagException('Bag contains no message') - end_stamp = max([index[-1].time.to_sec() for index in self._connection_indexes.values()]) + end_stamps = [index[-1].time.to_sec() for index in self._connection_indexes.values() if index] + end_stamp = max(end_stamps) if end_stamps else 0 return end_stamp @@ -625,8 +627,10 @@ def __str__(self): start_stamp = self._chunks[ 0].start_time.to_sec() end_stamp = self._chunks[-1].end_time.to_sec() else: - start_stamp = min([index[ 0].time.to_sec() for index in self._connection_indexes.values()]) - end_stamp = max([index[-1].time.to_sec() for index in self._connection_indexes.values()]) + start_stamps = [index[0].time.to_sec() for index in self._connection_indexes.values() if index] + start_stamp = min(start_stamps) if start_stamps else 0 + end_stamps = [index[-1].time.to_sec() for index in self._connection_indexes.values() if index] + end_stamp = max(end_stamps) if end_stamps else 0 # Show duration duration = end_stamp - start_stamp @@ -808,8 +812,10 @@ def _get_yaml_info(self, key=None): start_stamp = self._chunks[ 0].start_time.to_sec() end_stamp = self._chunks[-1].end_time.to_sec() else: - start_stamp = min([index[ 0].time.to_sec() for index in self._connection_indexes.values()]) - end_stamp = max([index[-1].time.to_sec() for index in self._connection_indexes.values()]) + start_stamps = [index[0].time.to_sec() for index in self._connection_indexes.values() if index] + start_stamp = min(start_stamps) if start_stamps else 0 + end_stamps = [index[-1].time.to_sec() for index in self._connection_indexes.values() if index] + end_stamp = max(end_stamps) if end_stamps else 0 duration = end_stamp - start_stamp s += 'duration: %.6f\n' % duration @@ -1580,6 +1586,7 @@ def _read_uint32(f): return _unpack_uint32(f.read(4)) def _read_uint64(f): return _unpack_uint64(f.read(8)) def _read_time (f): return _unpack_time (f.read(8)) +def _decode_str(v): return v if type(v) is str else v.decode() def _unpack_uint8(v): return struct.unpack('<B', v)[0] def _unpack_uint32(v): return struct.unpack('<L', v)[0] def _unpack_uint64(v): return struct.unpack('<Q', v)[0] @@ -1628,8 +1635,7 @@ def _read_field(header, field, unpack_fn): return value -def _read_str_field (header, field): - return _read_field(header, field, lambda v: v) +def _read_str_field (header, field): return 
_read_field(header, field, _decode_str) def _read_uint8_field (header, field): return _read_field(header, field, _unpack_uint8) def _read_uint32_field(header, field): return _read_field(header, field, _unpack_uint32) def _read_uint64_field(header, field): return _read_field(header, field, _unpack_uint64) diff --git a/tools/rosbag/src/rosbag/rosbag_main.py b/tools/rosbag/src/rosbag/rosbag_main.py --- a/tools/rosbag/src/rosbag/rosbag_main.py +++ b/tools/rosbag/src/rosbag/rosbag_main.py @@ -477,7 +477,7 @@ def check_cmd(argv): migrations = checkbag(mm, args[0]) if len(migrations) == 0: - print('Bag file is up to date.') + print('Bag file does not need any migrations.') exit(0) print('The following migrations need to occur:') diff --git a/tools/rosgraph/src/rosgraph/roslogging.py b/tools/rosgraph/src/rosgraph/roslogging.py --- a/tools/rosgraph/src/rosgraph/roslogging.py +++ b/tools/rosgraph/src/rosgraph/roslogging.py @@ -83,7 +83,13 @@ def configure_logging(logname, level=logging.INFO, filename=None, env=None): makedirs_with_parent_perms(logfile_dir) except OSError: # cannot print to screen because command-line tools with output use this - sys.stderr.write("WARNING: cannot create log directory [%s]. Please set %s to a writable location.\n"%(logfile_dir, ROS_LOG_DIR)) + if os.path.exists(logfile_dir): + # We successfully created the logging folder, but could not change + # permissions of the new folder to the same as the parent folder + sys.stderr.write("WARNING: Could not change permissions for folder [%s], make sure that the parent folder has correct permissions.\n"%logfile_dir) + else: + # Could not create folder + sys.stderr.write("WARNING: cannot create log directory [%s]. Please set %s to a writable location.\n"%(logfile_dir, ROS_LOG_DIR)) return None elif os.path.isfile(logfile_dir): raise LoggingException("Cannot save log files: file [%s] is in the way"%logfile_dir) diff --git a/tools/roslaunch/src/roslaunch/__init__.py b/tools/roslaunch/src/roslaunch/__init__.py --- a/tools/roslaunch/src/roslaunch/__init__.py +++ b/tools/roslaunch/src/roslaunch/__init__.py @@ -296,21 +296,15 @@ def main(argv=sys.argv): # This is a roslaunch parent, spin up parent server and launch processes. # args are the roslaunch files to load from . 
import parent as roslaunch_parent - try: - # force a port binding spec if we are running a core - if options.core: - options.port = options.port or DEFAULT_MASTER_PORT - p = roslaunch_parent.ROSLaunchParent(uuid, args, roslaunch_strs=roslaunch_strs, - is_core=options.core, port=options.port, local_only=options.local_only, - verbose=options.verbose, force_screen=options.force_screen, - num_workers=options.num_workers, timeout=options.timeout) - p.start() - p.spin() - finally: - # remove the pid file - if options.pid_fn: - try: os.unlink(options.pid_fn) - except os.error: pass + # force a port binding spec if we are running a core + if options.core: + options.port = options.port or DEFAULT_MASTER_PORT + p = roslaunch_parent.ROSLaunchParent(uuid, args, roslaunch_strs=roslaunch_strs, + is_core=options.core, port=options.port, local_only=options.local_only, + verbose=options.verbose, force_screen=options.force_screen, + num_workers=options.num_workers, timeout=options.timeout) + p.start() + p.spin() except RLException as e: roslaunch_core.printerrlog(str(e)) @@ -328,6 +322,12 @@ def main(argv=sys.argv): except Exception as e: traceback.print_exc() sys.exit(1) + finally: + # remove the pid file + if options is not None and options.pid_fn: + try: os.unlink(options.pid_fn) + except os.error: pass + if __name__ == '__main__': main() diff --git a/tools/roslaunch/src/roslaunch/depends.py b/tools/roslaunch/src/roslaunch/depends.py --- a/tools/roslaunch/src/roslaunch/depends.py +++ b/tools/roslaunch/src/roslaunch/depends.py @@ -45,6 +45,7 @@ import rospkg +from .loader import convert_value from .substitution_args import resolve_args NAME="roslaunch-deps" @@ -94,20 +95,16 @@ def _get_arg_value(tag, context): else: raise RoslaunchDepsException("No value for arg [%s]"%(name)) -def _parse_arg(tag, context): - name = tag.attributes['name'].value +def _check_ifunless(tag, context): if tag.attributes.has_key('if'): val = resolve_args(tag.attributes['if'].value, context) - if val == '1' or val == 'true': - return (name, _get_arg_value(tag, context)) + if not convert_value(val, 'bool'): + return False elif tag.attributes.has_key('unless'): val = resolve_args(tag.attributes['unless'].value, context) - if val == '0' or val == 'false': - return (name, _get_arg_value(tag, context)) - else: - return (name, _get_arg_value(tag, context)) - # nothing to return (no value, or conditional wasn't satisfied) - return None + if convert_value(val, 'bool'): + return False + return True def _parse_subcontext(tags, context): subcontext = {'arg': {}} @@ -116,12 +113,8 @@ def _parse_subcontext(tags, context): return subcontext for tag in [t for t in tags if t.nodeType == DomNode.ELEMENT_NODE]: - if tag.tagName == 'arg': - # None is returned for args with if/unless that evaluate to false - ret = _parse_arg(tag, context) - if ret is not None: - (name, val) = ret - subcontext['arg'][name] = val + if tag.tagName == 'arg' and _check_ifunless(tag, context): + subcontext['arg'][tag.attributes['name'].value] = _get_arg_value(tag, context) return subcontext def _parse_launch(tags, launch_file, file_deps, verbose, context): @@ -130,6 +123,8 @@ def _parse_launch(tags, launch_file, file_deps, verbose, context): # process group, include, node, and test tags from launch file for tag in [t for t in tags if t.nodeType == DomNode.ELEMENT_NODE]: + if not _check_ifunless(tag, context): + continue if tag.tagName == 'group': @@ -137,10 +132,8 @@ def _parse_launch(tags, launch_file, file_deps, verbose, context): _parse_launch(tag.childNodes, 
launch_file, file_deps, verbose, context) elif tag.tagName == 'arg': - v = _parse_arg(tag, context) - if v: - (name, val) = v - context['arg'][name] = val + context['arg'][tag.attributes['name'].value] = _get_arg_value(tag, context) + elif tag.tagName == 'include': try: sub_launch_file = resolve_args(tag.attributes['file'].value, context) diff --git a/tools/roslaunch/src/roslaunch/launch.py b/tools/roslaunch/src/roslaunch/launch.py --- a/tools/roslaunch/src/roslaunch/launch.py +++ b/tools/roslaunch/src/roslaunch/launch.py @@ -382,6 +382,9 @@ def _launch_nodes(self): def _launch_master(self): """ Launches master if requested. + @return: True if a master was launched, False if a master was + already running. + @rtype: bool @raise RLException: if master launch fails """ m = self.config.master @@ -426,6 +429,8 @@ def _launch_master(self): self.logger.info("setting /roslaunch/uris/%s__%s' to %s"%(hostname, port, self.server_uri)) param_server.setParam(_ID, '/roslaunch/uris/%s__%s'%(hostname, port),self.server_uri) + return not is_running + def _check_and_set_run_id(self, param_server, run_id): """ Initialize self.run_id to existing value or setup parameter @@ -534,10 +539,7 @@ def launch_node(self, node, core=False): process = create_node_process(self.run_id, node, master.uri) except roslaunch.node_args.NodeParamsException as e: self.logger.error(e) - if node.package == 'rosout' and node.type == 'rosout': - printerrlog("\n\n\nERROR: rosout is not built. Please run 'rosmake rosout'\n\n\n") - else: - printerrlog("ERROR: cannot launch node of type [%s/%s]: %s"%(node.package, node.type, str(e))) + printerrlog("ERROR: cannot launch node of type [%s/%s]: %s"%(node.package, node.type, str(e))) if node.name: return node.name, False else: @@ -627,8 +629,9 @@ def _setup(self): self.remote_runner.add_process_listener(self.listeners) # start up the core: master + core nodes defined in core.xml - self._launch_master() - self._launch_core_nodes() + launched = self._launch_master() + if launched: + self._launch_core_nodes() # run exectuables marked as setup period. this will block # until these executables exit. setup executable have to run diff --git a/tools/roslaunch/src/roslaunch/nodeprocess.py b/tools/roslaunch/src/roslaunch/nodeprocess.py --- a/tools/roslaunch/src/roslaunch/nodeprocess.py +++ b/tools/roslaunch/src/roslaunch/nodeprocess.py @@ -290,6 +290,12 @@ def start(self): cwd = get_ros_root() else: cwd = rospkg.get_ros_home() + if not os.path.exists(cwd): + try: + os.makedirs(cwd) + except OSError: + # exist_ok=True + pass _logger.info("process[%s]: start w/ args [%s]", self.name, self.args) _logger.info("process[%s]: cwd will be [%s]", self.name, cwd) diff --git a/tools/roslaunch/src/roslaunch/xmlloader.py b/tools/roslaunch/src/roslaunch/xmlloader.py --- a/tools/roslaunch/src/roslaunch/xmlloader.py +++ b/tools/roslaunch/src/roslaunch/xmlloader.py @@ -368,6 +368,7 @@ def _node_tag(self, tag, context, ros_config, default_machine, is_test=False, ve child_ns = self._ns_clear_params_attr('node', tag, context, ros_config, node_name=name) param_ns = child_ns.child(name) + param_ns.params = [] # This is necessary because child() does not make a copy of the param list. 
# required attributes pkg, node_type = self.reqd_attrs(tag, context, ('pkg', 'type')) @@ -647,6 +648,7 @@ def _recurse_load(self, ros_config, tags, context, default_machine, is_core, ver if ifunless_test(self, tag, context): self._check_attrs(tag, context, ros_config, XmlLoader.GROUP_ATTRS) child_ns = self._ns_clear_params_attr(name, tag, context, ros_config) + child_ns.params = list(child_ns.params) # copy is needed here to enclose new params default_machine = \ self._recurse_load(ros_config, tag.childNodes, child_ns, \ default_machine, is_core, verbose) diff --git a/tools/rosmaster/src/rosmaster/util.py b/tools/rosmaster/src/rosmaster/util.py --- a/tools/rosmaster/src/rosmaster/util.py +++ b/tools/rosmaster/src/rosmaster/util.py @@ -49,6 +49,9 @@ monkey_patch() del monkey_patch +import errno +import socket + _proxies = {} #cache ServerProxys def xmlrpcapi(uri): """ @@ -62,9 +65,24 @@ def xmlrpcapi(uri): return None if not uri in _proxies: _proxies[uri] = ServerProxy(uri) + close_half_closed_sockets() return _proxies[uri] +def close_half_closed_sockets(): + for proxy in _proxies.values(): + transport = proxy("transport") + if transport._connection and transport._connection[1] is not None and transport._connection[1].sock is not None: + try: + state = transport._connection[1].sock.getsockopt(socket.SOL_TCP, socket.TCP_INFO) + except socket.error as e: # catch [Errno 92] Protocol not available + if e.args[0] is errno.ENOPROTOOPT: + return + raise + if state == 8: # CLOSE_WAIT + transport.close() + + def remove_server_proxy(uri): if uri in _proxies: del _proxies[uri] diff --git a/tools/rosmsg/src/rosmsg/__init__.py b/tools/rosmsg/src/rosmsg/__init__.py --- a/tools/rosmsg/src/rosmsg/__init__.py +++ b/tools/rosmsg/src/rosmsg/__init__.py @@ -48,6 +48,7 @@ import rospkg import genmsg +from genpy.dynamic import generate_dynamic import roslib.message import rosbag @@ -603,7 +604,14 @@ def rosmsg_cmd_show(mode, full, alias='show'): for topic, msg, t in rosbag.Bag(bag_file).read_messages(raw=True): datatype, _, _, _, pytype = msg if datatype == arg: - print(get_msg_text(datatype, options.raw, pytype._full_text)) + if options.raw: + print(pytype._full_text) + else: + context = genmsg.MsgContext.create_default() + msgs = generate_dynamic(datatype, pytype._full_text) + for t, msg in msgs.items(): + context.register(t, msg._spec) + print(spec_to_str(context, msgs[datatype]._spec)) break else: rospack = rospkg.RosPack() diff --git a/tools/rosnode/src/rosnode/__init__.py b/tools/rosnode/src/rosnode/__init__.py --- a/tools/rosnode/src/rosnode/__init__.py +++ b/tools/rosnode/src/rosnode/__init__.py @@ -182,7 +182,6 @@ def get_nodes_by_machine(machine): @raise ROSNodeException: if machine name cannot be resolved to an address @raise ROSNodeIOException: if unable to communicate with master """ - import urlparse master = rosgraph.Master(ID) try: @@ -338,7 +337,7 @@ def rosnode_ping(node_name, max_count=None, verbose=False): # 3786: catch ValueError on unpack as socket.error is not always a tuple try: # #3659 - errnum, msg = e + errnum, msg = e.args if errnum == -2: #name/service unknown p = urlparse.urlparse(node_api) print("ERROR: Unknown host [%s] for node [%s]"%(p.hostname, node_name), file=sys.stderr) @@ -492,9 +491,9 @@ def topic_type(t, pub_topics): pub_topics = master.getPublishedTopics('/') except socket.error: raise ROSNodeIOException("Unable to communicate with master!") - pubs = [t for t, l in state[0] if node_name in l] - subs = [t for t, l in state[1] if node_name in l] - srvs = [t for t, l in 
state[2] if node_name in l] + pubs = sorted([t for t, l in state[0] if node_name in l]) + subs = sorted([t for t, l in state[1] if node_name in l]) + srvs = sorted([t for t, l in state[2] if node_name in l]) buff = "Node [%s]"%node_name if pubs: @@ -808,9 +807,12 @@ def rosnodemain(argv=None): _fullusage() except socket.error: print("Network communication failed. Most likely failed to communicate with master.", file=sys.stderr) + sys.exit(1) except rosgraph.MasterError as e: print("ERROR: "+str(e), file=sys.stderr) + sys.exit(1) except ROSNodeException as e: print("ERROR: "+str(e), file=sys.stderr) + sys.exit(1) except KeyboardInterrupt: pass diff --git a/tools/rostopic/src/rostopic/__init__.py b/tools/rostopic/src/rostopic/__init__.py --- a/tools/rostopic/src/rostopic/__init__.py +++ b/tools/rostopic/src/rostopic/__init__.py @@ -64,6 +64,12 @@ #TODO: lazy-import rospy or move rospy-dependent routines to separate location import rospy +try: + long +except NameError: + long = int + + class ROSTopicException(Exception): """ Base exception class of rostopic-related errors @@ -687,7 +693,7 @@ def _sub_str_plot_fields(val, f, field_filter): """recursive helper function for _str_plot_fields""" # CSV type_ = type(val) - if type_ in (bool, int, float) or \ + if type_ in (bool, int, long, float) or \ isinstance(val, genpy.TVal): return f # duck-type check for messages @@ -708,7 +714,7 @@ def _sub_str_plot_fields(val, f, field_filter): val0 = val[0] type0 = type(val0) # no arrays of arrays - if type0 in (bool, int, float) or \ + if type0 in (bool, int, long, float) or \ isinstance(val0, genpy.TVal): return ','.join(["%s%s"%(f,x) for x in range(0,len(val))]) elif _isstring_type(type0): @@ -757,7 +763,7 @@ def _sub_str_plot(val, time_offset, field_filter): if type_ == bool: return '1' if val else '0' - elif type_ in (int, float) or \ + elif type_ in (int, long, float) or \ isinstance(val, genpy.TVal): if time_offset is not None and isinstance(val, genpy.Time): return str(val-time_offset) @@ -783,7 +789,7 @@ def _sub_str_plot(val, time_offset, field_filter): type0 = type(val0) if type0 == bool: return ','.join([('1' if v else '0') for v in val]) - elif type0 in (int, float) or \ + elif type0 in (int, long, float) or \ isinstance(val0, genpy.TVal): return ','.join([str(v) for v in val]) elif _isstring_type(type0): @@ -1482,11 +1488,7 @@ def _rostopic_cmd_hz(argv): if len(args) == 0: parser.error("topic must be specified") try: - if options.window_size != -1: - import string - window_size = string.atoi(options.window_size) - else: - window_size = options.window_size + window_size = int(options.window_size) except: parser.error("window size must be an integer") @@ -1534,11 +1536,7 @@ def _rostopic_cmd_bw(argv=sys.argv): if len(args) > 1: parser.error("you may only specify one input topic") try: - if options.window_size: - import string - window_size = string.atoi(options.window_size) - else: - window_size = options.window_size + window_size = int(options.window_size) if options.window_size is not None else None except: parser.error("window size must be an integer") topic = rosgraph.names.script_resolve_name('rostopic', args[0]) diff --git a/utilities/roswtf/src/roswtf/environment.py b/utilities/roswtf/src/roswtf/environment.py --- a/utilities/roswtf/src/roswtf/environment.py +++ b/utilities/roswtf/src/roswtf/environment.py @@ -128,7 +128,7 @@ def ros_test_results_dir_check(ctx): def pythonpath_check(ctx): # used to have a lot more checks here, but trying to phase out need for roslib on custom PYTHONPATH 
path = ctx.pythonpath - roslib_count = len(set([p for p in paths(path) if 'roslib' in p])) + roslib_count = len(set([p for p in paths(path) if 'roslib' in p.split(os.sep)])) if roslib_count > 1: return "Multiple roslib directories in PYTHONPATH (there should only be one)" diff --git a/utilities/roswtf/src/roswtf/rosdep_db.py b/utilities/roswtf/src/roswtf/rosdep_db.py --- a/utilities/roswtf/src/roswtf/rosdep_db.py +++ b/utilities/roswtf/src/roswtf/rosdep_db.py @@ -41,15 +41,16 @@ def get_user_home_directory(): return os.path.expanduser("~") -def rosdep_database_initialized_check(ctx): - """Makes sure rosdep database is initialized""" +def rosdep_database_updated_check(ctx): + """Makes sure rosdep database is updated""" if not os.path.exists((os.path.join(get_user_home_directory(), '.ros', 'rosdep', 'sources.cache', 'index'))): - return "Please initialize rosdep database with sudo rosdep init." + return "Please update rosdep database with 'rosdep update'." + warnings = [] -errors = [(rosdep_database_initialized_check, - "ROS Dep database not initialized: "), +errors = [(rosdep_database_updated_check, + "ROS Dep database not updated: "), ]
diff --git a/test/test_rosbag_storage/CMakeLists.txt b/test/test_rosbag_storage/CMakeLists.txt --- a/test/test_rosbag_storage/CMakeLists.txt +++ b/test/test_rosbag_storage/CMakeLists.txt @@ -15,4 +15,8 @@ if(CATKIN_ENABLE_TESTING) if(TARGET create_and_iterate_bag) target_link_libraries(create_and_iterate_bag ${catkin_LIBRARIES}) endif() + catkin_add_gtest(swap_bags src/swap_bags.cpp) + if(TARGET swap_bags) + target_link_libraries(swap_bags ${catkin_LIBRARIES}) + endif() endif() diff --git a/test/test_rosbag_storage/src/create_and_iterate_bag.cpp b/test/test_rosbag_storage/src/create_and_iterate_bag.cpp --- a/test/test_rosbag_storage/src/create_and_iterate_bag.cpp +++ b/test/test_rosbag_storage/src/create_and_iterate_bag.cpp @@ -10,71 +10,104 @@ #include "boost/foreach.hpp" #include <gtest/gtest.h> +void create_test_bag(const std::string &filename) +{ + rosbag::Bag bag; + bag.open(filename, rosbag::bagmode::Write); + + std_msgs::String str; + str.data = std::string("foo"); + + std_msgs::Int32 i; + i.data = 42; + + bag.write("chatter", ros::Time::now(), str); + bag.write("numbers", ros::Time::now(), i); + + bag.close(); +} -TEST(rosbag_storage, create_and_iterate_bag) +const char* bag_filename = "/tmp/rosbag_storage_create_and_iterate_bag.bag"; + +TEST(rosbag_storage, iterator_copy_constructor) { - const char* bag_filename = "/tmp/rosbag_storage_create_and_iterate_bag.bag"; - { - rosbag::Bag bag; - bag.open(bag_filename, rosbag::bagmode::Write); - - std_msgs::String str; - str.data = std::string("foo"); - - std_msgs::Int32 i; - i.data = 42; - - bag.write("chatter", ros::Time::now(), str); - bag.write("numbers", ros::Time::now(), i); - - bag.close(); - } + // copy ctor + rosbag::Bag bag; + bag.open(bag_filename, rosbag::bagmode::Read); + rosbag::View view(bag, rosbag::TopicQuery("numbers")); + rosbag::View::const_iterator it0 = view.begin(); + EXPECT_EQ(42, it0->instantiate<std_msgs::Int32>()->data); + rosbag::View::const_iterator it1(it0); + EXPECT_EQ(it0, it1); + EXPECT_EQ(42, it1->instantiate<std_msgs::Int32>()->data); + ++it1; + EXPECT_NE(it0, it1); + EXPECT_EQ(42, it0->instantiate<std_msgs::Int32>()->data); +} - { - rosbag::Bag bag; - bag.open(bag_filename, rosbag::bagmode::Read); +TEST(rosbag_storage, iterator_copy_assignment) +{ + // copy assignment + rosbag::Bag bag; + bag.open(bag_filename, rosbag::bagmode::Read); + rosbag::View view(bag, rosbag::TopicQuery("numbers")); + rosbag::View::const_iterator it0 = view.begin(); + EXPECT_EQ(42, it0->instantiate<std_msgs::Int32>()->data); + rosbag::View::const_iterator it1; + it1 = it0; + EXPECT_EQ(it0, it1); + EXPECT_EQ(42, it1->instantiate<std_msgs::Int32>()->data); + ++it1; + EXPECT_NE(it0, it1); + EXPECT_EQ(42, it0->instantiate<std_msgs::Int32>()->data); +} + +TEST(rosbag_storage, iterate_bag) +{ + rosbag::Bag bag; + bag.open(bag_filename, rosbag::bagmode::Read); - std::vector<std::string> topics; - topics.push_back(std::string("chatter")); - topics.push_back(std::string("numbers")); + std::vector<std::string> topics; + topics.push_back(std::string("chatter")); + topics.push_back(std::string("numbers")); - rosbag::View view(bag, rosbag::TopicQuery(topics)); + rosbag::View view(bag, rosbag::TopicQuery(topics)); - BOOST_FOREACH(rosbag::MessageInstance const m, view) + BOOST_FOREACH(rosbag::MessageInstance const m, view) + { + std_msgs::String::ConstPtr s = m.instantiate<std_msgs::String>(); + if (s != NULL) { - std_msgs::String::ConstPtr s = m.instantiate<std_msgs::String>(); - if (s != NULL) + if(s->data == std::string("foo")) { + 
printf("Successfully checked string foo\n"); + } + else { - if(s->data == std::string("foo")) { - printf("Successfully checked string foo\n"); - } - else - { - printf("Failed checked string foo\n"); - FAIL(); - } + printf("Failed checked string foo\n"); + FAIL(); } + } - std_msgs::Int32::ConstPtr i = m.instantiate<std_msgs::Int32>(); - if (i != NULL) + std_msgs::Int32::ConstPtr i = m.instantiate<std_msgs::Int32>(); + if (i != NULL) + { + if (i->data == 42) { + printf("Successfully checked value 42\n"); + } + else { - if (i->data == 42) { - printf("Successfully checked value 42\n"); - } - else - { - printf("Failed checked value 42.\n"); - FAIL(); - } + printf("Failed checked value 42.\n"); + FAIL(); } } - - bag.close(); } + + bag.close(); } int main(int argc, char **argv) { ros::Time::init(); + create_test_bag(bag_filename); testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); diff --git a/test/test_rosbag_storage/src/swap_bags.cpp b/test/test_rosbag_storage/src/swap_bags.cpp new file mode 100644 --- /dev/null +++ b/test/test_rosbag_storage/src/swap_bags.cpp @@ -0,0 +1,101 @@ +#include "ros/time.h" +#include "rosbag/bag.h" +#include "rosbag/view.h" +#include "std_msgs/Int32.h" + +#include "boost/foreach.hpp" +#include <gtest/gtest.h> + +void writeBags(rosbag::CompressionType a, rosbag::CompressionType b) { + using std::swap; + rosbag::Bag bag1("/tmp/swap1.bag", rosbag::bagmode::Write); + rosbag::Bag bag2("/tmp/swap2.bag", rosbag::bagmode::Write); + + // In the end "/tmp/swap1.bag" should have CompressionType a and contain two messages of value a. + // "/tmp/swap2.bag" should have CompressionType b and contain two messages of value b. + // We use these pointers to track the bags accordingly. + + rosbag::Bag* a_bag = &bag1; + rosbag::Bag* b_bag = &bag2; + + std_msgs::Int32 a_msg, b_msg; + a_msg.data = a; + b_msg.data = b; + + swap(bag1, bag2); + swap(a_bag, b_bag); + + a_bag->setCompression(a); + b_bag->setCompression(b); + + swap(bag1, bag2); + swap(a_bag, b_bag); + + a_bag->write("/data", ros::Time::now(), a_msg); + b_bag->write("/data", ros::Time::now(), b_msg); + + swap(bag1, bag2); + swap(a_bag, b_bag); + + a_bag->write("/data", ros::Time::now(), a_msg); + b_bag->write("/data", ros::Time::now(), b_msg); + + swap(bag1, bag2); + + bag1.close(); + bag2.close(); + + swap(bag1, bag2); +} + +void readBags(rosbag::CompressionType a, rosbag::CompressionType b) { + using std::swap; + rosbag::Bag bag1("/tmp/swap1.bag", rosbag::bagmode::Read); + rosbag::Bag bag2("/tmp/swap2.bag", rosbag::bagmode::Read); + + rosbag::Bag* a_bag = &bag1; + rosbag::Bag* b_bag = &bag2; + + swap(bag1, bag2); + swap(a_bag, b_bag); + + // only valid when writing + //EXPECT_EQ(a_bag->getCompression(), a); + //EXPECT_EQ(b_bag->getCompression(), b); + + std::vector<std::string> topics; + topics.push_back("data"); + + rosbag::View a_view(*a_bag, rosbag::TopicQuery(topics)); + rosbag::View b_view(*b_bag, rosbag::TopicQuery(topics)); + + BOOST_FOREACH(rosbag::MessageInstance const m, a_view) + { + std_msgs::Int32::ConstPtr i = m.instantiate<std_msgs::Int32>(); + ASSERT_TRUE(i); + EXPECT_EQ(i->data, a); + } + BOOST_FOREACH(rosbag::MessageInstance const m, b_view) + { + std_msgs::Int32::ConstPtr i = m.instantiate<std_msgs::Int32>(); + ASSERT_TRUE(i); + EXPECT_EQ(i->data, b); + } +} + +TEST(rosbag_storage, swap_bags) +{ + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 3; ++j) { + writeBags(rosbag::CompressionType(i), rosbag::CompressionType(j)); + readBags(rosbag::CompressionType(i), rosbag::CompressionType(j)); + } + 
} +} + +int main(int argc, char **argv) { + ros::Time::init(); + + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/test/test_roscpp/test/src/timer_callbacks.cpp b/test/test_roscpp/test/src/timer_callbacks.cpp --- a/test/test_roscpp/test/src/timer_callbacks.cpp +++ b/test/test_roscpp/test/src/timer_callbacks.cpp @@ -54,6 +54,302 @@ using namespace test_roscpp; std::string g_node_name = "test_timer_callbacks"; + +/************************* SteadyTimer tests **********************/ + +class SteadyTimerHelper +{ + public: + SteadyTimerHelper(float period, bool oneshot = false) + : expected_period_(period) + , failed_(false) + , total_calls_(0) + , testing_period_(false) + , calls_before_testing_period_(0) + { + NodeHandle n; + timer_ = n.createSteadyTimer(expected_period_, &SteadyTimerHelper::callback, this, oneshot); + } + + void callback(const SteadyTimerEvent& e) + { + bool first = last_call_.isZero(); + last_call_ = e.current_real; + + if (!first) + { + double time_error = e.current_real.toSec() - e.current_expected.toSec(); + // Strict check if called early, loose check if called late. + // Yes, this is very loose, but must pass in high-load, containerized/virtualized, contentious environments. + if (time_error > 5.0 || time_error < -0.01) + { + ROS_ERROR("Call came at wrong time (expected: %f, actual %f)", e.current_expected.toSec(), e.current_real.toSec()); + failed_ = true; + } + } + + if(testing_period_) + { + + // Inside callback, less than current period, reset=false + if(total_calls_ == calls_before_testing_period_) + { + WallDuration p(0.5); + pretendWork(0.15); + setPeriod(p); + } + + // Inside callback, greater than current period, reset=false + else if(total_calls_ == (calls_before_testing_period_+1)) + { + WallDuration p(0.25); + pretendWork(0.15); + setPeriod(p); + } + + // Inside callback, less than current period, reset=true + else if(total_calls_ == (calls_before_testing_period_+2)) + { + WallDuration p(0.5); + pretendWork(0.15); + setPeriod(p, true); + } + + // Inside callback, greater than current period, reset=true + else if(total_calls_ == (calls_before_testing_period_+3)) + { + WallDuration p(0.25); + pretendWork(0.15); + setPeriod(p, true); + } + } + + ++total_calls_; + } + + void setPeriod(const WallDuration p, bool reset=false) + { + timer_.setPeriod(p, reset); + expected_period_ = p; + } + + + void pretendWork(const float t) + { + ros::Rate r(1. 
/ t); + r.sleep(); + } + + SteadyTime last_call_; + WallDuration expected_period_; + + bool failed_; + + SteadyTimer timer_; + int32_t total_calls_; + + bool testing_period_; + int calls_before_testing_period_; +}; + +TEST(RoscppTimerCallbacks, singleSteadyTimeCallback) +{ + NodeHandle n; + SteadyTimerHelper helper1(0.01); + + WallDuration d(0.001f); + for (int32_t i = 0; i < 1000 && n.ok(); ++i) + { + spinOnce(); + d.sleep(); + } + + if (helper1.failed_) + { + FAIL(); + } + + if (helper1.total_calls_ < 99) + { + ROS_ERROR("Total calls: %d (expected at least 100)", helper1.total_calls_); + FAIL(); + } +} + +TEST(RoscppTimerCallbacks, multipleSteadyTimeCallbacks) +{ + NodeHandle n; + const int count = 100; + typedef boost::scoped_ptr<SteadyTimerHelper> HelperPtr; + HelperPtr helpers[count]; + for (int i = 0; i < count; ++i) + { + helpers[i].reset(new SteadyTimerHelper((float)(i + 1) * 0.1f)); + } + + WallDuration d(0.01f); + const int spin_count = 1000; + for (int32_t i = 0; i < spin_count && n.ok(); ++i) + { + spinOnce(); + d.sleep(); + } + + for (int i = 0; i < count; ++i) + { + if (helpers[i]->failed_) + { + ROS_ERROR("Helper %d failed", i); + FAIL(); + } + + int32_t expected_count = (spin_count * d.toSec()) / helpers[i]->expected_period_.toSec(); + if (helpers[i]->total_calls_ < (expected_count - 1)) + { + ROS_ERROR("Helper %d total calls: %d (at least %d expected)", i, helpers[i]->total_calls_, expected_count); + FAIL(); + } + } +} + +TEST(RoscppTimerCallbacks, steadySetPeriod) +{ + NodeHandle n; + WallDuration period(0.5); + SteadyTimerHelper helper(period.toSec()); + Rate r(100); + + // Let the callback occur once before getting started + while(helper.total_calls_ < 1) + { + spinOnce(); + r.sleep(); + } + + helper.pretendWork(0.1); + + // outside callback, new period < old period, reset = false + Duration wait(0.5); + WallDuration p(0.25); + helper.setPeriod(p); + while(helper.total_calls_ < 2) + { + spinOnce(); + r.sleep(); + } + + helper.pretendWork(0.1); + + // outside callback, new period > old period, reset = false + WallDuration p2(0.5); + helper.setPeriod(p); + while(helper.total_calls_ < 3) + { + spinOnce(); + r.sleep(); + } + + helper.pretendWork(0.1); + + // outside callback, new period < old period, reset = true + WallDuration p3(0.25); + helper.setPeriod(p, true); + while(helper.total_calls_ < 4) + { + spinOnce(); + r.sleep(); + } + + helper.pretendWork(0.1); + + // outside callback, new period > old period, reset = true + WallDuration p4(0.5); + helper.setPeriod(p, true); + while(helper.total_calls_ < 5) + { + spinOnce(); + r.sleep(); + } + + // Test calling setPeriod inside callback + helper.calls_before_testing_period_ = helper.total_calls_; + int total = helper.total_calls_ + 5; + helper.testing_period_ = true; + while(helper.total_calls_ < total) + { + spinOnce(); + r.sleep(); + } + helper.testing_period_ = false; + + + if(helper.failed_) + { + ROS_ERROR("Helper failed in setPeriod"); + FAIL(); + } +} + +TEST(RoscppTimerCallbacks, stopSteadyTimer) +{ + NodeHandle n; + SteadyTimerHelper helper(0.001); + + for (int32_t i = 0; i < 1000 && n.ok(); ++i) + { + WallDuration(0.001).sleep(); + spinOnce(); + } + + ASSERT_GT(helper.total_calls_, 0); + int32_t last_count = helper.total_calls_; + helper.timer_.stop(); + + for (int32_t i = 0; i < 1000 && n.ok(); ++i) + { + WallDuration(0.001).sleep(); + spinOnce(); + } + + ASSERT_EQ(last_count, helper.total_calls_); +} + +int32_t g_steady_count = 0; +void steadyTimerCallback(const ros::SteadyTimerEvent&) +{ + ++g_steady_count; +} 
+ +TEST(RoscppTimerCallbacks, steadyStopThenSpin) +{ + g_steady_count = 0; + NodeHandle n; + ros::SteadyTimer timer = n.createSteadyTimer(ros::WallDuration(0.001), steadyTimerCallback); + + WallDuration(0.1).sleep(); + timer.stop(); + + spinOnce(); + + ASSERT_EQ(g_steady_count, 0); +} + +TEST(RoscppTimerCallbacks, oneShotSteadyTimer) +{ + NodeHandle n; + SteadyTimerHelper helper(0.001, true); + + for (int32_t i = 0; i < 1000 && n.ok(); ++i) + { + WallDuration(0.001).sleep(); + spinOnce(); + } + + ASSERT_EQ(helper.total_calls_, 1); +} + +/************************* WallTimer tests **********************/ + class WallTimerHelper { public: @@ -71,15 +367,16 @@ class WallTimerHelper void callback(const WallTimerEvent& e) { bool first = last_call_.isZero(); - WallTime last_call = last_call_; - last_call_ = WallTime::now(); - WallTime start = last_call_; + last_call_ = e.current_real; if (!first) { - if (fabsf(expected_next_call_.toSec() - start.toSec()) > 0.1f) + double time_error = e.current_real.toSec() - e.current_expected.toSec(); + // Strict check if called early, loose check if called late. + // Yes, this is very loose, but must pass in high-load, containerized/virtualized, contentious environments. + if (time_error > 5.0 || time_error < -0.01) { - ROS_ERROR("Call came at wrong time (%f vs. %f)", expected_next_call_.toSec(), start.toSec()); + ROS_ERROR("Call came at wrong time (expected: %f, actual %f)", e.current_expected.toSec(), e.current_real.toSec()); failed_ = true; } } @@ -119,28 +416,12 @@ class WallTimerHelper setPeriod(p, true); } } - else - { - expected_next_call_ = e.current_expected + expected_period_; - } - - WallTime end = WallTime::now(); - last_duration_ = end - start; ++total_calls_; } void setPeriod(const WallDuration p, bool reset=false) { - if(reset) - { - expected_next_call_ = WallTime::now() + p; - } - else - { - expected_next_call_ = last_call_ + p; - } - timer_.setPeriod(p, reset); expected_period_ = p; } @@ -153,9 +434,7 @@ class WallTimerHelper } WallTime last_call_; - WallTime expected_next_call_; WallDuration expected_period_; - WallDuration last_duration_; bool failed_; diff --git a/test/test_roscpp/test/test_callback_queue.cpp b/test/test_roscpp/test/test_callback_queue.cpp --- a/test/test_roscpp/test/test_callback_queue.cpp +++ b/test/test_roscpp/test/test_callback_queue.cpp @@ -38,6 +38,7 @@ #include <ros/console.h> #include <ros/timer.h> +#include <boost/atomic.hpp> #include <boost/shared_ptr.hpp> #include <boost/bind.hpp> #include <boost/thread.hpp> @@ -398,6 +399,81 @@ TEST(CallbackQueue, recursiveTimer) tg.join_all(); } +class ConditionObject +{ +public: + ConditionObject(CallbackQueue * _queue) + : id(0), queue(_queue) { + condition_sync.store(true); + condition_one.store(false); + condition_stop.store(false); + } + + void add(); + + unsigned long id; + CallbackQueue * queue; + boost::atomic<bool> condition_one; + boost::atomic<bool> condition_sync; + boost::atomic<bool> condition_stop; +}; + +class RaceConditionCallback : public CallbackInterface +{ +public: + RaceConditionCallback(ConditionObject * _condition_object, unsigned long * _id) + : condition_object(_condition_object), id(_id) + {} + + virtual CallResult call() + { + condition_object->condition_one.store(false); + return Success; + } + + ConditionObject * condition_object; + unsigned long * id; +}; + +void ConditionObject::add() +{ + while(!condition_stop.load()) + { + if(condition_sync.load()) + { + condition_sync.store(false); + condition_one.store(true); + id++; + 
queue->addCallback(boost::make_shared<RaceConditionCallback>(this, &id), id); + } + boost::this_thread::sleep(boost::posix_time::microseconds(1)); + } +} + +TEST(CallbackQueue, raceConditionCallback) +{ + CallbackQueue queue; + ConditionObject condition_object(&queue); + + boost::thread t(boost::bind(&ConditionObject::add, &condition_object)); + for(unsigned int i = 0; i < 1000000; ++i) + { + if (queue.callOne() == CallbackQueue::Called) + { + if(condition_object.condition_one.load()) + { + condition_object.condition_stop.store(true); + ASSERT_FALSE(condition_object.condition_one.load()); + } + } + + queue.clear(); + condition_object.condition_sync.store(true); + } + condition_object.condition_stop.store(true); + t.join(); +} + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); diff --git a/test/test_rostopic/test/test_rostopic_unit.py b/test/test_rostopic/test/test_rostopic_unit.py --- a/test/test_rostopic/test/test_rostopic_unit.py +++ b/test/test_rostopic/test/test_rostopic_unit.py @@ -247,7 +247,7 @@ def test_strify_message(self): m = String() self.assertEquals("data: ''", strify_message(m, field_filter=f)) m = String('foo') - self.assertEquals('data: foo', strify_message(m, field_filter=f)) + self.assertEquals('data: "foo"', strify_message(m, field_filter=f)) m = TVals(Time(123, 456), Duration(78, 90)) v = yaml.load(strify_message(m, field_filter=f)) self.assertEquals({'t': {'secs': 123, 'nsecs': 456}, 'd': {'secs': 78, 'nsecs': 90}}, v) @@ -283,7 +283,7 @@ def test_strify_message(self): m = String() self.assertEquals("data: ''", strify_message(m, field_filter=f)) m = String('foo') - self.assertEquals('data: foo', strify_message(m, field_filter=f)) + self.assertEquals('data: "foo"', strify_message(m, field_filter=f)) m = TVals(Time(123, 456), Duration(78, 90)) v = yaml.load(strify_message(m, field_filter=f)) self.assertEquals({'t': {'secs': 123, 'nsecs': 456}, 'd': {'secs': 78, 'nsecs': 90}}, v) diff --git a/tools/roslaunch/test/unit/test_roslaunch_dump_params.py b/tools/roslaunch/test/unit/test_roslaunch_dump_params.py --- a/tools/roslaunch/test/unit/test_roslaunch_dump_params.py +++ b/tools/roslaunch/test/unit/test_roslaunch_dump_params.py @@ -77,6 +77,10 @@ def test_roslaunch(self): '/node_rosparam/dict1/shoulders': 2, '/node_rosparam/dict1/knees': 3, '/node_rosparam/dict1/toes': 4, + '/node_rosparam/tilde1': 'foo', + '/node_rosparam/local_param': 'baz', + + '/node_rosparam2/tilde1': 'foo', '/inline_str': 'value1', '/inline_list': [1, 2, 3, 4], @@ -99,3 +103,6 @@ def test_roslaunch(self): elif v != output_val[k]: self.fail("key [%s] value [%s] does not match output: %s"%(k, v, output_val[k])) self.assertEquals(val, output_val) + for k in ('/node_rosparam/tilde2', '/node_rosparam2/tilde2', '/node_rosparam2/local_param'): + if k in output_val: + self.fail("key [%s] should not be in output: %s"%(k, output_val)) diff --git a/tools/roslaunch/test/xml/test-dump-rosparam.launch b/tools/roslaunch/test/xml/test-dump-rosparam.launch --- a/tools/roslaunch/test/xml/test-dump-rosparam.launch +++ b/tools/roslaunch/test/xml/test-dump-rosparam.launch @@ -1,15 +1,21 @@ <launch> + <param name="~tilde1" value="foo" /> <rosparam file="$(find roslaunch)/test/dump-params.yaml" command="load" /> <group ns="rosparam"> + <param name="~tilde2" value="bar" /> <rosparam file="$(find roslaunch)/test/dump-params.yaml" command="load" /> </group> <node pkg="package" type="test_base" name="node_rosparam"> + <param name="local_param" value="baz" /> <rosparam file="$(find 
roslaunch)/test/dump-params.yaml" command="load" /> </node> + <node pkg="package" type="test_base" name="node_rosparam2"> + </node> + <rosparam param="inline_str">value1</rosparam> <rosparam param="inline_list">[1, 2, 3, 4]</rosparam> <rosparam param="inline_dict">{key1: value1, key2: value2}</rosparam> diff --git a/tools/rostopic/test/check_rostopic_command_line_online.py b/tools/rostopic/test/check_rostopic_command_line_online.py --- a/tools/rostopic/test/check_rostopic_command_line_online.py +++ b/tools/rostopic/test/check_rostopic_command_line_online.py @@ -111,7 +111,7 @@ def test_rostopic(self): values = [n for n in values if n != '---'] self.assertEquals(count, len(values), "wrong number of echos in output:\n"+str(values)) for n in values: - self.assert_('data: hello world ' in n, n) + self.assert_('data: "hello world ' in n, n) if 0: #bw diff --git a/utilities/roswtf/test/check_roswtf_command_line_online.py b/utilities/roswtf/test/check_roswtf_command_line_online.py --- a/utilities/roswtf/test/check_roswtf_command_line_online.py +++ b/utilities/roswtf/test/check_roswtf_command_line_online.py @@ -127,13 +127,15 @@ def _check_output(self, cmd, output, error=None): 'No errors or warnings' in output or 'Found 1 error' in output, 'CMD[%s] OUTPUT[%s]%s' % (' '.join(cmd), output, '\nstderr[%s]' % error if error else '')) - if 'No errors or warnings' in output: - self.assert_('ERROR' not in output, 'OUTPUT[%s]' % output) + allowed_errors = 0 if 'Found 1 error' in output: self.assert_(output.count('ERROR') == 1, 'OUTPUT[%s]' % output) self.assert_( - 'Error: the rosdep view is empty' not in output, + 'ROS Dep database not updated' in output, 'OUTPUT[%s]' % output) + allowed_errors += 1 + if 'No errors or warnings' in output: + self.assert_(output.count('ERROR') <= allowed_errors, 'OUTPUT[%s]' % output) if __name__ == '__main__': rostest.run(PKG, NAME, TestRostopicOnline, sys.argv)
Roslaunch Creates Unexpected Private Parameters

I came across a bug in the implementation of roslaunch. I have tested it in both Indigo and Kinetic, and it is also present in `lunar-devel` from what I can see.

Consider the following example launch file:

```xml
<launch>
  <node name="driver" pkg="drivers" type="driver">
    <param name="param1" value="1" />
    <param name="~param2" value="2" />
  </node>
  <node name="controller" pkg="controllers" type="controller" />
</launch>
```

As you would expect, roslaunch creates two parameters:

- `/driver/param1`
- `/driver/param2`

Consider yet another simple example.

```xml
<launch>
  <param name="~param" value="1" />
  <node name="driver" pkg="drivers" type="driver" />
  <node name="controller" pkg="controllers" type="controller" />
</launch>
```

Again, two parameters, as expected.

- `/driver/param`
- `/controller/param`

If we combine both features, the second node inherits the first node's private parameters, when it should not.

```xml
<launch>
  <param name="~param1" value="1" />
  <node name="driver" pkg="drivers" type="driver">
    <param name="param2" value="2" />
    <param name="~param3" value="3" />
  </node>
  <node name="controller" pkg="controllers" type="controller" />
</launch>
```

This results in ***six*** parameters, instead of four.

- `/driver/param1`
- `/driver/param2`
- `/driver/param3`
- `/controller/param1`
- `/controller/param2`
- `/controller/param3`

I have tried variations of isolating either the first or second node in `<group>`, but the result is the same.

From my understanding of the code, the problem lies in [line 172 of `loader.py`](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/loader.py#L172) and [line 320](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/loader.py#L320) of the same file. When parsing the first private parameter, the parameter is [added to the root context](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/xmlloader.py#L270).

```python
if is_private(name) or force_local:
    p = Param(name, value)
    context.add_param(p)
```

When parsing the node tag right after, a child context is created, and this is where the problem lies. The child context does not start with its own list of parameters; it simply [reuses the same list](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/loader.py#L320), over and over.

```python
def child(self, ns):
    ...
    return LoaderContext(..., params=self.params, ...)
```

The result is that `param1` is added to the list, and `param2` and `param3` are added to the same list as well, because the child context is using the same reference as its parent context. When parsing the second (third, fourth, ...) `<node>`, it will create not only `param1`, but the other parameters as well, since they were wrongly appended to the root list.

Why does this work as intended if the first parameter is not present? It works because the list is empty when the first `<node>` is parsed. When creating the child context, an empty list (the root list) is passed to the `LoaderContext` constructor. As seen in [line 172](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/loader.py#L172), if the received list is empty, it does not pass the logical `or`, and a **new** parameter list is created. Since a new list is created, it isolates the parameters properly.
```python
self.params = params or []
```

I have a few ideas to solve this issue, but I am not sure of the impact of each, or what the intended behaviour is. If one of the following is the clear solution, I can submit a pull request.

1. Change the `LoaderContext` constructor to simply create a new empty list for `self.params` every time.
2. Change the `child` method to pass down either an empty list or a *copy* of the current list.
3. Change `XmlLoader` to overwrite `param_ns.params` with an empty list ([l. 372](https://github.com/ros/ros_comm/blob/44bbbd1a7c383f1b1901c024010992ad7b17b0c6/tools/roslaunch/src/roslaunch/xmlloader.py#L372)).
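The aliasing described above is easy to reproduce outside of roslaunch. Below is a minimal, self-contained Python sketch; `Context` is a simplified stand-in for `LoaderContext` (only the parameter-list handling is modelled), and the copy-based variant only illustrates option 2 above, not the actual patch.

```python
class Context(object):
    """Simplified stand-in for roslaunch's LoaderContext."""

    def __init__(self, ns, params=None):
        self.ns = ns
        # `params or []` only creates a fresh list when the incoming list is
        # empty; a non-empty parent list is shared (aliased) by the child.
        self.params = params or []

    def child_shared(self, ns):
        # Mirrors the reported behaviour: the child reuses the same list object.
        return Context(ns, params=self.params)

    def child_copied(self, ns):
        # Option 2 above: hand the child its own copy, so additions made while
        # parsing one <node> cannot leak into the root or into sibling nodes.
        return Context(ns, params=list(self.params))


root = Context('/')
root.params.append('~param1')        # top-level private parameter

driver = root.child_shared('driver')
driver.params.append('~param3')      # private parameter inside <node>
print(root.params)                   # ['~param1', '~param3']  (leaked into the root)

root = Context('/')
root.params.append('~param1')
driver = root.child_copied('driver')
driver.params.append('~param3')
print(root.params)                   # ['~param1']  (root list stays isolated)
```

The patch earlier in this entry takes a closely related route: it resets `param_ns.params` to a fresh list for `<node>` tags and copies the list for `<group>` tags, so each scope ends up with its own parameter list.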
2017-10-26T19:20:05Z
[]
[]
ros/ros_comm
1,323
ros__ros_comm-1323
[ "1262" ]
078481c0c113ae8c1c01947e2425e4f8a0ec62d0
diff --git a/clients/rospy/src/rospy/msg.py b/clients/rospy/src/rospy/msg.py --- a/clients/rospy/src/rospy/msg.py +++ b/clients/rospy/src/rospy/msg.py @@ -69,7 +69,7 @@ def __init__(self, *args): def serialize(self, buff): """AnyMsg provides an implementation so that a node can forward messages w/o (de)serialization""" if self._buff is None: - raise rospy.exceptions("AnyMsg is not initialized") + raise rospy.exceptions.ROSException("AnyMsg is not initialized") else: buff.write(self._buff) diff --git a/tools/rosbag/src/rosbag/rosbag_main.py b/tools/rosbag/src/rosbag/rosbag_main.py --- a/tools/rosbag/src/rosbag/rosbag_main.py +++ b/tools/rosbag/src/rosbag/rosbag_main.py @@ -170,10 +170,13 @@ def info_cmd(argv): except ROSBagUnindexedException as ex: print('ERROR bag unindexed: %s. Run rosbag reindex.' % arg, file=sys.stderr) + sys.exit(1) except ROSBagException as ex: print('ERROR reading %s: %s' % (arg, str(ex)), file=sys.stderr) + sys.exit(1) except IOError as ex: print('ERROR reading %s: %s' % (arg, str(ex)), file=sys.stderr) + sys.exit(1) def handle_topics(option, opt_str, value, parser): @@ -326,7 +329,7 @@ def eval_fn(topic, m, t): inbag = Bag(inbag_filename) except ROSBagUnindexedException as ex: print('ERROR bag unindexed: %s. Run rosbag reindex.' % inbag_filename, file=sys.stderr) - return + sys.exit(1) try: meter = ProgressMeter(outbag_filename, inbag._uncompressed_size) @@ -424,7 +427,7 @@ def fix_cmd(argv): except ROSBagUnindexedException as ex: print('ERROR bag unindexed: %s. Run rosbag reindex.' % inbag_filename, file=sys.stderr) - return + sys.exit(1) if len(migrations) == 0: os.rename(outname, outbag_filename) @@ -441,6 +444,7 @@ def fix_cmd(argv): print('Try running \'rosbag check\' to create the necessary rule files or run \'rosbag fix\' with the \'--force\' option.') os.remove(outname) + sys.exit(1) def check_cmd(argv): parser = optparse.OptionParser(usage='rosbag check BAG [-g RULEFILE] [EXTRARULES1 EXTRARULES2 ...]', description='Determine whether a bag is playable in the current system, or if it can be migrated.') @@ -470,7 +474,7 @@ def check_cmd(argv): Bag(args[0]) except ROSBagUnindexedException as ex: print('ERROR bag unindexed: %s. Run rosbag reindex.' % args[0], file=sys.stderr) - return + sys.exit(1) mm = MessageMigrator(args[1:] + append_rule, not options.noplugins) diff --git a/tools/roslaunch/src/roslaunch/nodeprocess.py b/tools/roslaunch/src/roslaunch/nodeprocess.py --- a/tools/roslaunch/src/roslaunch/nodeprocess.py +++ b/tools/roslaunch/src/roslaunch/nodeprocess.py @@ -95,7 +95,7 @@ def create_master_process(run_id, type_, ros_root, port, num_workers=NUM_WORKERS _logger.info("process[master]: launching with args [%s]"%args) log_output = False - return LocalProcess(run_id, package, 'master', args, os.environ, log_output, None) + return LocalProcess(run_id, package, 'master', args, os.environ, log_output, None, required=True) def create_node_process(run_id, node, master_uri): """ diff --git a/tools/roslaunch/src/roslaunch/parent.py b/tools/roslaunch/src/roslaunch/parent.py --- a/tools/roslaunch/src/roslaunch/parent.py +++ b/tools/roslaunch/src/roslaunch/parent.py @@ -286,7 +286,10 @@ def start(self, auto_terminate=True): if self.process_listeners: for l in self.process_listeners: self.runner.pm.add_process_listener(l) - + # Add listeners to server as well, otherwise they won't be + # called when a node on a remote machine dies. 
+ self.server.add_process_listener(l) + def spin_once(self): """ Run the parent roslaunch event loop once diff --git a/tools/rosmaster/src/rosmaster/util.py b/tools/rosmaster/src/rosmaster/util.py --- a/tools/rosmaster/src/rosmaster/util.py +++ b/tools/rosmaster/src/rosmaster/util.py @@ -70,6 +70,8 @@ def xmlrpcapi(uri): def close_half_closed_sockets(): + if not hasattr(socket, 'TCP_INFO'): + return for proxy in _proxies.values(): transport = proxy("transport") if transport._connection and transport._connection[1] is not None and transport._connection[1].sock is not None: diff --git a/tools/rosnode/src/rosnode/__init__.py b/tools/rosnode/src/rosnode/__init__.py --- a/tools/rosnode/src/rosnode/__init__.py +++ b/tools/rosnode/src/rosnode/__init__.py @@ -84,7 +84,7 @@ def _succeed(args): _caller_apis = {} def get_api_uri(master, caller_id, skip_cache=False): """ - @param master: XMLRPC handle to ROS Master + @param master: rosgraph Master instance @type master: rosgraph.Master @param caller_id: node name @type caller_id: str @@ -399,9 +399,9 @@ def rosnode_ping_all(verbose=False): def cleanup_master_blacklist(master, blacklist): """ - Remove registrations from ROS Master that do not match blacklist. - @param master: XMLRPC handle to ROS Master - @type master: xmlrpclib.ServerProxy + Remove registrations from ROS Master that match blacklist. + @param master: rosgraph Master instance + @type master: rosgraph.Master @param blacklist: list of nodes to scrub @type blacklist: [str] """ @@ -426,8 +426,8 @@ def cleanup_master_blacklist(master, blacklist): def cleanup_master_whitelist(master, whitelist): """ Remove registrations from ROS Master that do not match whitelist. - @param master: XMLRPC handle to ROS Master - @type master: xmlrpclib.ServerProxy + @param master: rosgraph Master instance + @type master: rosgraph.Master @param whitelist: list of nodes to keep @type whitelist: list of nodes to keep """
diff --git a/test/test_roscpp/test/test_poll_set.cpp b/test/test_roscpp/test/test_poll_set.cpp --- a/test/test_roscpp/test/test_poll_set.cpp +++ b/test/test_roscpp/test/test_poll_set.cpp @@ -256,7 +256,16 @@ void delThread(PollSet* ps, SocketHelper* sh, boost::barrier* barrier) ps->delSocket(sh->socket_); } -TEST_F(Poller, addDelMultiThread) +/** + * This test has been disabled. The underlying logic which it tests has three + * different implementations (poll, epoll, Windows), and development of the epoll + * version exposed that the test was validating a buggy aspect of the original + * poll version. To reenable this test, the poll version and the test would both + * have to be updated. + * + * For more discussion, see: https://github.com/ros/ros_comm/pull/1217 + */ +TEST_F(Poller, DISABLED_addDelMultiThread) { for (int i = 0; i < 100; ++i) { diff --git a/test/test_rospy/test/unit/test_rospy_api.py b/test/test_rospy/test/unit/test_rospy_api.py --- a/test/test_rospy/test/unit/test_rospy_api.py +++ b/test/test_rospy/test/unit/test_rospy_api.py @@ -66,12 +66,13 @@ def test_anymsg(self): except ImportError: from io import StringIO import rospy + import rospy.exceptions #trip wires against AnyMsg API m = rospy.AnyMsg() try: m.serialize(StringIO()) self.fail("AnyMsg should not allow serialization") - except: + except rospy.exceptions.ROSException: pass teststr = 'foostr-%s'%time.time() diff --git a/tools/topic_tools/test/test_mux_services.py b/tools/topic_tools/test/test_mux_services.py --- a/tools/topic_tools/test/test_mux_services.py +++ b/tools/topic_tools/test/test_mux_services.py @@ -52,7 +52,7 @@ def make_srv_proxies(self): rospy.wait_for_service('mux/list', 5) rospy.wait_for_service('mux/select', 5) except rospy.ROSException as e: - self.fail('failed to find a required service: ' + `e`) + self.fail('failed to find a required service: ' + repr(e)) add_srv = rospy.ServiceProxy('mux/add', MuxAdd) delete_srv = rospy.ServiceProxy('mux/delete', MuxDelete)
ROS_IPV6 is checked too late in roscpp

The ROS_IPV6 environment variable is checked late in the initialization process (in `ros::start`):

```cpp
env_ipv6 = getenv("ROS_IPV6");
//...
bool use_ipv6 = (env_ipv6 && strcmp(env_ipv6,"on") == 0);
//...
XmlRpc::XmlRpcSocket::s_use_ipv6_ = use_ipv6;
```

However, the first connection to the master can be initiated as early as `ros::init`, failing the whole process and preventing nodes from starting. If I change the default value of `XmlRpc::XmlRpcSocket::s_use_ipv6_` to `true` manually, these nodes work in an IPv6 environment. The correct solution would be to move the ROS_IPV6 check to the beginning of `ros::init`.
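To illustrate the ordering being requested, here is a small Python standard-library sketch rather than roscpp code; `resolve_master` is a hypothetical placeholder for whatever performs the first contact with the master, and the actual fix belongs in roscpp/XmlRpc++.

```python
import os
import socket

def resolve_master(host, port):
    """Hypothetical helper: decide the address family from ROS_IPV6 *before*
    anything tries to reach the master, which is the ordering the report asks
    ros::init to follow."""
    use_ipv6 = os.environ.get("ROS_IPV6") == "on"
    family = socket.AF_INET6 if use_ipv6 else socket.AF_INET
    # Only after the family has been fixed do we touch the network at all.
    return socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)

if __name__ == "__main__":
    # With ROS_IPV6 unset this resolves over IPv4; with ROS_IPV6=on it
    # resolves over IPv6 from the very first call.
    print(resolve_master("localhost", 11311)[0][4])
```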
2018-02-09T22:05:43Z
[]
[]
ros/ros_comm
1,477
ros__ros_comm-1477
[ "1360" ]
e1ac4cfab9d56c7f5cd19a75043a270616d66453
diff --git a/clients/rospy/src/rospy/impl/tcpros_pubsub.py b/clients/rospy/src/rospy/impl/tcpros_pubsub.py --- a/clients/rospy/src/rospy/impl/tcpros_pubsub.py +++ b/clients/rospy/src/rospy/impl/tcpros_pubsub.py @@ -242,7 +242,7 @@ def create_transport(self, resolved_name, pub_uri, protocol_params): if type(protocol_params) != list or len(protocol_params) != 3: return 0, "ERROR: invalid TCPROS parameters", 0 if protocol_params[0] != TCPROS: - return 0, "INTERNAL ERROR: protocol id is not TCPROS: %s"%id, 0 + return 0, "INTERNAL ERROR: protocol id is not TCPROS: %s"%protocol_params[0], 0 id, dest_addr, dest_port = protocol_params sub = rospy.impl.registration.get_topic_manager().get_subscriber_impl(resolved_name) diff --git a/clients/rospy/src/rospy/names.py b/clients/rospy/src/rospy/names.py --- a/clients/rospy/src/rospy/names.py +++ b/clients/rospy/src/rospy/names.py @@ -46,7 +46,7 @@ is_global, is_private import rosgraph.names -from rospy.exceptions import ROSException +from rospy.exceptions import ROSException, ROSInitException from rospy.impl.validators import ParameterInvalid TOPIC_ANYTYPE = ANYTYPE #indicates that a subscriber will connect any datatype given to it diff --git a/clients/rospy/src/rospy/topics.py b/clients/rospy/src/rospy/topics.py --- a/clients/rospy/src/rospy/topics.py +++ b/clients/rospy/src/rospy/topics.py @@ -1348,7 +1348,7 @@ def get_topics(self): @return: list of topic names this node subscribes to/publishes @rtype: [str] """ - return self.topics + return self.topics.copy() def _get_list(self, rmap): return [[k, v.type] for k, v in rmap.items()] diff --git a/tools/rosbag/src/rosbag/rosbag_main.py b/tools/rosbag/src/rosbag/rosbag_main.py --- a/tools/rosbag/src/rosbag/rosbag_main.py +++ b/tools/rosbag/src/rosbag/rosbag_main.py @@ -93,6 +93,8 @@ def record_cmd(argv): parser.add_option( "--node", dest="node", default=None, type='string',action="store", help="record all topics subscribed to by a specific node") parser.add_option("-j", "--bz2", dest="compression", default=None, action="store_const", const='bz2', help="use BZ2 compression") parser.add_option("--lz4", dest="compression", action="store_const", const='lz4', help="use LZ4 compression") + parser.add_option("--tcpnodelay", dest="tcpnodelay", action="store_true", help="Use the TCP_NODELAY transport hint when subscribing to topics.") + parser.add_option("--udp", dest="udp", action="store_true", help="Use the UDP transport hint when subscribing to topics.") (options, args) = parser.parse_args(argv) @@ -128,6 +130,8 @@ def record_cmd(argv): if options.size: cmd.extend(["--size", str(options.size)]) if options.node: cmd.extend(["--node", options.node]) + if options.tcpnodelay: cmd.extend(["--tcpnodelay"]) + if options.udp: cmd.extend(["--udp"]) cmd.extend(args) diff --git a/tools/roslaunch/src/roslaunch/depends.py b/tools/roslaunch/src/roslaunch/depends.py --- a/tools/roslaunch/src/roslaunch/depends.py +++ b/tools/roslaunch/src/roslaunch/depends.py @@ -170,6 +170,12 @@ def _parse_launch(tags, launch_file, file_deps, verbose, context): else: launch_tag = dom[0] sub_context = _parse_subcontext(tag.childNodes, context) + try: + if tag.attributes['pass_all_args']: + sub_context["arg"] = context["arg"] + sub_context["arg"].update(_parse_subcontext(tag.childNodes, context)["arg"]) + except KeyError as e: + pass _parse_launch(launch_tag.childNodes, sub_launch_file, file_deps, verbose, sub_context) except IOError as e: raise RoslaunchDepsException("Cannot load roslaunch include '%s' in '%s'"%(sub_launch_file, launch_file)) 
diff --git a/tools/rostopic/src/rostopic/__init__.py b/tools/rostopic/src/rostopic/__init__.py --- a/tools/rostopic/src/rostopic/__init__.py +++ b/tools/rostopic/src/rostopic/__init__.py @@ -1137,7 +1137,7 @@ def _sub_rostopic_list(master, pubs, subs, publishers_only, subscribers_only, ve print(indent+"Subscribed topics:") for t, ttype, tlist in subs: if len(tlist) > 1: - print(indent+" * %s [%s] %s subscribers"%(t, ttype, len(llist))) + print(indent+" * %s [%s] %s subscribers"%(t, ttype, len(tlist))) else: print(indent+" * %s [%s] 1 subscriber"%(t, ttype)) print('') diff --git a/utilities/message_filters/src/message_filters/__init__.py b/utilities/message_filters/src/message_filters/__init__.py --- a/utilities/message_filters/src/message_filters/__init__.py +++ b/utilities/message_filters/src/message_filters/__init__.py @@ -156,12 +156,16 @@ def getElemBeforeTime(self, stamp): return None return older[-1] - def getLastestTime(self): + def getLatestTime(self): """Return the newest recorded timestamp.""" if not self.cache_times: return None return self.cache_times[-1] + def getLastestTime(self): + """Return the newest recorded timestamp (equivalent to `getLatestTime()`, but included for backwards compatibility).""" + return self.getLatestTime() + def getOldestTime(self): """Return the oldest recorded timestamp.""" if not self.cache_times:
diff --git a/test/test_roscpp/test/CMakeLists.txt b/test/test_roscpp/test/CMakeLists.txt --- a/test/test_roscpp/test/CMakeLists.txt +++ b/test/test_roscpp/test/CMakeLists.txt @@ -1,6 +1,6 @@ catkin_add_gtest(${PROJECT_NAME}-test_version test_version.cpp) if(TARGET ${PROJECT_NAME}-test_version) - target_link_libraries(${PROJECT_NAME}-test_version) + target_link_libraries(${PROJECT_NAME}-test_version ${catkin_LIBRARIES}) endif() catkin_add_gtest(${PROJECT_NAME}-test_header test_header.cpp) diff --git a/test/test_roscpp/test_serialization/CMakeLists.txt b/test/test_roscpp/test_serialization/CMakeLists.txt --- a/test/test_roscpp/test_serialization/CMakeLists.txt +++ b/test/test_roscpp/test_serialization/CMakeLists.txt @@ -4,24 +4,24 @@ endif() catkin_add_gtest(${PROJECT_NAME}-serialization src/serialization.cpp) if(TARGET ${PROJECT_NAME}-serialization) - target_link_libraries(${PROJECT_NAME}-serialization ${catkin_LIBRARIES} ${GTEST_LIBRARIES}) + target_link_libraries(${PROJECT_NAME}-serialization ${GTEST_LIBRARIES} ${catkin_LIBRARIES}) add_dependencies(${PROJECT_NAME}-serialization ${${PROJECT_NAME}_EXPORTED_TARGETS}) endif() catkin_add_gtest(${PROJECT_NAME}-generated_messages src/generated_messages.cpp) if(TARGET ${PROJECT_NAME}-generated_messages) - target_link_libraries(${PROJECT_NAME}-generated_messages ${catkin_LIBRARIES} ${GTEST_LIBRARIES}) + target_link_libraries(${PROJECT_NAME}-generated_messages ${GTEST_LIBRARIES} ${catkin_LIBRARIES}) add_dependencies(${PROJECT_NAME}-generated_messages ${${PROJECT_NAME}_EXPORTED_TARGETS}) endif() add_executable(${PROJECT_NAME}-builtin_types EXCLUDE_FROM_ALL src/builtin_types.cpp) add_dependencies(tests ${PROJECT_NAME}-builtin_types) -target_link_libraries(${PROJECT_NAME}-builtin_types ${catkin_LIBRARIES} ${GTEST_LIBRARIES}) +target_link_libraries(${PROJECT_NAME}-builtin_types ${GTEST_LIBRARIES} ${catkin_LIBRARIES}) add_dependencies(${PROJECT_NAME}-builtin_types ${${PROJECT_NAME}_EXPORTED_TARGETS}) add_rostest(test/builtin_types.test) add_executable(${PROJECT_NAME}-pre_deserialize EXCLUDE_FROM_ALL src/pre_deserialize.cpp) add_dependencies(tests ${PROJECT_NAME}-pre_deserialize) -target_link_libraries(${PROJECT_NAME}-pre_deserialize ${catkin_LIBRARIES} ${GTEST_LIBRARIES}) +target_link_libraries(${PROJECT_NAME}-pre_deserialize ${GTEST_LIBRARIES} ${catkin_LIBRARIES}) add_dependencies(${PROJECT_NAME}-pre_deserialize ${${PROJECT_NAME}_EXPORTED_TARGETS}) add_rostest(test/pre_deserialize.test) diff --git a/tools/roslaunch/test/unit/test_roslaunch_rlutil.py b/tools/roslaunch/test/unit/test_roslaunch_rlutil.py --- a/tools/roslaunch/test/unit/test_roslaunch_rlutil.py +++ b/tools/roslaunch/test/unit/test_roslaunch_rlutil.py @@ -84,4 +84,8 @@ def test_resolve_launch_arguments(self): self.fail("should have failed") except roslaunch.RLException: pass - + + def test_roslaunch_check_pass_all_args(self): + filename = os.path.join(get_example_path(), 'example-pass_all_args.launch') + error_msg = roslaunch.rlutil.check_roslaunch(filename) + assert error_msg is None diff --git a/utilities/message_filters/test/test_message_filters_cache.py b/utilities/message_filters/test/test_message_filters_cache.py --- a/utilities/message_filters/test/test_message_filters_cache.py +++ b/utilities/message_filters/test/test_message_filters_cache.py @@ -103,9 +103,11 @@ def test_all_funcs(self): self.assertEqual(s, rospy.Time(3), "invalid msg return by getElemBeforeTime") - s = cache.getLastestTime() + s = cache.getLatestTime() self.assertEqual(s, rospy.Time(4), - "invalid stamp return by 
getLastestTime") + "invalid stamp return by getLatestTime") + self.assertEqual(s, cache.getLastestTime(), + "stamps returned by getLatestTime and getLastestTime don't match") s = cache.getOldestTime() self.assertEqual(s, rospy.Time(0), diff --git a/utilities/xmlrpcpp/test/CMakeLists.txt b/utilities/xmlrpcpp/test/CMakeLists.txt --- a/utilities/xmlrpcpp/test/CMakeLists.txt +++ b/utilities/xmlrpcpp/test/CMakeLists.txt @@ -12,7 +12,8 @@ find_package(Boost REQUIRED COMPONENTS system thread) include_directories(${Boost_INCLUDE_DIRS}) add_library(test_fixtures test_fixtures.cpp) -target_link_libraries(test_fixtures ${Boost_LIBRARIES}) +target_link_libraries(test_fixtures xmlrpcpp ${Boost_LIBRARIES} ${GTEST_LIBRARIES}) +set_target_properties(test_fixtures PROPERTIES EXCLUDE_FROM_ALL TRUE) catkin_add_gtest(HelloTest HelloTest.cpp) target_link_libraries(HelloTest xmlrpcpp ${Boost_LIBRARIES}) @@ -24,6 +25,8 @@ catkin_add_gtest(test_ulimit test_ulimit.cpp) target_link_libraries(test_ulimit xmlrpcpp test_fixtures ${Boost_LIBRARIES}) add_library(mock_socket mock_socket.cpp) +target_link_libraries(mock_socket ${GTEST_LIBRARIES}) +set_target_properties(mock_socket PROPERTIES EXCLUDE_FROM_ALL TRUE) catkin_add_gtest(test_client test_client.cpp @@ -46,10 +49,17 @@ catkin_add_gtest(test_dispatch ../libb64/src/cencode.c ) target_link_libraries(test_dispatch mock_socket ${catkin_LIBRARIES}) +if(APPLE) set_target_properties(test_dispatch PROPERTIES LINK_FLAGS - "-Wl,--wrap=select -Wl,--wrap=poll" + "-Wl,-alias,___wrap_poll,_poll" ) +elseif(UNIX) +set_target_properties(test_dispatch PROPERTIES + LINK_FLAGS + "-Wl,--wrap=poll" +) +endif() if(NOT WIN32) catkin_add_gtest(test_socket @@ -58,10 +68,17 @@ if(NOT WIN32) ../src/XmlRpcSocket.cpp ../src/XmlRpcUtil.cpp ) - set_target_properties(test_socket PROPERTIES - LINK_FLAGS - "-Wl,--wrap=accept -Wl,--wrap=bind -Wl,--wrap=close -Wl,--wrap=connect -Wl,--wrap=getaddrinfo -Wl,--wrap=getsockname -Wl,--wrap=listen -Wl,--wrap=read -Wl,--wrap=setsockopt -Wl,--wrap=select -Wl,--wrap=socket -Wl,--wrap=write -Wl,--wrap=fcntl -Wl,--wrap=freeaddrinfo" - ) + if(APPLE) + set_target_properties(test_socket PROPERTIES + LINK_FLAGS + "-Wl,-alias,___wrap_accept,_accept -Wl,-alias,___wrap_bind,_bind -Wl,-alias,___wrap_close,_close -Wl,-alias,___wrap_connect,_connect -Wl,-alias,___wrap_getaddrinfo,_getaddrinfo -Wl,-alias,___wrap_getsockname,_getsockname -Wl,-alias,___wrap_listen,_listen -Wl,-alias,___wrap_read,_read -Wl,-alias,___wrap_setsockopt,_setsockopt -Wl,-alias,___wrap_select,_select -Wl,-alias,___wrap_select,_select$1050 -Wl,-alias,___wrap_socket,_socket -Wl,-alias,___wrap_write,_write -Wl,-alias,___wrap_fcntl,_fcntl -Wl,-alias,___wrap_freeaddrinfo,_freeaddrinfo" + ) + elseif(UNIX) + set_target_properties(test_socket PROPERTIES + LINK_FLAGS + "-Wl,--wrap=accept -Wl,--wrap=bind -Wl,--wrap=close -Wl,--wrap=connect -Wl,--wrap=getaddrinfo -Wl,--wrap=getsockname -Wl,--wrap=listen -Wl,--wrap=read -Wl,--wrap=setsockopt -Wl,--wrap=select -Wl,--wrap=socket -Wl,--wrap=write -Wl,--wrap=fcntl -Wl,--wrap=freeaddrinfo" + ) + endif() endif() catkin_add_gtest(TestValues TestValues.cpp) diff --git a/utilities/xmlrpcpp/test/mock_socket.cpp b/utilities/xmlrpcpp/test/mock_socket.cpp --- a/utilities/xmlrpcpp/test/mock_socket.cpp +++ b/utilities/xmlrpcpp/test/mock_socket.cpp @@ -60,7 +60,7 @@ std::string XmlRpcSocket::getErrorMsg(int error) { std::deque<int> close_calls; void XmlRpcSocket::close(int fd) { - EXPECT_LE(1, close_calls.size()); + EXPECT_LE(1u, close_calls.size()); if (close_calls.size() > 
0) { int close_fd = close_calls.front(); close_calls.pop_front(); @@ -299,7 +299,7 @@ void MockSocketTest::TearDown() { void MockSocketTest::CheckCalls() { // Check that call counters and queues are empty. EXPECT_EQ(0, socket_calls); - EXPECT_EQ(0, close_calls.size()); + EXPECT_EQ(0u, close_calls.size()); EXPECT_EQ(0, setNonBlocking_calls); EXPECT_EQ(0, setReuseAddr_calls); EXPECT_EQ(0, bind_calls); diff --git a/utilities/xmlrpcpp/test/test_dispatch.cpp b/utilities/xmlrpcpp/test/test_dispatch.cpp --- a/utilities/xmlrpcpp/test/test_dispatch.cpp +++ b/utilities/xmlrpcpp/test/test_dispatch.cpp @@ -46,15 +46,13 @@ // those symbols instead use __wrap_xxx extern "C" { // Mock for poll -int __real_poll(struct pollfd *fds, nfds_t nfds, int timeout); - int (*fake_poll)(struct pollfd *, nfds_t, int) = 0; int __wrap_poll(struct pollfd *fds, nfds_t nfds, int timeout) { if(fake_poll) { return fake_poll(fds, nfds, timeout); } else { - return __real_poll(fds, nfds, timeout); + return 0; } } @@ -72,7 +70,7 @@ int mock_poll(struct pollfd *fds, nfds_t nfds, int timeout) { EXPECT_EQ(poll_fds.size(), nfds); EXPECT_EQ(poll_timeout, timeout); - for(nfds_t i=0; i<std::min(nfds, poll_fds.size()); i++) { + for(nfds_t i=0; i<nfds && i<poll_fds.size(); i++) { EXPECT_EQ(poll_fds[i].fd, fds[i].fd); EXPECT_EQ(poll_fds[i].events, fds[i].events); fds[i].revents = poll_fds[i].revents; diff --git a/utilities/xmlrpcpp/test/test_system_mocks.c b/utilities/xmlrpcpp/test/test_system_mocks.c --- a/utilities/xmlrpcpp/test/test_system_mocks.c +++ b/utilities/xmlrpcpp/test/test_system_mocks.c @@ -28,13 +28,12 @@ #endif #define MOCK_SYSCALL(ret, name, ARG_TYPES, ARG_NAMES) \ - ret __real_##name ARG_TYPES; \ ret(*fake_##name) ARG_TYPES = 0; \ ret __wrap_##name ARG_TYPES { \ if (fake_##name) { \ return fake_##name ARG_NAMES; \ } else { \ - return __real_##name ARG_NAMES; \ + return -1; \ } \ } \ int name##_calls = 0; \ @@ -47,7 +46,6 @@ // custom mock for fcntl because it is varargs // the mocked version always takes the third argument -int __real_fcntl(int fd, int cmd, ...); int (*fake_fcntl)(int fd, int cmd, unsigned long) = 0; int __wrap_fcntl(int fd, int cmd, ...) { va_list ap; @@ -58,7 +56,7 @@ int __wrap_fcntl(int fd, int cmd, ...) { if (fake_fcntl) { return fake_fcntl(fd, cmd, arg); } else { - return __real_fcntl(fd, cmd, arg); + return -1; } } int fcntl_calls = 0; @@ -68,13 +66,12 @@ int count_fcntl(int fd, int cmd, unsigned long arg) { } // Custom mock for freeaddrinfo because it returns void. -void __real_freeaddrinfo(struct addrinfo* res); void (*fake_freeaddrinfo)(struct addrinfo* res) = 0; void __wrap_freeaddrinfo(struct addrinfo* res) { if (fake_freeaddrinfo) { return fake_freeaddrinfo(res); } else { - return __real_freeaddrinfo(res); + return; } } int freeaddrinfo_calls = 0;
Node using StatisticsLogger crashes when message arrival time is large If enable_statistics is set to True and a node is subscribed to a topic that receives messages infrequently, it's possible for the arrival time between two messages to be very large (on the order of hours). When this happens it can trigger a "Duration is out of dual 32-bit range" runtime error when calculating the period standard deviation here https://github.com/ros/ros_comm/blob/lunar-devel/clients/roscpp/src/libros/statistics.cpp#L215, which then causes the node to crash. The solution is to add a try/except clause around the period standard deviation calculation, similar to the stamp age deviation here https://github.com/ros/ros_comm/blob/lunar-devel/clients/roscpp/src/libros/statistics.cpp#L162
2018-08-09T22:31:10Z
[]
[]
ros/ros_comm
1490
ros__ros_comm-1490
[ "1360" ]
b34f62aca2cd3d568cef2d397f8b182891d1e4d9
diff --git a/clients/rospy/src/rospy/topics.py b/clients/rospy/src/rospy/topics.py --- a/clients/rospy/src/rospy/topics.py +++ b/clients/rospy/src/rospy/topics.py @@ -1348,7 +1348,7 @@ def get_topics(self): @return: list of topic names this node subscribes to/publishes @rtype: [str] """ - return self.topics + return self.topics.copy() def _get_list(self, rmap): return [[k, v.type] for k, v in rmap.items()] diff --git a/tools/rosbag/src/rosbag/rosbag_main.py b/tools/rosbag/src/rosbag/rosbag_main.py --- a/tools/rosbag/src/rosbag/rosbag_main.py +++ b/tools/rosbag/src/rosbag/rosbag_main.py @@ -93,6 +93,8 @@ def record_cmd(argv): parser.add_option( "--node", dest="node", default=None, type='string',action="store", help="record all topics subscribed to by a specific node") parser.add_option("-j", "--bz2", dest="compression", default=None, action="store_const", const='bz2', help="use BZ2 compression") parser.add_option("--lz4", dest="compression", action="store_const", const='lz4', help="use LZ4 compression") + parser.add_option("--tcpnodelay", dest="tcpnodelay", action="store_true", help="Use the TCP_NODELAY transport hint when subscribing to topics.") + parser.add_option("--udp", dest="udp", action="store_true", help="Use the UDP transport hint when subscribing to topics.") (options, args) = parser.parse_args(argv) @@ -128,6 +130,8 @@ def record_cmd(argv): if options.size: cmd.extend(["--size", str(options.size)]) if options.node: cmd.extend(["--node", options.node]) + if options.tcpnodelay: cmd.extend(["--tcpnodelay"]) + if options.udp: cmd.extend(["--udp"]) cmd.extend(args) diff --git a/tools/roslaunch/src/roslaunch/depends.py b/tools/roslaunch/src/roslaunch/depends.py --- a/tools/roslaunch/src/roslaunch/depends.py +++ b/tools/roslaunch/src/roslaunch/depends.py @@ -170,6 +170,12 @@ def _parse_launch(tags, launch_file, file_deps, verbose, context): else: launch_tag = dom[0] sub_context = _parse_subcontext(tag.childNodes, context) + try: + if tag.attributes['pass_all_args']: + sub_context["arg"] = context["arg"] + sub_context["arg"].update(_parse_subcontext(tag.childNodes, context)["arg"]) + except KeyError as e: + pass _parse_launch(launch_tag.childNodes, sub_launch_file, file_deps, verbose, sub_context) except IOError as e: raise RoslaunchDepsException("Cannot load roslaunch include '%s' in '%s'"%(sub_launch_file, launch_file)) diff --git a/tools/roslaunch/src/roslaunch/loader.py b/tools/roslaunch/src/roslaunch/loader.py --- a/tools/roslaunch/src/roslaunch/loader.py +++ b/tools/roslaunch/src/roslaunch/loader.py @@ -358,7 +358,7 @@ def add_param(self, ros_config, param_name, param_value, verbose=True): else: ros_config.add_param(Param(param_name, param_value), verbose=verbose) - def load_rosparam(self, context, ros_config, cmd, param, file_, text, verbose=True): + def load_rosparam(self, context, ros_config, cmd, param, file_, text, verbose=True, subst_function=None): """ Load rosparam setting @@ -394,6 +394,8 @@ def load_rosparam(self, context, ros_config, cmd, param, file_, text, verbose=Tr with open(file_, 'r') as f: text = f.read() + if subst_function is not None: + text = subst_function(text) # parse YAML text # - lazy import global yaml diff --git a/tools/roslaunch/src/roslaunch/xmlloader.py b/tools/roslaunch/src/roslaunch/xmlloader.py --- a/tools/roslaunch/src/roslaunch/xmlloader.py +++ b/tools/roslaunch/src/roslaunch/xmlloader.py @@ -233,9 +233,10 @@ def _rosparam_tag(self, tag, context, ros_config, verbose=True): # load is the default command cmd = cmd or 'load' value = _get_text(tag) 
+ subst_function = None if subst_value: - value = self.resolve_args(value, context) - self.load_rosparam(context, ros_config, cmd, param, file, value, verbose=verbose) + subst_function = lambda x: self.resolve_args(x, context) + self.load_rosparam(context, ros_config, cmd, param, file, value, verbose=verbose, subst_function=subst_function) except ValueError as e: raise loader.LoadException("error loading <rosparam> tag: \n\t"+str(e)+"\nXML is %s"%tag.toxml()) diff --git a/utilities/message_filters/src/message_filters/__init__.py b/utilities/message_filters/src/message_filters/__init__.py --- a/utilities/message_filters/src/message_filters/__init__.py +++ b/utilities/message_filters/src/message_filters/__init__.py @@ -156,12 +156,16 @@ def getElemBeforeTime(self, stamp): return None return older[-1] - def getLastestTime(self): + def getLatestTime(self): """Return the newest recorded timestamp.""" if not self.cache_times: return None return self.cache_times[-1] + def getLastestTime(self): + """Return the newest recorded timestamp (equivalent to `getLatestTime()`, but included for backwards compatibility).""" + return self.getLatestTime() + def getOldestTime(self): """Return the oldest recorded timestamp.""" if not self.cache_times:
diff --git a/test/test_roscpp/test/CMakeLists.txt b/test/test_roscpp/test/CMakeLists.txt --- a/test/test_roscpp/test/CMakeLists.txt +++ b/test/test_roscpp/test/CMakeLists.txt @@ -1,6 +1,6 @@ catkin_add_gtest(${PROJECT_NAME}-test_version test_version.cpp) if(TARGET ${PROJECT_NAME}-test_version) - target_link_libraries(${PROJECT_NAME}-test_version) + target_link_libraries(${PROJECT_NAME}-test_version ${catkin_LIBRARIES}) endif() catkin_add_gtest(${PROJECT_NAME}-test_header test_header.cpp) diff --git a/test/test_roscpp/test_serialization/CMakeLists.txt b/test/test_roscpp/test_serialization/CMakeLists.txt --- a/test/test_roscpp/test_serialization/CMakeLists.txt +++ b/test/test_roscpp/test_serialization/CMakeLists.txt @@ -4,24 +4,24 @@ endif() catkin_add_gtest(${PROJECT_NAME}-serialization src/serialization.cpp) if(TARGET ${PROJECT_NAME}-serialization) - target_link_libraries(${PROJECT_NAME}-serialization ${catkin_LIBRARIES} ${GTEST_LIBRARIES}) + target_link_libraries(${PROJECT_NAME}-serialization ${GTEST_LIBRARIES} ${catkin_LIBRARIES}) add_dependencies(${PROJECT_NAME}-serialization ${${PROJECT_NAME}_EXPORTED_TARGETS}) endif() catkin_add_gtest(${PROJECT_NAME}-generated_messages src/generated_messages.cpp) if(TARGET ${PROJECT_NAME}-generated_messages) - target_link_libraries(${PROJECT_NAME}-generated_messages ${catkin_LIBRARIES} ${GTEST_LIBRARIES}) + target_link_libraries(${PROJECT_NAME}-generated_messages ${GTEST_LIBRARIES} ${catkin_LIBRARIES}) add_dependencies(${PROJECT_NAME}-generated_messages ${${PROJECT_NAME}_EXPORTED_TARGETS}) endif() add_executable(${PROJECT_NAME}-builtin_types EXCLUDE_FROM_ALL src/builtin_types.cpp) add_dependencies(tests ${PROJECT_NAME}-builtin_types) -target_link_libraries(${PROJECT_NAME}-builtin_types ${catkin_LIBRARIES} ${GTEST_LIBRARIES}) +target_link_libraries(${PROJECT_NAME}-builtin_types ${GTEST_LIBRARIES} ${catkin_LIBRARIES}) add_dependencies(${PROJECT_NAME}-builtin_types ${${PROJECT_NAME}_EXPORTED_TARGETS}) add_rostest(test/builtin_types.test) add_executable(${PROJECT_NAME}-pre_deserialize EXCLUDE_FROM_ALL src/pre_deserialize.cpp) add_dependencies(tests ${PROJECT_NAME}-pre_deserialize) -target_link_libraries(${PROJECT_NAME}-pre_deserialize ${catkin_LIBRARIES} ${GTEST_LIBRARIES}) +target_link_libraries(${PROJECT_NAME}-pre_deserialize ${GTEST_LIBRARIES} ${catkin_LIBRARIES}) add_dependencies(${PROJECT_NAME}-pre_deserialize ${${PROJECT_NAME}_EXPORTED_TARGETS}) add_rostest(test/pre_deserialize.test) diff --git a/tools/roslaunch/test/params_subst.yaml b/tools/roslaunch/test/params_subst.yaml new file mode 100644 --- /dev/null +++ b/tools/roslaunch/test/params_subst.yaml @@ -0,0 +1 @@ +string1: $(anon foo) diff --git a/tools/roslaunch/test/unit/test_roslaunch_rlutil.py b/tools/roslaunch/test/unit/test_roslaunch_rlutil.py --- a/tools/roslaunch/test/unit/test_roslaunch_rlutil.py +++ b/tools/roslaunch/test/unit/test_roslaunch_rlutil.py @@ -84,4 +84,8 @@ def test_resolve_launch_arguments(self): self.fail("should have failed") except roslaunch.RLException: pass - + + def test_roslaunch_check_pass_all_args(self): + filename = os.path.join(get_example_path(), 'example-pass_all_args.launch') + error_msg = roslaunch.rlutil.check_roslaunch(filename) + assert error_msg is None diff --git a/tools/roslaunch/test/unit/test_xmlloader.py b/tools/roslaunch/test/unit/test_xmlloader.py --- a/tools/roslaunch/test/unit/test_xmlloader.py +++ b/tools/roslaunch/test/unit/test_xmlloader.py @@ -242,6 +242,10 @@ def test_rosparam_valid(self): self.assertEquals('bar', p.value) p = [p for p in 
mock.params if p.key == '/node_rosparam/robots/childparam'][0] self.assertEquals('a child namespace parameter', p.value) + + # test substitution in yaml files + p = [p for p in mock.params if p.key == '/rosparam_subst/string1'][0] + self.assertTrue('$(anon foo)' not in p.value) exes = [e for e in mock.executables if e.command == 'rosparam'] self.assertEquals(len(exes), 2, "expected 2 rosparam exes, got %s"%len(exes)) @@ -274,6 +278,10 @@ def test_rosparam_valid(self): p = [p for p in mock.params if p.key == '/inline_dict2/key4'][0] self.assertEquals('value4', p.value) + # test substitution in inline yaml + p = [p for p in mock.params if p.key == '/inline_subst'][0] + self.assertTrue('$(anon foo)' not in p.value) + # verify that later tags override # - key2 is overriden self.assertEquals(1, len([p for p in mock.params if p.key == '/override/key1'])) diff --git a/tools/roslaunch/test/xml/test-rosparam-valid.xml b/tools/roslaunch/test/xml/test-rosparam-valid.xml --- a/tools/roslaunch/test/xml/test-rosparam-valid.xml +++ b/tools/roslaunch/test/xml/test-rosparam-valid.xml @@ -12,6 +12,10 @@ <rosparam file="$(find roslaunch)/test/params.yaml" command="load" /> </node> + <group ns="rosparam_subst"> + <rosparam file="$(find roslaunch)/test/params_subst.yaml" command="load" subst_value="true" /> + </group> + <rosparam param="inline_str">value1</rosparam> <rosparam param="inline_list">[1, 2, 3, 4]</rosparam> <rosparam param="inline_dict">{key1: value1, key2: value2}</rosparam> @@ -19,6 +23,8 @@ key3: value3 key4: value4 </rosparam> + + <rosparam param="inline_subst" subst_value="true">$(anon foo)</rosparam> <rosparam param="override">{key1: value1, key2: value2}</rosparam> <rosparam param="override">{key1: override1}</rosparam> diff --git a/utilities/message_filters/test/test_message_filters_cache.py b/utilities/message_filters/test/test_message_filters_cache.py --- a/utilities/message_filters/test/test_message_filters_cache.py +++ b/utilities/message_filters/test/test_message_filters_cache.py @@ -103,9 +103,11 @@ def test_all_funcs(self): self.assertEqual(s, rospy.Time(3), "invalid msg return by getElemBeforeTime") - s = cache.getLastestTime() + s = cache.getLatestTime() self.assertEqual(s, rospy.Time(4), - "invalid stamp return by getLastestTime") + "invalid stamp return by getLatestTime") + self.assertEqual(s, cache.getLastestTime(), + "stamps returned by getLatestTime and getLastestTime don't match") s = cache.getOldestTime() self.assertEqual(s, rospy.Time(0),
Node using StatisticsLogger crashes when message arrival time is large If enable_statistics is set to True and a node is subscribed to a topic that receives messages infrequently, it's possible for the arrival time between two messages to be very large (on the order of hours). When this happens it can trigger a "Duration is out of dual 32-bit range" runtime error when calculating the period standard deviation here https://github.com/ros/ros_comm/blob/lunar-devel/clients/roscpp/src/libros/statistics.cpp#L215, which then causes the node to crash. The solution is to add a try/except clause around the period standard deviation calculation, similar to the stamp age deviation here https://github.com/ros/ros_comm/blob/lunar-devel/clients/roscpp/src/libros/statistics.cpp#L162
2018-08-20T21:28:44Z
[]
[]
ros/ros_comm
1520
ros__ros_comm-1520
[ "889" ]
b5ee2db6524d5335e65abca1cc190d948e80a02d
diff --git a/tools/roslaunch/src/roslaunch/xmlloader.py b/tools/roslaunch/src/roslaunch/xmlloader.py --- a/tools/roslaunch/src/roslaunch/xmlloader.py +++ b/tools/roslaunch/src/roslaunch/xmlloader.py @@ -632,8 +632,9 @@ def _include_tag(self, tag, context, ros_config, default_machine, is_core, verbo self._recurse_load(ros_config, launch.childNodes, child_ns, \ default_machine, is_core, verbose) - # check for unused args - loader.post_process_include_args(child_ns) + if not pass_all_args: + # check for unused args + loader.post_process_include_args(child_ns) except ArgException as e: raise XmlParseException("included file [%s] requires the '%s' arg to be set"%(inc_filename, str(e)))
diff --git a/tools/roslaunch/test/xml/test-arg-valid-include.xml b/tools/roslaunch/test/xml/test-arg-valid-include.xml --- a/tools/roslaunch/test/xml/test-arg-valid-include.xml +++ b/tools/roslaunch/test/xml/test-arg-valid-include.xml @@ -1,4 +1,5 @@ <launch> + <arg name="another_parameter_not_used" value="dummy"/> <arg name="grounded" value="not_set"/> <include file="$(find roslaunch)/test/xml/test-arg-invalid-included.xml" pass_all_args="true"/> </launch>
roslaunch: pass_all_args exception if there are extra args defined I just tried using this attribute on a fairly complex launch file that has multiple `<arg>`s declared, and it leads to an exception because not all of the `<arg>`s are defined in the `<include>`d file. For example, in the `drcsim_gazebo` package, [atlas.launch](https://bitbucket.org/osrf/drcsim/src/194be8500fef81593f79607a21ee2badd9700a0e/drcsim_gazebo/launch/atlas.launch?at=default&fileviewer=file-view-default#atlas.launch-14) includes [atlas_no_controllers.launch](https://bitbucket.org/osrf/drcsim/src/194be8500fef81593f79607a21ee2badd9700a0e/drcsim_gazebo/launch/atlas_no_controllers.launch?at=default&fileviewer=file-view-default) and passes three arguments. I removed some of the duplicate argument definitions in [this commit](https://bitbucket.org/osrf/drcsim/commits/d5c93d7db649ca4df6c07d6fcba6f9e77953913a), but roslaunch gives an exception since there are extra `<arg>`s in `atlas.launch` that aren't part of `atlas_no_controllers.launch` (for example, [inertia_args](https://bitbucket.org/osrf/drcsim/src/194be8500fef81593f79607a21ee2badd9700a0e/drcsim_gazebo/launch/atlas.launch?at=default&fileviewer=file-view-default#atlas.launch-8)). I'm guessing this will be closed as a "won't fix", but I wanted to mention it since I was excited when @gerkey added this feature in #710, but now there are quite a few instances where I won't be able to use it.
Oh, that's interesting. I hadn't considered that case. Can you tell me which exception is thrown, to help me track down the code path that leads to it and have a look at what can be done? ``` [roslaunch][INFO] 2016-09-06 17:25:55,495: Checking log directory for disk usage. This may take awhile. Press Ctrl-C to interrupt [roslaunch][INFO] 2016-09-06 17:25:55,518: Done checking log file disk usage. Usage is <1GB. [roslaunch][INFO] 2016-09-06 17:25:55,518: roslaunch starting with args ['/opt/ros/indigo/bin/roslaunch', 'drcsim_gazebo', 'atlas.launch', 'gzname:=gzserver'] [roslaunch][INFO] 2016-09-06 17:25:55,519: roslaunch env is ... [roslaunch][INFO] 2016-09-06 17:25:55,519: starting in server mode [roslaunch.parent][INFO] 2016-09-06 17:25:55,519: starting roslaunch parent run [roslaunch][INFO] 2016-09-06 17:25:55,519: loading roscore config file /opt/ros/indigo/etc/ros/roscore.xml [roslaunch][INFO] 2016-09-06 17:25:55,607: Added core node of type [rosout/rosout] in namespace [/] [roslaunch.config][INFO] 2016-09-06 17:25:55,608: loading config file /data_fast/scpeters/ws/drcsim/install/share/drcsim_gazebo/launch/atlas.launch [roslaunch][INFO] 2016-09-06 17:25:55,638: Added node of type [drcsim_gazebo/run_gzserver] in namespace [/] [roslaunch][ERROR] 2016-09-06 17:25:55,638: unused args [hand_suffix, inertia_args, model_args] for include of [/data_fast/scpeters/ws/drcsim/install/share/drcsim_gazebo/launch/atlas_no_controllers.launch] [roslaunch][ERROR] 2016-09-06 17:25:55,638: The traceback for the exception was written to the log file [roslaunch][ERROR] 2016-09-06 17:25:55,639: Traceback (most recent call last): File "/opt/ros/indigo/lib/python2.7/dist-packages/roslaunch/__init__.py", line 307, in main p.start() File "/opt/ros/indigo/lib/python2.7/dist-packages/roslaunch/parent.py", line 268, in start self._start_infrastructure() File "/opt/ros/indigo/lib/python2.7/dist-packages/roslaunch/parent.py", line 217, in _start_infrastructure self._load_config() File "/opt/ros/indigo/lib/python2.7/dist-packages/roslaunch/parent.py", line 132, in _load_config roslaunch_strs=self.roslaunch_strs, verbose=self.verbose) File "/opt/ros/indigo/lib/python2.7/dist-packages/roslaunch/config.py", line 455, in load_config_default raise RLException(e) RLException: unused args [hand_suffix, inertia_args, model_args] for include of [/data_fast/scpeters/ws/drcsim/install/share/drcsim_gazebo/launch/atlas_no_controllers.launch] [rospy.core][INFO] 2016-09-06 17:25:55,639: signal_shutdown [atexit] ``` I'm also having this problem, and with nested launch files, it makes the `pass_all_args` unfortunately useless, where it should be able to greatly reduce the amount of repeated text all over the place. Thanks for the bump. I'll look into this. Any new on this? I actually never understood why it throws an exception just because some arguments are not used. This workaround excludes the check when `pass_all_args` is set: ``` if not pass_all_args: # check for unused args loader.post_process_include_args(child_ns) ``` Instead of the following: https://github.com/ros/ros_comm/blob/2c8a3f65628d0362d97c29d333d5669b2c7803a6/tools/roslaunch/src/roslaunch/xmlloader.py#L630-L631 I've not created any pull request yet since I don't know if this behavior is what you want. @scpeters @gerkey, if you guys give me some feedback on the intended behavior I can make a PR. > @scpeters @gerkey, if you guys give me some feedback on the intended behavior I can make a PR. 
This is precisely the behavior that would reduce the verbosity of many of my launch files. Is there a reason this behavior wasn't made into a PR? @GoosebumpsFactory I think there's not much interest, but I can make the PR anyway and discuss implementation details there. @alextoind Thanks. That would be great.
2018-10-16T18:39:27Z
[]
[]
ros/ros_comm
1652
ros__ros_comm-1652
[ "1209" ]
b0dcc75a9fe7e781d764aec9a16495e31ca318f2
diff --git a/tools/rosbag/src/rosbag/bag.py b/tools/rosbag/src/rosbag/bag.py --- a/tools/rosbag/src/rosbag/bag.py +++ b/tools/rosbag/src/rosbag/bag.py @@ -79,8 +79,11 @@ class ROSBagException(Exception): """ Base class for exceptions in rosbag. """ - def __init__(self, value): + def __init__(self, value=None): self.value = value + #fix for #1209. needed in Python 2.7. + # For details: https://stackoverflow.com/questions/41808912/cannot-unpickle-exception-subclass + self.args = (value,) def __str__(self): return self.value @@ -96,7 +99,8 @@ class ROSBagUnindexedException(ROSBagException): """ Exception for unindexed bags. """ - def __init__(self): + def __init__(self, *args): + #*args needed for #1209 ROSBagException.__init__(self, 'Unindexed bag') class ROSBagEncryptNotSupportedException(ROSBagException):
diff --git a/test/test_rosbag/test/test_bag.py b/test/test_rosbag/test/test_bag.py --- a/test/test_rosbag/test/test_bag.py +++ b/test/test_rosbag/test/test_bag.py @@ -432,6 +432,20 @@ def _print_bag_records(self, fn): print(bag._OP_CODES.get(op, op)) + # #1209 + def test_rosbag_exceptions_are_pickleable(self): + #bag_exception = rosbag.ROSBagException("msg string") + def test(bag_exception): + import pickle + pickle_str = pickle.dumps(bag_exception) + unpickled = pickle.loads(pickle_str) + self.assertTrue(bag_exception.value == unpickled.value) + test(bag.ROSBagException("msg string")) + test(bag.ROSBagFormatException("msg string 2")) + test(bag.ROSBagUnindexedException()) + test(bag.ROSBagEncryptNotSupportedException("msg string 3")) + test(bag.ROSBagEncryptException("msg string 4")) + if __name__ == '__main__': import rostest PKG='rosbag'
ROSBagException should call the base constructor In Python 2.7 there is a subtle bug with unpickling custom exceptions; this was fixed in 3.3, see https://bugs.python.org/issue1692335. As a workaround for 2.7 we can re-define these exceptions as below. ```python class ROSBagException(Exception): """ Base class for exceptions in rosbag. """ pass class ROSBagFormatException(ROSBagException): """ Exceptions for errors relating to the bag file format. """ pass class ROSBagUnindexedException(ROSBagException): """ Exception for unindexed bags. """ def __init__(self, *args): ROSBagException.__init__(self, 'Unindexed bag') ``` For context, I came across this issue when a `ROSBagUnindexedException` was raised in a `multiprocessing.Pool.map` thread. It throws the `__init__` exception and hangs, as described here: https://lists.gt.net/python/bugs/1025933
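A short, hypothetical sketch of the pickling behaviour the report describes, assuming Python 2.7 semantics: an exception subclass that never populates `self.args` cannot be rebuilt by `pickle`, which is why the patch above stores the message in `args` explicitly. The class names here are illustrative, not the actual rosbag classes.

```python
import pickle

class UnpicklableError(Exception):
    def __init__(self, value):
        self.value = value    # self.args stays empty -> re-raising across processes breaks on 2.7

class PicklableError(Exception):
    def __init__(self, value):
        self.value = value
        self.args = (value,)  # pickle rebuilds the exception from args, mirroring the patch

err = pickle.loads(pickle.dumps(PicklableError('Unindexed bag')))
print(err.value)              # -> Unindexed bag
```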
Can you please provide a pull request with the proposed patch against the current default branch (`lunar-devel`)? Thanks. Sure, though it's not really a ros_comm bug and has been patched in Python 3.3. PR is here: https://github.com/ros/ros_comm/pull/1210
2019-03-13T13:57:00Z
[]
[]
ros/ros_comm
1683
ros__ros_comm-1683
[ "1459" ]
2ca744d876a0648bcb7689af0078fcd2b1c31f2b
diff --git a/clients/rospy/src/rospy/client.py b/clients/rospy/src/rospy/client.py --- a/clients/rospy/src/rospy/client.py +++ b/clients/rospy/src/rospy/client.py @@ -82,7 +82,7 @@ def myargv(argv=None): """ if argv is None: argv = sys.argv - return [a for a in argv if not rosgraph.names.REMAP in a] + return [a for a in argv if not rosgraph.names.is_legal_remap(a)] def load_command_line_node_params(argv): """ @@ -97,7 +97,7 @@ def load_command_line_node_params(argv): try: mappings = {} for arg in argv: - if rosgraph.names.REMAP in arg: + if rosgraph.names.is_legal_remap(arg): src, dst = [x.strip() for x in arg.split(rosgraph.names.REMAP)] if src and dst: if len(src) > 1 and src[0] == '_' and src[1] != '_': diff --git a/tools/rosgraph/src/rosgraph/names.py b/tools/rosgraph/src/rosgraph/names.py --- a/tools/rosgraph/src/rosgraph/names.py +++ b/tools/rosgraph/src/rosgraph/names.py @@ -191,7 +191,7 @@ def load_mappings(argv): """ mappings = {} for arg in argv: - if REMAP in arg: + if is_legal_remap(arg): try: src, dst = [x.strip() for x in arg.split(REMAP)] if src and dst: @@ -243,6 +243,17 @@ def is_legal_base_name(name): m = BASE_NAME_LEGAL_CHARS_P.match(name) return m is not None and m.group(0) == name +REMAP_PATTERN = re.compile('^([\~\/A-Za-z]|_|__)[\w\/]*' + REMAP + '.*') + +def is_legal_remap(arg): + """ + Validates that arg is a legal remap according to U{http://wiki.ros.org/Remapping%20Arguments}. + """ + if arg is None: + return False + m = REMAP_PATTERN.match(arg) + return m is not None and m.group(0) == arg + def canonicalize_name(name): """ Put name in canonical form. Extra slashes '//' are removed and
diff --git a/test/test_rospy/test/unit/test_rospy_client.py b/test/test_rospy/test/unit/test_rospy_client.py --- a/test/test_rospy/test/unit/test_rospy_client.py +++ b/test/test_rospy/test/unit/test_rospy_client.py @@ -86,7 +86,7 @@ def test_myargv(self): self.assertEquals(['-foo', 'bar', '-baz'], myargv(['-foo','bar', '-baz'])) self.assertEquals(['foo'], myargv(['foo','bar:=baz'])) - self.assertEquals(['foo'], myargv(['foo','-bar:=baz'])) + self.assertEquals(['foo','-bar:=baz'], myargv(['foo','-bar:=baz'])) finally: sys.argv = orig_argv
Cannot send data containing `:=` through rostopic or rosservice We cannot send `:=` data using the command line tools such as rosservice or rostopic. ```bash $ rostopic pub /data std_msgs/String "data: ':='" > Usage: rostopic pub /topic type [args...] > > rostopic: error: Please specify message values ``` When I escape the value, the string still contains the escaping backslash.
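For illustration, here is how the stricter remap detection added by the patch above tells a genuine remapping argument apart from YAML message data that merely contains `:=`; the regex and `is_legal_remap` are taken from the change to `rosgraph/names.py`, while the surrounding harness is a minimal, assumed example.

```python
import re

REMAP = ':='
REMAP_PATTERN = re.compile('^([\~\/A-Za-z]|_|__)[\w\/]*' + REMAP + '.*')

def is_legal_remap(arg):
    if arg is None:
        return False
    m = REMAP_PATTERN.match(arg)
    return m is not None and m.group(0) == arg

print(is_legal_remap('chatter:=chatter2'))   # True  -> treated as a remap and stripped from argv
print(is_legal_remap("data: ':='"))          # False -> left alone, so rostopic sees its message argument
```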
2019-04-01T14:33:08Z
[]
[]
ros/ros_comm
1688
ros__ros_comm-1688
[ "1686" ]
6e76dea720c63ec9cd5b7aa2f930be9185ece1ae
diff --git a/clients/rospy/src/rospy/client.py b/clients/rospy/src/rospy/client.py --- a/clients/rospy/src/rospy/client.py +++ b/clients/rospy/src/rospy/client.py @@ -101,7 +101,7 @@ def load_command_line_node_params(argv): src, dst = [x.strip() for x in arg.split(rosgraph.names.REMAP)] if src and dst: if len(src) > 1 and src[0] == '_' and src[1] != '_': - mappings[src[1:]] = yaml.load(dst) + mappings[src[1:]] = yaml.safe_load(dst) return mappings except Exception as e: raise rospy.exceptions.ROSInitException("invalid command-line parameters: %s"%(str(e))) diff --git a/tools/rosbag/src/rosbag/bag.py b/tools/rosbag/src/rosbag/bag.py --- a/tools/rosbag/src/rosbag/bag.py +++ b/tools/rosbag/src/rosbag/bag.py @@ -1250,7 +1250,7 @@ def __init__(self, d): else: setattr(self, a, DictObject(b) if isinstance(b, dict) else b) - obj = DictObject(yaml.load(s)) + obj = DictObject(yaml.safe_load(s)) try: val = eval('obj.' + key) except Exception as ex: diff --git a/tools/rosgraph/src/rosgraph/roslogging.py b/tools/rosgraph/src/rosgraph/roslogging.py --- a/tools/rosgraph/src/rosgraph/roslogging.py +++ b/tools/rosgraph/src/rosgraph/roslogging.py @@ -178,7 +178,7 @@ def configure_logging(logname, level=logging.INFO, filename=None, env=None): os.environ['ROS_LOG_FILENAME'] = log_filename if config_file.endswith(('.yaml', '.yml')): with open(config_file) as f: - dict_conf = yaml.load(f) + dict_conf = yaml.safe_load(f) dict_conf.setdefault('version', 1) logging.config.dictConfig(dict_conf) else: diff --git a/tools/roslaunch/src/roslaunch/loader.py b/tools/roslaunch/src/roslaunch/loader.py --- a/tools/roslaunch/src/roslaunch/loader.py +++ b/tools/roslaunch/src/roslaunch/loader.py @@ -99,7 +99,7 @@ def convert_value(value, type_): raise ValueError("%s is not a '%s' type"%(value, type_)) elif type_ == 'yaml': try: - return yaml.load(value) + return yaml.safe_load(value) except yaml.parser.ParserError as e: raise ValueError(e) else: @@ -410,7 +410,7 @@ def load_rosparam(self, context, ros_config, cmd, param, file_, text, verbose=Tr if rosparam is None: import rosparam try: - data = yaml.load(text) + data = yaml.safe_load(text) # #3162: if there is no YAML, load() will return an # empty string. We want an empty dictionary instead # for our representation of empty. 
diff --git a/tools/rosparam/src/rosparam/__init__.py b/tools/rosparam/src/rosparam/__init__.py --- a/tools/rosparam/src/rosparam/__init__.py +++ b/tools/rosparam/src/rosparam/__init__.py @@ -99,6 +99,7 @@ def construct_yaml_binary(loader, node): # register the (de)serializers with pyyaml yaml.add_representer(Binary,represent_xml_binary) yaml.add_constructor(u'tag:yaml.org,2002:binary', construct_yaml_binary) +yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:binary', construct_yaml_binary) def construct_angle_radians(loader, node): """ @@ -185,7 +186,7 @@ def load_str(str, filename, default_namespace=None, verbose=False): """ paramlist = [] default_namespace = default_namespace or get_ros_namespace() - for doc in yaml.load_all(str): + for doc in yaml.safe_load_all(str): if NS in doc: ns = ns_join(default_namespace, doc.get(NS, None)) if verbose: @@ -633,10 +634,14 @@ def yamlmain(argv=None): yaml.add_constructor(u'!radians', construct_angle_radians) yaml.add_constructor(u'!degrees', construct_angle_degrees) +yaml.SafeLoader.add_constructor(u'!radians', construct_angle_radians) +yaml.SafeLoader.add_constructor(u'!degrees', construct_angle_degrees) # allow both !degrees 180, !radians 2*pi pattern = re.compile(r'^deg\([^\)]*\)$') yaml.add_implicit_resolver(u'!degrees', pattern, first="deg(") +yaml.SafeLoader.add_implicit_resolver(u'!degrees', pattern, first="deg(") pattern = re.compile(r'^rad\([^\)]*\)$') yaml.add_implicit_resolver(u'!radians', pattern, first="rad(") +yaml.SafeLoader.add_implicit_resolver(u'!radians', pattern, first="rad(") diff --git a/tools/rosservice/src/rosservice/__init__.py b/tools/rosservice/src/rosservice/__init__.py --- a/tools/rosservice/src/rosservice/__init__.py +++ b/tools/rosservice/src/rosservice/__init__.py @@ -607,7 +607,7 @@ def _rosservice_cmd_call(argv): # convert empty args to YAML-empty strings if arg == '': arg = "''" - service_args.append(yaml.load(arg)) + service_args.append(yaml.safe_load(arg)) if not service_args and has_service_args(service_name, service_class=service_class): if sys.stdin.isatty(): parser.error("Please specify service arguments") @@ -650,7 +650,7 @@ def _stdin_yaml_arg(): elif arg.strip() != '---': buff = buff + arg try: - loaded = yaml.load(buff.rstrip()) + loaded = yaml.safe_load(buff.rstrip()) except Exception as e: print("Invalid YAML: %s"%str(e), file=sys.stderr) if loaded is not None: diff --git a/tools/rostopic/src/rostopic/__init__.py b/tools/rostopic/src/rostopic/__init__.py --- a/tools/rostopic/src/rostopic/__init__.py +++ b/tools/rostopic/src/rostopic/__init__.py @@ -1779,7 +1779,7 @@ def _rostopic_cmd_pub(argv): try: pub_args = [] for arg in args[2:]: - pub_args.append(yaml.load(arg)) + pub_args.append(yaml.safe_load(arg)) except Exception as e: parser.error("Argument error: "+str(e)) @@ -1822,7 +1822,7 @@ def bagy_iter(): try: with open(filename, 'r') as f: # load all documents - data = yaml.load_all(f) + data = yaml.safe_load_all(f) for d in data: yield [d] except yaml.YAMLError as e: @@ -2014,7 +2014,7 @@ def stdin_yaml_arg(): if arg.strip() == '---': # End of document try: - loaded = yaml.load(buff.rstrip()) + loaded = yaml.safe_load(buff.rstrip()) except Exception as e: sys.stderr.write("Invalid YAML: %s\n"%str(e)) if loaded is not None:
diff --git a/test/test_roslib_comm/test/test_roslib_message.py b/test/test_roslib_comm/test/test_roslib_message.py --- a/test/test_roslib_comm/test/test_roslib_message.py +++ b/test/test_roslib_comm/test/test_roslib_message.py @@ -61,7 +61,7 @@ def test_strify_message(self): def roundtrip(m): yaml_text = strify_message(m) print(yaml_text) - loaded = yaml.load(yaml_text) + loaded = yaml.safe_load(yaml_text) print("loaded", loaded) new_inst = m.__class__() if loaded is not None: diff --git a/test/test_rosmaster/test/client_verification/test_slave_api.py b/test/test_rosmaster/test/client_verification/test_slave_api.py --- a/test/test_rosmaster/test/client_verification/test_slave_api.py +++ b/test/test_rosmaster/test/client_verification/test_slave_api.py @@ -106,7 +106,7 @@ def __init__(self, *args, **kwds): def load_profile(self, filename): import yaml with open(filename) as f: - d = yaml.load(f) + d = yaml.safe_load(f) self.required_pubs = d.get('pubs', {}) self.required_subs = d.get('subs', {}) diff --git a/test/test_rosparam/test/check_rosparam.py b/test/test_rosparam/test/check_rosparam.py --- a/test/test_rosparam/test/check_rosparam.py +++ b/test/test_rosparam/test/check_rosparam.py @@ -227,7 +227,7 @@ def test_rosparam_get(self): with fakestdout() as b: rosparam.yamlmain([cmd, 'get', "g1"]) import yaml - d = yaml.load(b.getvalue()) + d = yaml.safe_load(b.getvalue()) self.assertEquals(d['float'], 10.0) self.assertEquals(d['int'], 10.0) self.assertEquals(d['string'], "g1-foo-value") @@ -346,18 +346,18 @@ def test_rosparam_dump(self): import yaml with open(f_out) as b: with open(f) as b2: - self.assertEquals(yaml.load(b.read()), yaml.load(b2.read())) + self.assertEquals(yaml.safe_load(b.read()), yaml.safe_load(b2.read())) rosparam.yamlmain([cmd, 'dump', '-v', f_out, 'rosparam_dump']) with open(f_out) as b: with open(f) as b2: - self.assertEquals(yaml.load(b.read()), yaml.load(b2.read())) + self.assertEquals(yaml.safe_load(b.read()), yaml.safe_load(b2.read())) # yaml file and std_out should be the same with fakestdout() as b: rosparam.yamlmain([cmd, 'dump']) with open(f) as b2: - self.assertEquals(yaml.load(b.getvalue())['rosparam_dump'], yaml.load(b2.read())) + self.assertEquals(yaml.safe_load(b.getvalue())['rosparam_dump'], yaml.safe_load(b2.read())) def test_fullusage(self): import rosparam diff --git a/test/test_rosparam/test/check_rosparam_command_line_online.py b/test/test_rosparam/test/check_rosparam_command_line_online.py --- a/test/test_rosparam/test/check_rosparam_command_line_online.py +++ b/test/test_rosparam/test/check_rosparam_command_line_online.py @@ -120,7 +120,7 @@ def test_rosparam(self): # - dictionary output = Popen([cmd, 'get', "g1"], stdout=PIPE).communicate()[0] import yaml - d = yaml.load(output) + d = yaml.safe_load(output) self.assertEquals(d['float'], 10.0) self.assertEquals(d['int'], 10.0) self.assertEquals(d['string'], "g1-foo-value") diff --git a/test/test_rosservice/test/check_rosservice_command_line_online.py b/test/test_rosservice/test/check_rosservice_command_line_online.py --- a/test/test_rosservice/test/check_rosservice_command_line_online.py +++ b/test/test_rosservice/test/check_rosservice_command_line_online.py @@ -122,7 +122,7 @@ def test_rosservice(self): output = Popen([cmd, 'call', name, v], stdout=PIPE).communicate()[0] output = output.strip() self.assert_(output, output) - val = yaml.load(output)['header'] + val = yaml.safe_load(output)['header'] self.assertEquals('', val['frame_id']) self.assert_(val['seq'] >= 0) self.assertEquals(0, 
val['stamp']['secs']) @@ -131,7 +131,7 @@ def test_rosservice(self): # test with auto headers for v in ['{header: auto}', '{header: {stamp: now}}']: output = Popen([cmd, 'call', name, v], stdout=PIPE).communicate()[0] - val = yaml.load(output.strip())['header'] + val = yaml.safe_load(output.strip())['header'] self.assertEquals('', val['frame_id']) self.assert_(val['seq'] >= 0) self.assert_(val['stamp']['secs'] >= int(t)) diff --git a/test/test_rostopic/test/test_rostopic_unit.py b/test/test_rostopic/test/test_rostopic_unit.py --- a/test/test_rostopic/test/test_rostopic_unit.py +++ b/test/test_rostopic/test/test_rostopic_unit.py @@ -249,16 +249,16 @@ def test_strify_message(self): m = String('foo') self.assertEquals('data: "foo"', strify_message(m, field_filter=f)) m = TVals(Time(123, 456), Duration(78, 90)) - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals({'t': {'secs': 123, 'nsecs': 456}, 'd': {'secs': 78, 'nsecs': 90}}, v) m = simple_v - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals(simple_d, v) m = arrays_v - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals(arrays_d, v) m = embed_v - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals(embed_d, v) f = create_field_filter(echo_nostr=True, echo_noarr=False) @@ -267,16 +267,16 @@ def test_strify_message(self): m = String('foo') self.assertEquals('', strify_message(m, field_filter=f)) m = TVals(Time(123, 456), Duration(78, 90)) - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals({'t': {'secs': 123, 'nsecs': 456}, 'd': {'secs': 78, 'nsecs': 90}}, v) m = simple_v - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals(simple_nostr, v) m = arrays_v - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals(arrays_nostr, v) m = embed_v - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals({'simple': simple_nostr, 'arrays': arrays_nostr}, v) f = create_field_filter(echo_nostr=False, echo_noarr=True) @@ -285,16 +285,16 @@ def test_strify_message(self): m = String('foo') self.assertEquals('data: "foo"', strify_message(m, field_filter=f)) m = TVals(Time(123, 456), Duration(78, 90)) - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals({'t': {'secs': 123, 'nsecs': 456}, 'd': {'secs': 78, 'nsecs': 90}}, v) m = simple_v - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals(simple_d, v) m = arrays_v - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals(None, v) m = embed_v - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals({'simple': simple_d, 'arrays': None}, v) f = create_field_filter(echo_nostr=True, echo_noarr=True) @@ -303,13 +303,13 @@ def test_strify_message(self): m = String('foo') self.assertEquals('', strify_message(m, field_filter=f)) m = TVals(Time(123, 456), Duration(78, 90)) - v = 
yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals({'t': {'secs': 123, 'nsecs': 456}, 'd': {'secs': 78, 'nsecs': 90}}, v) m = simple_v - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals(simple_nostr, v) m = embed_v - v = yaml.load(strify_message(m, field_filter=f)) + v = yaml.safe_load(strify_message(m, field_filter=f)) self.assertEquals({'simple': simple_nostr, 'arrays': None}, v) def test_create_field_filter(self): diff --git a/tools/roslaunch/test/unit/test_roslaunch_dump_params.py b/tools/roslaunch/test/unit/test_roslaunch_dump_params.py --- a/tools/roslaunch/test/unit/test_roslaunch_dump_params.py +++ b/tools/roslaunch/test/unit/test_roslaunch_dump_params.py @@ -53,7 +53,7 @@ def test_roslaunch(self): o, e = p.communicate() self.assert_(p.returncode == 0, "Return code nonzero for param dump! Code: %d" % (p.returncode)) - self.assertEquals({'/noop': 'noop'}, yaml.load(o)) + self.assertEquals({'/noop': 'noop'}, yaml.safe_load(o)) p = Popen([cmd, '--dump-params', 'roslaunch', 'test-dump-rosparam.launch'], stdout = PIPE) o, e = p.communicate() @@ -95,7 +95,7 @@ def test_roslaunch(self): '/noparam1': 'value1', '/noparam2': 'value2', } - output_val = yaml.load(o) + output_val = yaml.safe_load(o) if not val == output_val: for k, v in val.items(): if k not in output_val:
rosparam YAMLLoadWarning Recently I've been seeing the following warning when I run `rosparam set` from the command line. ``` $ rosparam set /use_sim_time true /opt/ros/melodic/lib/python2.7/dist-packages/rosparam/__init__.py:370: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details. set_param_raw(param, yaml.load(value), verbose=verbose) ``` As far as I can tell this is related to the new release of PyYAML deprecating the use of `yaml.load(..)`, as described here: https://github.com/yaml/pyyaml/pull/257 It is worth noting that I have PyYAML version 5.1 installed in my user directory through pip; this won't happen when using PyYAML from the Ubuntu repositories, since that is currently still version 3.12.
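A minimal sketch of the difference the warning is about, assuming PyYAML 5.1 or newer is installed: plain `yaml.load()` without a `Loader` argument emits the `YAMLLoadWarning`, while `yaml.safe_load()` (what the patch above switches to) does not.

```python
import yaml

doc = "use_sim_time: true"

params = yaml.safe_load(doc)                     # no warning, restricted to plain YAML types
# equivalent explicit spelling of the same behaviour:
params = yaml.load(doc, Loader=yaml.SafeLoader)
print(params)                                    # {'use_sim_time': True}
```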
Please consider providing a pull request to use `yaml.safe_load()` instead.
2019-04-03T19:55:49Z
[]
[]
ros/ros_comm
1695
ros__ros_comm-1695
[ "1562" ]
71872217c441280f189544aa0c2cf4909774f953
diff --git a/clients/rospy/src/rospy/impl/statistics.py b/clients/rospy/src/rospy/impl/statistics.py --- a/clients/rospy/src/rospy/impl/statistics.py +++ b/clients/rospy/src/rospy/impl/statistics.py @@ -68,15 +68,15 @@ def read_parameters(self): Fetch window parameters from parameter server """ - # Range of window length, in seconds - self.min_elements = rospy.get_param("/statistics_window_min_elements", 10) - self.max_elements = rospy.get_param("/statistics_window_max_elements", 100) - # Range of acceptable messages in window. # Window size will be adjusted if number of observed is # outside this range. - self.max_window = rospy.get_param("/statistics_window_max_size", 64) + self.min_elements = rospy.get_param("/statistics_window_min_elements", 10) + self.max_elements = rospy.get_param("/statistics_window_max_elements", 100) + + # Range of window length, in seconds self.min_window = rospy.get_param("/statistics_window_min_size", 4) + self.max_window = rospy.get_param("/statistics_window_max_size", 64) def callback(self, msg, publisher, stat_bytes): """ @@ -208,9 +208,10 @@ def sendStatistics(self, subscriber_statistics_logger): self.pub.publish(msg) # adjust window, if message count is not appropriate. - if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and self.pub_frequency.to_sec() * 2 <= subscriber_statistics_logger.max_window: + pub_period = 1.0 / self.pub_frequency.to_sec() + if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and pub_period / 2 >= subscriber_statistics_logger.min_window: self.pub_frequency *= 2 - if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and self.pub_frequency.to_sec() / 2 >= subscriber_statistics_logger.min_window: + if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and pub_period * 2 <= subscriber_statistics_logger.max_window: self.pub_frequency /= 2 # clear collected stats, start new window. @@ -257,7 +258,7 @@ def callback(self, subscriber_statistics_logger, msg, stat_bytes): self.last_seq_ = msg.header.seq # send out statistics with a certain frequency - if self.last_pub_time + self.pub_frequency < arrival_time: + if self.last_pub_time + rospy.Duration(1.0 / self.pub_frequency.to_sec()) < arrival_time: self.last_pub_time = arrival_time self.sendStatistics(subscriber_statistics_logger)
diff --git a/test/test_roscpp/test/CMakeLists.txt b/test/test_roscpp/test/CMakeLists.txt --- a/test/test_roscpp/test/CMakeLists.txt +++ b/test/test_roscpp/test/CMakeLists.txt @@ -149,6 +149,7 @@ add_rostest(launch/ns_node_remapping.xml) add_rostest(launch/search_param.xml) add_rostest(launch/stamped_topic_statistics_with_empty_timestamp.xml) +add_rostest(launch/topic_statistic_frequency.xml DEPENDENCIES ${PROJECT_NAME}-publisher_rate ${PROJECT_NAME}-subscriber ${PROJECT_NAME}-topic_statistic_frequency) # Test spinners add_rostest(launch/spinners.xml) diff --git a/test/test_roscpp/test/launch/topic_statistic_frequency.xml b/test/test_roscpp/test/launch/topic_statistic_frequency.xml new file mode 100644 --- /dev/null +++ b/test/test_roscpp/test/launch/topic_statistic_frequency.xml @@ -0,0 +1,39 @@ +<!-- basic smoke test for TopicStatistics --> +<launch> + <param name="/enable_statistics" value="true" /> + <!-- default 10 would take 5s to warm up for very slow talker --> + <param name="/statistics_window_min_elements" value="4" /> + + <!-- under 1Hz important, since checking window starts there --> + <node name="very_slow_talker" pkg="test_roscpp" type="test_roscpp-publisher_rate" required="true" args="0.8"> + <remap from="data" to="very_slow_chatter" /> + </node> + <node name="very_slow_listener" pkg="test_roscpp" type="test_roscpp-subscriber" required="true"> + <remap from="data" to="very_slow_chatter" /> + </node> + + <!-- publishing within fairly normal range of frequencies --> + <node name="slow_talker" pkg="test_roscpp" type="test_roscpp-publisher_rate" required="true" args="18"> + <remap from="data" to="slow_chatter" /> + </node> + <node name="slow_listener" pkg="test_roscpp" type="test_roscpp-subscriber" required="true"> + <remap from="data" to="slow_chatter" /> + </node> + + <node name="fast_talker" pkg="test_roscpp" type="test_roscpp-publisher_rate" required="true" args="53"> + <remap from="data" to="fast_chatter" /> + </node> + <node name="fast_listener" pkg="test_roscpp" type="test_roscpp-subscriber" required="true"> + <remap from="data" to="fast_chatter" /> + </node> + + <!-- fast outlier (for most ros systems) --> + <node name="very_fast_talker" pkg="test_roscpp" type="test_roscpp-publisher_rate" required="true" args="171"> + <remap from="data" to="/very_fast_chatter" /> + </node> + <node name="very_fast_listener" pkg="test_roscpp" type="test_roscpp-subscriber" required="true"> + <remap from="data" to="/very_fast_chatter" /> + </node> + + <test test-name="roscpp_topic_statistics" pkg="test_roscpp" type="test_roscpp-topic_statistic_frequency" /> +</launch> diff --git a/test/test_roscpp/test/src/CMakeLists.txt b/test/test_roscpp/test/src/CMakeLists.txt --- a/test/test_roscpp/test/src/CMakeLists.txt +++ b/test/test_roscpp/test/src/CMakeLists.txt @@ -205,13 +205,20 @@ add_dependencies(${PROJECT_NAME}-string_msg_expect ${std_msgs_EXPORTED_TARGETS}) add_executable(${PROJECT_NAME}-stamped_topic_statistics_empty_timestamp EXCLUDE_FROM_ALL stamped_topic_statistics_empty_timestamp.cpp) target_link_libraries(${PROJECT_NAME}-stamped_topic_statistics_empty_timestamp ${GTEST_LIBRARIES} ${catkin_LIBRARIES}) -# The publisher and subscriber are compiled but not registered as a unit test +add_executable(${PROJECT_NAME}-topic_statistic_frequency EXCLUDE_FROM_ALL topic_statistic_frequency.cpp) +target_link_libraries(${PROJECT_NAME}-topic_statistic_frequency ${GTEST_LIBRARIES} ${catkin_LIBRARIES}) + +# The publishers and subscriber are compiled but not registered as a unit test # since the test 
execution requires a network connection which drops packages. # Call scripts/test_udp_with_dropped_packets.sh to run the test. add_executable(${PROJECT_NAME}-publisher EXCLUDE_FROM_ALL publisher.cpp) target_link_libraries(${PROJECT_NAME}-publisher ${catkin_LIBRARIES}) add_dependencies(${PROJECT_NAME}-publisher ${std_msgs_EXPORTED_TARGETS}) +add_executable(${PROJECT_NAME}-publisher_rate EXCLUDE_FROM_ALL publisher_rate.cpp) +target_link_libraries(${PROJECT_NAME}-publisher_rate ${catkin_LIBRARIES}) +add_dependencies(${PROJECT_NAME}-publisher_rate ${std_msgs_EXPORTED_TARGETS}) + add_executable(${PROJECT_NAME}-subscriber EXCLUDE_FROM_ALL subscriber.cpp) target_link_libraries(${PROJECT_NAME}-subscriber ${catkin_LIBRARIES}) add_dependencies(${PROJECT_NAME}-subscriber ${std_msgs_EXPORTED_TARGETS}) @@ -281,6 +288,7 @@ if(TARGET tests) ${PROJECT_NAME}-publisher ${PROJECT_NAME}-subscriber ${PROJECT_NAME}-stamped_topic_statistics_empty_timestamp + ${PROJECT_NAME}-topic_statistic_frequency ) endif() @@ -346,3 +354,4 @@ add_dependencies(${PROJECT_NAME}-left_right ${${PROJECT_NAME}_EXPORTED_TARGETS}) add_dependencies(${PROJECT_NAME}-string_msg_expect ${${PROJECT_NAME}_EXPORTED_TARGETS}) add_dependencies(${PROJECT_NAME}-stamped_topic_statistics_empty_timestamp ${${PROJECT_NAME}_EXPORTED_TARGETS}) +add_dependencies(${PROJECT_NAME}-topic_statistic_frequency ${${PROJECT_NAME}_EXPORTED_TARGETS}) diff --git a/test/test_roscpp/test/src/publisher_rate.cpp b/test/test_roscpp/test/src/publisher_rate.cpp new file mode 100644 --- /dev/null +++ b/test/test_roscpp/test/src/publisher_rate.cpp @@ -0,0 +1,38 @@ +// Publish big data chunks +// Author: Max Schwarz <[email protected]> + +#include <ros/publisher.h> +#include <ros/init.h> +#include <ros/node_handle.h> + +#include <std_msgs/Int8MultiArray.h> + +int main(int argc, char** argv) +{ + ros::init(argc, argv, "publisher"); + ros::NodeHandle n; + + const size_t NUM_BYTES = 8; + std_msgs::Int8MultiArray data; + data.data.reserve(NUM_BYTES); + + assert(argc > 1); + float frequency = atof(argv[1]); + + ros::Publisher pub = n.advertise<std_msgs::Int8MultiArray>("data", 1); + ros::Rate rate(frequency); + + size_t start = 0; + while(ros::ok()) + { + data.data.clear(); + for(size_t i = 0; i < NUM_BYTES; ++i) + { + data.data.push_back(start + i); + } + pub.publish(data); + rate.sleep(); + start++; + } + return 0; +} diff --git a/test/test_roscpp/test/src/topic_statistic_frequency.cpp b/test/test_roscpp/test/src/topic_statistic_frequency.cpp new file mode 100644 --- /dev/null +++ b/test/test_roscpp/test/src/topic_statistic_frequency.cpp @@ -0,0 +1,65 @@ +#include <map> +#include <string> + +#include <ros/ros.h> +#include <gtest/gtest.h> +#include <rosgraph_msgs/TopicStatistics.h> +#include <boost/thread.hpp> +#include <std_msgs/Int8MultiArray.h> + +class Aggregator { +public: + std::map<std::string, ros::Duration> topic_period_mean_map_; + + void cb(const rosgraph_msgs::TopicStatistics& msg) { + topic_period_mean_map_[msg.topic] = msg.period_mean; + } + + bool frequencyAcceptable(const std::string& topic, float expected) { + float errorMargin = 0.1; + float foundFreq = 1.f / topic_period_mean_map_[topic].toSec(); + return std::fabs(foundFreq - expected) / expected <= errorMargin; + } +}; + +void assertEventuallyHasTopic(const Aggregator& agg, const std::string& topic) { + ros::Duration timeout(5.0); + auto start = ros::Time::now(); + while (ros::Time::now() - start < timeout && !agg.topic_period_mean_map_.count(topic)) { + ros::Duration(0.5).sleep(); + } + 
ASSERT_EQ(agg.topic_period_mean_map_.count(topic), 1u); +} + +TEST(TopicStatisticFrequency, statisticFrequency) +{ + ros::NodeHandle nh; + Aggregator agg; + ros::Subscriber stat_sub = nh.subscribe("/statistics", 1, &Aggregator::cb, &agg); + + ros::AsyncSpinner spinner(4); + spinner.start(); + + ros::Duration(5.0).sleep(); + + assertEventuallyHasTopic(agg, "/very_fast_chatter"); + assertEventuallyHasTopic(agg, "/fast_chatter"); + assertEventuallyHasTopic(agg, "/slow_chatter"); + assertEventuallyHasTopic(agg, "/very_slow_chatter"); + + ros::shutdown(); + + ASSERT_TRUE(agg.frequencyAcceptable("/very_fast_chatter", 171)); + ASSERT_TRUE(agg.frequencyAcceptable("/fast_chatter", 53)); + ASSERT_TRUE(agg.frequencyAcceptable("/slow_chatter", 18)); + ASSERT_TRUE(agg.frequencyAcceptable("/very_slow_chatter", 0.8)); +} + + +int main(int argc, char** argv) +{ + testing::InitGoogleTest(&argc, argv); + ros::init(argc, argv, "topic_statistic_frequency"); + ros::NodeHandle nh; + return RUN_ALL_TESTS(); +} diff --git a/test/test_rospy/CMakeLists.txt b/test/test_rospy/CMakeLists.txt --- a/test/test_rospy/CMakeLists.txt +++ b/test/test_rospy/CMakeLists.txt @@ -58,4 +58,5 @@ if(CATKIN_ENABLE_TESTING) add_rostest(test/rostest/on_shutdown.test) add_rostest(test/rostest/sub_to_multiple_pubs.test) add_rostest(test/rostest/latch_unsubscribe.test) + add_rostest(test/rostest/statistics.test) endif() diff --git a/test/test_rospy/nodes/freq_talker b/test/test_rospy/nodes/freq_talker new file mode 100755 --- /dev/null +++ b/test/test_rospy/nodes/freq_talker @@ -0,0 +1,58 @@ +#!/usr/bin/env python +# Software License Agreement (BSD License) +# +# Copyright (c) 2008, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Willow Garage, Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +''' +Copy of talker demo for stats testing purposes. 
Publishes String to 'chatter' +''' + +import rospy +from std_msgs.msg import String + +NAME = 'talker' + + +def talker(): + pub = rospy.Publisher("chatter", String, queue_size=1) + rospy.init_node(NAME, anonymous=True) + freq = rospy.get_param("~frequency", 10) + rate = rospy.Rate(freq) + + count = 0 + while not rospy.is_shutdown(): + pub.publish(String("hello world {}".format(count))) + count += 1 + rate.sleep() + + +if __name__ == '__main__': + talker() diff --git a/test/test_rospy/test/rostest/statistics.test b/test/test_rospy/test/rostest/statistics.test new file mode 100644 --- /dev/null +++ b/test/test_rospy/test/rostest/statistics.test @@ -0,0 +1,42 @@ +<!-- basic smoke test for TopicStatistics --> +<launch> + <param name="/enable_statistics" value="true" /> + <!-- default 10 would take 5s to warm up for very slow talker --> + <param name="/statistics_window_min_elements" value="5" /> + + <!-- under 1Hz important, since checking window starts there --> + <node name="very_slow_talker" pkg="test_rospy" type="freq_talker" required="true"> + <param name="frequency" value="0.5" /> + <remap from="chatter" to="very_slow_chatter" /> + </node> + <node name="very_slow_listener" pkg="test_rospy" type="listener.py" required="true"> + <remap from="chatter" to="very_slow_chatter" /> + </node> + <!-- publishing within fairly normal range of frequencies --> + <node name="slow_talker" pkg="test_rospy" type="freq_talker" required="true"> + <param name="frequency" value="8" /> + <remap from="chatter" to="slow_chatter" /> + </node> + <node name="slow_listener" pkg="test_rospy" type="listener.py" required="true"> + <remap from="chatter" to="slow_chatter" /> + </node> + + <node name="fast_talker" pkg="test_rospy" type="freq_talker" required="true"> + <param name="frequency" value="53" /> + <remap from="chatter" to="fast_chatter" /> + </node> + <node name="fast_listener" pkg="test_rospy" type="listener.py" required="true"> + <remap from="chatter" to="fast_chatter" /> + </node> + + <!-- fast outlier (for most ros systems) --> + <node name="very_fast_talker" pkg="test_rospy" type="freq_talker" required="true"> + <param name="frequency" value="150" /> + <remap from="chatter" to="very_fast_chatter" /> + </node> + <node name="very_fast_listener" pkg="test_rospy" type="listener.py" required="true"> + <remap from="chatter" to="very_fast_chatter" /> + </node> + + <test test-name="rospy_topic_statistics" pkg="test_rospy" type="test_topic_statistics.py" /> +</launch> diff --git a/test/test_rospy/test/rostest/test_topic_statistics.py b/test/test_rospy/test/rostest/test_topic_statistics.py new file mode 100755 --- /dev/null +++ b/test/test_rospy/test/rostest/test_topic_statistics.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +# Software License Agreement (BSD License) +# +# Copyright (c) 2008, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Willow Garage, Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +''' +Integration test for topic statistics +''' + +from __future__ import print_function +import sys +import unittest + +import rospy +import rostest +from rosgraph_msgs.msg import TopicStatistics + +PKG = 'test_rospy' + + +class TestTopicStatistics(unittest.TestCase): + def setUp(self): + self.topic_statistic_msg_map = {} + + def new_msg(self, msg): + self.topic_statistic_msg_map[msg.topic] = msg + + def assert_eventually( + self, cond, timeout=rospy.Duration(5.0), interval=rospy.Duration(0.5) + ): + started = rospy.Time.now() + while rospy.Time.now() - started < timeout: + if cond(): + return True + rospy.sleep(interval) + self.assertTrue(False) + + def frequency_acceptable(self, topic, expected, error_margin=0.1): + ''' return True if topic message's measured frequency + is within some error margin of expected frequency ''' + msg = self.topic_statistic_msg_map[topic] + found_freq = 1.0 / msg.period_mean.to_sec() + rospy.loginfo( + "Testing {}'s found frequency {} against expected {}".format( + topic, found_freq, expected)) + return abs(found_freq - expected) / expected <= error_margin + + def test_frequencies(self): + rospy.Subscriber('/statistics', TopicStatistics, self.new_msg) + + self.assert_eventually( + lambda: '/very_fast_chatter' in self.topic_statistic_msg_map) + self.assert_eventually( + lambda: '/fast_chatter' in self.topic_statistic_msg_map) + self.assert_eventually( + lambda: '/slow_chatter' in self.topic_statistic_msg_map) + self.assert_eventually( + lambda: '/very_slow_chatter' in self.topic_statistic_msg_map) + + self.assertTrue(self.frequency_acceptable('/very_fast_chatter', 150)) + self.assertTrue(self.frequency_acceptable('/fast_chatter', 53)) + self.assertTrue(self.frequency_acceptable('/slow_chatter', 8)) + self.assertTrue(self.frequency_acceptable('/very_slow_chatter', 0.5)) + + +if __name__ == '__main__': + rospy.init_node('test_topic_statistics') + rostest.run(PKG, 'rospy_topic_statistics', TestTopicStatistics, sys.argv)
Topic Statistics reports 0 for topics under 1Hz When the `/enable_statistics` parameter is `true`, subscribers publish to the `/statistics` topic. For a topic with an expected publishing frequency of 0.2 Hz, this feature always reports `msg.period_mean` as 0, no matter how much time is given.
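A minimal way to observe what gets reported on `/statistics` is sketched below. This is an illustrative snippet, not part of the patch: the node name is made up, and it assumes `/enable_statistics` is `true` and at least one slow publisher/subscriber pair (e.g. a 0.2 Hz talker) is running.

```python
#!/usr/bin/env python
# Sketch: print the measured period/frequency reported for each monitored topic.
# Assumes /enable_statistics is set to true; names here are illustrative only.
import rospy
from rosgraph_msgs.msg import TopicStatistics

def on_stats(msg):
    period = msg.period_mean.to_sec()
    freq = 1.0 / period if period > 0.0 else float('nan')
    rospy.loginfo('%s: period_mean=%.3fs (~%.2f Hz)', msg.topic, period, freq)

if __name__ == '__main__':
    rospy.init_node('statistics_probe')
    rospy.Subscriber('/statistics', TopicStatistics, on_stats)
    rospy.spin()
```

With the bug present, a 0.2 Hz topic keeps printing `period_mean=0.000s`; the expectation behind the PR is that sub-1 Hz rates show up once enough samples have accumulated, which is why the test launch file above lowers `/statistics_window_min_elements` to 5.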
2019-04-12T11:06:06Z
[]
[]
ros/ros_comm
1,698
ros__ros_comm-1698
[ "1694" ]
71872217c441280f189544aa0c2cf4909774f953
diff --git a/tools/rosgraph/src/rosgraph/network.py b/tools/rosgraph/src/rosgraph/network.py --- a/tools/rosgraph/src/rosgraph/network.py +++ b/tools/rosgraph/src/rosgraph/network.py @@ -83,18 +83,13 @@ def parse_http_host_and_port(url): :returns: hostname and port number in URL or 80 (default), ``(str, int)`` :raises: :exc:`ValueError` If the url does not validate """ - # can't use p.port because that's only available in Python 2.5 if not url: raise ValueError('not a valid URL') p = urlparse.urlparse(url) - if not p[0] or not p[1]: #protocol and host + if not p.scheme or not p.hostname: raise ValueError('not a valid URL') - if ':' in p[1]: - hostname, port = p[1].split(':') - port = int(port) - else: - hostname, port = p[1], 80 - return hostname, port + port = p.port if p.port else 80 + return p.hostname, port def _is_unix_like_platform(): """
diff --git a/tools/rosgraph/test/test_network.py b/tools/rosgraph/test/test_network.py --- a/tools/rosgraph/test/test_network.py +++ b/tools/rosgraph/test/test_network.py @@ -152,6 +152,7 @@ def test_parse_http_host_and_port(self): assert ('localhost', 1234) == parse_http_host_and_port('http://localhost:1234') assert ('localhost', 1) == parse_http_host_and_port('http://localhost:1') assert ('willowgarage.com', 1) == parse_http_host_and_port('http://willowgarage.com:1') + assert ('FEDC:BA98:7654:3210:FEDC:BA98:7654:3210'.lower(), 81) == parse_http_host_and_port('http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:81') def test_get_local_address(self): # mostly a tripwire test
parse_http_host_and_port function does not support IPv6 addresses https://github.com/ros/ros_comm/blob/71872217c441280f189544aa0c2cf4909774f953/tools/rosgraph/src/rosgraph/network.py#L75 This function doesn't support IPv6 addresses of the form: http://[<link address>%<network interface>]:<port> where the link address is of the form: aaaa:bbbb:cccc:dddd:eeee:ffff:gggg:hhhh
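For context on why the fix leans on `urlparse` instead of splitting the netloc on `':'`: the standard library's `p.hostname` and `p.port` already understand bracketed IPv6 hosts. The snippet below only illustrates that behaviour for the non-scoped address used in the test above; zone-ID handling with `%<interface>` is not covered here.

```python
# Illustration of urlparse's handling of bracketed IPv6 hosts (standard library only).
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse        # Python 2

for url in ('http://localhost:1234',
            'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:81'):
    p = urlparse(url)
    # hostname strips the brackets and lower-cases the address;
    # port is already an int, or None when no port is given.
    print(p.hostname, p.port if p.port else 80)
```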
2019-04-15T10:28:15Z
[]
[]
ros/ros_comm
1,885
ros__ros_comm-1885
[ "274" ]
902fb00fc7b2e881575a270a99c3a077ba5cdbba
diff --git a/tools/roslaunch/src/roslaunch/xmlloader.py b/tools/roslaunch/src/roslaunch/xmlloader.py --- a/tools/roslaunch/src/roslaunch/xmlloader.py +++ b/tools/roslaunch/src/roslaunch/xmlloader.py @@ -388,8 +388,6 @@ def _node_tag(self, tag, context, ros_config, default_machine, is_test=False, ve required = self.opt_attrs(tag, context, ('machine', 'args', 'output', 'respawn', 'respawn_delay', 'cwd', 'launch-prefix', 'required')) - if tag.hasAttribute('machine') and not len(machine.strip()): - raise XmlParseException("<node> 'machine' must be non-empty: [%s]"%machine) if not machine and default_machine: machine = default_machine.name # validate respawn, required
diff --git a/tools/roslaunch/test/unit/test_xmlloader.py b/tools/roslaunch/test/unit/test_xmlloader.py --- a/tools/roslaunch/test/unit/test_xmlloader.py +++ b/tools/roslaunch/test/unit/test_xmlloader.py @@ -757,7 +757,6 @@ def test_node_invalid(self): 'test-node-invalid-name-1.xml', 'test-node-invalid-name-2.xml', 'test-node-invalid-name-3.xml', - 'test-node-invalid-machine.xml', 'test-node-invalid-respawn.xml', 'test-node-invalid-respawn-required.xml', 'test-node-invalid-required-1.xml', diff --git a/tools/roslaunch/test/xml/test-node-invalid-machine.xml b/tools/roslaunch/test/xml/test-node-invalid-machine.xml deleted file mode 100644 --- a/tools/roslaunch/test/xml/test-node-invalid-machine.xml +++ /dev/null @@ -1,3 +0,0 @@ -<launch> - <node name="n" pkg="package" type="test_ns_invalid" machine="" /> -</launch>
Allow empty machine attribute Allowing an empty machine attribute on the node tag would make it easier to write launch files with an optional machine argument. An example: ``` <arg name="machine" default=""/> <node name="some_name" pkg="some_pkg" type="some_type" machine="$(arg machine)"/> ```
Wouldn't it be sufficient if you use `localhost` / `127.0.0.1`? It would, but it requires defining localhost as a machine in every such launch file. I still think that allowing machine to be empty would be the better solution. Is this what you are suggesting? ``` <machine name="localhost" address="localhost" env-loader="/opt/ros/hydro/env.sh"/> <arg name="machine" default="localhost"/> <node name="some_name" pkg="some_pkg" type="some_type" machine="$(arg machine)"/> ``` None of the attributes in roslaunch currently supports this kind of behavior. In order to stay consistent, that rule would need to apply to all attributes (e.g. the `ns` attribute of the `group` tag). This would require some effort to investigate the effect on all attributes and describe the consequences of such a change. Since the maintainers do not have the time to address this in the near future, the issue will be marked with the milestone _untargeted_. Any contribution on this is highly welcome. @dirk-thomas @liborw I've got the same issue. How about taking `localhost` as a kind of reserved word: if a node is written in a launch file with `machine="localhost"`, launch the node even if there is no declaration of `<machine name="localhost" address="localhost" />`? In the meantime some attributes have started to allow empty values (e.g. the include tag #882). So allowing an empty machine tag doesn't sound that different anymore. A generic approach (optional attributes with an empty value are considered equal to the attribute not being specified) would be preferred. But any pull request would be appreciated, even if it only implements the handling of an empty machine tag.
2020-02-16T07:53:35Z
[]
[]
ros/ros_comm
2,015
ros__ros_comm-2015
[ "1650", "1776", "889", "1508", "1370" ]
acf826f2eb4dc14a0b6e08cb2e2b083f85d79f84
diff --git a/clients/rospy/src/rospy/__init__.py b/clients/rospy/src/rospy/__init__.py --- a/clients/rospy/src/rospy/__init__.py +++ b/clients/rospy/src/rospy/__init__.py @@ -94,7 +94,7 @@ 'INFO', 'WARN', 'ERROR', - 'FATAL' + 'FATAL', 'is_shutdown', 'signal_shutdown', 'get_node_uri', @@ -107,7 +107,6 @@ 'logerr_throttle', 'logfatal_throttle', 'parse_rosrpc_uri', 'MasterProxy', - 'NodeProxy', 'ROSException', 'ROSSerializationException', 'ROSInitException', diff --git a/clients/rospy/src/rospy/impl/tcpros_base.py b/clients/rospy/src/rospy/impl/tcpros_base.py --- a/clients/rospy/src/rospy/impl/tcpros_base.py +++ b/clients/rospy/src/rospy/impl/tcpros_base.py @@ -157,7 +157,8 @@ def run(self): (errno, msg) = e.args if errno == 4: #interrupted system call continue - raise + if not self.is_shutdown: + raise if self.is_shutdown: break try: diff --git a/clients/rospy/src/rospy/topics.py b/clients/rospy/src/rospy/topics.py --- a/clients/rospy/src/rospy/topics.py +++ b/clients/rospy/src/rospy/topics.py @@ -1173,7 +1173,7 @@ def check_all(self): Check all registered publication and subscriptions. """ with self.lock: - for t in chain(iter(self.pubs.values()), iter(self.subs.values())): + for t in chain(list(self.pubs.values()), list(self.subs.values())): t.check() def _add(self, ps, rmap, reg_type): @@ -1264,7 +1264,7 @@ def acquire_impl(self, reg_type, resolved_name, data_class): rmap = self.subs impl_class = _SubscriberImpl else: - raise TypeError("invalid reg_type: %s"%s) + raise TypeError("invalid reg_type: %s"%reg_type) with self.lock: impl = rmap.get(resolved_name, None) if not impl: diff --git a/tools/rosbag/scripts/fix_msg_defs.py b/tools/rosbag/scripts/fix_msg_defs.py --- a/tools/rosbag/scripts/fix_msg_defs.py +++ b/tools/rosbag/scripts/fix_msg_defs.py @@ -35,6 +35,7 @@ import sys import rosbag.migration +import roslib.message if __name__ == '__main__': if len(sys.argv) != 3: diff --git a/tools/rosbag/src/rosbag/bag.py b/tools/rosbag/src/rosbag/bag.py --- a/tools/rosbag/src/rosbag/bag.py +++ b/tools/rosbag/src/rosbag/bag.py @@ -733,8 +733,11 @@ def __str__(self): msg_count = 0 for connection in connections: - for chunk in self._chunks: - msg_count += chunk.connection_counts.get(connection.id, 0) + if self._chunks: + for chunk in self._chunks: + msg_count += chunk.connection_counts.get(connection.id, 0) + else: + msg_count += len(self._connection_indexes.get(connection.id, [])) topic_msg_counts[topic] = msg_count if self._connection_indexes_read: diff --git a/tools/rosbag/src/rosbag/migration.py b/tools/rosbag/src/rosbag/migration.py --- a/tools/rosbag/src/rosbag/migration.py +++ b/tools/rosbag/src/rosbag/migration.py @@ -43,6 +43,7 @@ import os import string import sys +import traceback import genmsg.msgs import genpy @@ -257,6 +258,9 @@ class MessageUpdateRule(object): valid = False + class EmptyType(Exception): + pass + ## Initialize class def __init__(self, migrator, location): # Every rule needs to hang onto the migrator so we can potentially use it @@ -271,23 +275,26 @@ def __init__(self, migrator, location): # Instantiate types dynamically based on definition try: if self.old_type == "": - raise Exception + raise self.EmptyType self.old_types = genpy.dynamic.generate_dynamic(self.old_type, self.old_full_text) self.old_class = self.old_types[self.old_type] self.old_md5sum = self.old_class._md5sum - except: - self.old_types = [] + except Exception as e: + if not isinstance(e, self.EmptyType): + traceback.print_exc(file=sys.stderr) + self.old_types = {} self.old_class = None self.old_md5sum 
= "" - try: if self.new_type == "": - raise Exception + raise self.EmptyType self.new_types = genpy.dynamic.generate_dynamic(self.new_type, self.new_full_text) self.new_class = self.new_types[self.new_type] self.new_md5sum = self.new_class._md5sum - except: - self.new_types = [] + except Exception as e: + if not isinstance(e, self.EmptyType): + traceback.print_exc(file=sys.stderr) + self.new_types = {} self.new_class = None self.new_md5sum = "" @@ -830,7 +837,7 @@ def scaffold_range(self, old_type, new_type): if (tmp_sn != first_sn): sn_range.append(tmp_sn) if (tmp_sn.new_class._type == new_type): - found_new_type == True + found_new_type = True if (found_new_type and tmp_sn.new_class._type != new_type): break diff --git a/tools/roslaunch/src/roslaunch/__init__.py b/tools/roslaunch/src/roslaunch/__init__.py --- a/tools/roslaunch/src/roslaunch/__init__.py +++ b/tools/roslaunch/src/roslaunch/__init__.py @@ -205,8 +205,10 @@ def _validate_args(parser, options, args): elif len(args) == 0: parser.error("you must specify at least one input file") - elif [f for f in args if not (f == '-' or os.path.exists(f))]: - parser.error("The following input files do not exist: %s"%f) + else: + missing_files = [f for f in args if not (f == '-' or os.path.exists(f))] + if missing_files: + parser.error("The following input files do not exist: %s"%', '.join(missing_files)) if args.count('-') > 1: parser.error("Only a single instance of the dash ('-') may be specified.") diff --git a/tools/roslaunch/src/roslaunch/core.py b/tools/roslaunch/src/roslaunch/core.py --- a/tools/roslaunch/src/roslaunch/core.py +++ b/tools/roslaunch/src/roslaunch/core.py @@ -208,9 +208,9 @@ def setup_env(node, machine, master_uri, env=None): if ns[-1] == '/': ns = ns[:-1] if ns: - d[rosgraph.ROS_NAMESPACE] = ns + d[rosgraph.ROS_NAMESPACE] = str(ns) for name, value in node.env_args: - d[name] = value + d[str(name)] = str(value) return d @@ -346,7 +346,7 @@ def __init__(self, name, address, self.name = name self.env_loader = env_loader self.user = user or None - self.password = password or None + self.password = password self.address = address self.ssh_port = ssh_port self.assignable = assignable diff --git a/tools/roslaunch/src/roslaunch/depends.py b/tools/roslaunch/src/roslaunch/depends.py --- a/tools/roslaunch/src/roslaunch/depends.py +++ b/tools/roslaunch/src/roslaunch/depends.py @@ -45,7 +45,7 @@ import rospkg -from .loader import convert_value +from .loader import convert_value, load_mappings from .substitution_args import resolve_args NAME="roslaunch-deps" @@ -118,6 +118,7 @@ def _parse_subcontext(tags, context): return subcontext def _parse_launch(tags, launch_file, file_deps, verbose, context): + context['filename'] = os.path.abspath(launch_file) dir_path = os.path.dirname(os.path.abspath(launch_file)) launch_file_pkg = rospkg.get_package_name(dir_path) @@ -205,7 +206,7 @@ def parse_launch(launch_file, file_deps, verbose): file_deps[launch_file] = RoslaunchDeps() launch_tag = dom[0] - context = { 'arg': {}} + context = { 'arg': load_mappings(sys.argv) } _parse_launch(launch_tag.childNodes, launch_file, file_deps, verbose, context) def rl_file_deps(file_deps, launch_file, verbose=False): diff --git a/tools/roslaunch/src/roslaunch/pmon.py b/tools/roslaunch/src/roslaunch/pmon.py --- a/tools/roslaunch/src/roslaunch/pmon.py +++ b/tools/roslaunch/src/roslaunch/pmon.py @@ -586,7 +586,9 @@ def _run(self): break #stop polling for d in dead: try: - if d.should_respawn(): + # when should_respawn() returns 0.0, bool(0.0) evaluates to False + 
# work around this by checking if the return value is False + if d.should_respawn() is not False: respawn.append(d) else: self.unregister(d) diff --git a/tools/roslaunch/src/roslaunch/remoteprocess.py b/tools/roslaunch/src/roslaunch/remoteprocess.py --- a/tools/roslaunch/src/roslaunch/remoteprocess.py +++ b/tools/roslaunch/src/roslaunch/remoteprocess.py @@ -186,7 +186,7 @@ def _ssh_exec(self, command, address, port, username=None, password=None): if not err_msg: username_str = '%s@'%username if username else '' try: - if not password: #use SSH agent + if password is None: #use SSH agent ssh.connect(address, port, username, timeout=TIMEOUT_SSH_CONNECT, key_filename=identity_file) else: #use SSH with login/pass ssh.connect(address, port, username, password, timeout=TIMEOUT_SSH_CONNECT) diff --git a/tools/roslaunch/src/roslaunch/xmlloader.py b/tools/roslaunch/src/roslaunch/xmlloader.py --- a/tools/roslaunch/src/roslaunch/xmlloader.py +++ b/tools/roslaunch/src/roslaunch/xmlloader.py @@ -626,8 +626,9 @@ def _include_tag(self, tag, context, ros_config, default_machine, is_core, verbo self._recurse_load(ros_config, launch.childNodes, child_ns, \ default_machine, is_core, verbose) - # check for unused args - loader.post_process_include_args(child_ns) + if not pass_all_args: + # check for unused args + loader.post_process_include_args(child_ns) except ArgException as e: raise XmlParseException("included file [%s] requires the '%s' arg to be set"%(inc_filename, str(e))) @@ -713,7 +714,7 @@ def _load_launch(self, launch, ros_config, is_core=False, filename=None, argv=No argv = sys.argv self._launch_tag(launch, ros_config, filename) - self.root_context = loader.LoaderContext(get_ros_namespace(), filename) + self.root_context = loader.LoaderContext(get_ros_namespace(argv=argv), filename) loader.load_sysargs_into_context(self.root_context, argv) if len(launch.getElementsByTagName('master')) > 0: diff --git a/tools/rosmaster/src/rosmaster/master_api.py b/tools/rosmaster/src/rosmaster/master_api.py --- a/tools/rosmaster/src/rosmaster/master_api.py +++ b/tools/rosmaster/src/rosmaster/master_api.py @@ -375,7 +375,7 @@ def setParam(self, caller_id, key, value): @rtype: [int, str, int] """ key = resolve_name(key, caller_id) - self.param_server.set_param(key, value, self._notify_param_subscribers) + self.param_server.set_param(key, value, self._notify_param_subscribers, caller_id) mloginfo("+PARAM [%s] by %s",key, caller_id) return 1, "parameter %s set"%key, 0 @@ -623,7 +623,7 @@ def registerService(self, caller_id, service, service_api, caller_api): self.ps_lock.release() return 1, "Registered [%s] as provider of [%s]"%(caller_id, service), 1 - @apivalidate(0, (is_service('service'),)) + @apivalidate('', (is_service('service'),)) def lookupService(self, caller_id, service): """ Lookup all provider of a particular service. @@ -672,7 +672,7 @@ def unregisterService(self, caller_id, service, service_api): ################################################################################## # PUBLISH/SUBSCRIBE - @apivalidate(0, ( is_topic('topic'), valid_type_name('topic_type'), is_api('caller_api'))) + @apivalidate([], ( is_topic('topic'), valid_type_name('topic_type'), is_api('caller_api'))) def registerSubscriber(self, caller_id, topic, topic_type, caller_api): """ Subscribe the caller to the specified topic. 
In addition to receiving @@ -708,7 +708,7 @@ def registerSubscriber(self, caller_id, topic, topic_type, caller_api): @apivalidate(0, (is_topic('topic'), is_api('caller_api'))) def unregisterSubscriber(self, caller_id, topic, caller_api): """ - Unregister the caller as a publisher of the topic. + Unregister the caller as a subscriber of the topic. @param caller_id: ROS caller id @type caller_id: str @param topic: Fully-qualified name of topic to unregister. @@ -729,7 +729,7 @@ def unregisterSubscriber(self, caller_id, topic, caller_api): finally: self.ps_lock.release() - @apivalidate(0, ( is_topic('topic'), valid_type_name('topic_type'), is_api('caller_api'))) + @apivalidate([], ( is_topic('topic'), valid_type_name('topic_type'), is_api('caller_api'))) def registerPublisher(self, caller_id, topic, topic_type, caller_api): """ Register the caller as a publisher the topic. diff --git a/tools/rosmaster/src/rosmaster/paramserver.py b/tools/rosmaster/src/rosmaster/paramserver.py --- a/tools/rosmaster/src/rosmaster/paramserver.py +++ b/tools/rosmaster/src/rosmaster/paramserver.py @@ -166,7 +166,7 @@ def get_param(self, key): finally: self.lock.release() - def set_param(self, key, value, notify_task=None): + def set_param(self, key, value, notify_task=None, caller_id=None): """ Set the parameter in the parameter dictionary. @@ -178,6 +178,8 @@ def set_param(self, key, value, notify_task=None): [(subscribers, param_key, param_value)*]. The empty dictionary represents an unset parameter. @type notify_task: fn(updates) + @param caller_id: the caller id + @type caller_id: str """ try: self.lock.acquire() @@ -208,7 +210,7 @@ def set_param(self, key, value, notify_task=None): # ParamDictionary needs to queue updates so that the updates are thread-safe if notify_task: - updates = compute_param_updates(self.reg_manager.param_subscribers, key, value) + updates = compute_param_updates(self.reg_manager.param_subscribers, key, value, caller_id) if updates: notify_task(updates) finally: @@ -332,7 +334,7 @@ def _compute_all_keys(param_key, param_value, all_keys=None): _compute_all_keys(new_k, v, all_keys) return all_keys -def compute_param_updates(subscribers, param_key, param_value): +def compute_param_updates(subscribers, param_key, param_value, caller_id_to_ignore=None): """ Compute subscribers that should be notified based on the parameter update @param subscribers: parameter subscribers @@ -341,6 +343,8 @@ def compute_param_updates(subscribers, param_key, param_value): @type param_key: str @param param_value: parameter value @type param_value: str + @param caller_id_to_ignore: the caller to ignore + @type caller_id_to_ignore: str """ # logic correct for both updates and deletions @@ -368,6 +372,11 @@ def compute_param_updates(subscribers, param_key, param_value): ns_key = sub_key + SEP if param_key.startswith(ns_key): node_apis = subscribers[sub_key] + if caller_id_to_ignore is not None: + node_apis = [ + (caller_id, caller_api) + for (caller_id, caller_api) in node_apis + if caller_id != caller_id_to_ignore] updates.append((node_apis, param_key, param_value)) elif all_keys is not None and ns_key.startswith(param_key) \ and not sub_key in all_keys: diff --git a/tools/rosmaster/src/rosmaster/util.py b/tools/rosmaster/src/rosmaster/util.py --- a/tools/rosmaster/src/rosmaster/util.py +++ b/tools/rosmaster/src/rosmaster/util.py @@ -51,8 +51,9 @@ import errno import socket +import threading -_proxies = {} #cache ServerProxys +_proxies = threading.local() #cache ServerProxys def xmlrpcapi(uri): """ @return: 
instance for calling remote server or None if not a valid URI @@ -63,16 +64,16 @@ def xmlrpcapi(uri): uriValidate = urlparse(uri) if not uriValidate[0] or not uriValidate[1]: return None - if not uri in _proxies: - _proxies[uri] = ServerProxy(uri) + if not uri in _proxies.__dict__: + _proxies.__dict__[uri] = ServerProxy(uri) close_half_closed_sockets() - return _proxies[uri] + return _proxies.__dict__[uri] def close_half_closed_sockets(): if not hasattr(socket, 'TCP_INFO'): return - for proxy in _proxies.values(): + for proxy in _proxies.__dict__.values(): transport = proxy("transport") if transport._connection and transport._connection[1] is not None and transport._connection[1].sock is not None: try: @@ -86,5 +87,5 @@ def close_half_closed_sockets(): def remove_server_proxy(uri): - if uri in _proxies: - del _proxies[uri] + if uri in _proxies.__dict__: + del _proxies.__dict__[uri] diff --git a/tools/rosnode/src/rosnode/__init__.py b/tools/rosnode/src/rosnode/__init__.py --- a/tools/rosnode/src/rosnode/__init__.py +++ b/tools/rosnode/src/rosnode/__init__.py @@ -333,6 +333,9 @@ def rosnode_ping(node_name, max_count=None, verbose=False): if verbose: print("xmlrpc reply from %s\ttime=%fms"%(node_api, dur)) # 1s between pings + except socket.timeout: + print("connection to [%s] timed out"%node_name, file=sys.stderr) + return False except socket.error as e: # 3786: catch ValueError on unpack as socket.error is not always a tuple try: @@ -355,7 +358,7 @@ def rosnode_ping(node_name, max_count=None, verbose=False): continue print("ERROR: connection refused to [%s]"%(node_api), file=sys.stderr) else: - print("connection to [%s] timed out"%node_name, file=sys.stderr) + print("connection to [%s] failed"%node_name, file=sys.stderr) return False except ValueError: print("unknown network error contacting node: %s"%(str(e))) diff --git a/tools/rosservice/src/rosservice/__init__.py b/tools/rosservice/src/rosservice/__init__.py --- a/tools/rosservice/src/rosservice/__init__.py +++ b/tools/rosservice/src/rosservice/__init__.py @@ -322,7 +322,10 @@ def rosservice_find(service_type): try: _, _, services = master.getSystemState() for s, l in services: - t = get_service_type(s) + try: + t = get_service_type(s) + except ROSServiceIOException: + continue if t == service_type: matches.append(s) except socket.error: diff --git a/tools/rostopic/src/rostopic/__init__.py b/tools/rostopic/src/rostopic/__init__.py --- a/tools/rostopic/src/rostopic/__init__.py +++ b/tools/rostopic/src/rostopic/__init__.py @@ -1172,11 +1172,11 @@ def build_map(master, state, uricache): if not puri.hostname in tmap: tmap[puri.hostname] = [] # recreate the system state data structure, but for a single host - matches = [l for x, l in tmap[puri.hostname] if x == topic] + matches = [l for x, _, l in tmap[puri.hostname] if x == topic] if matches: matches[0].append(p) else: - tmap[puri.hostname].append((topic, [p])) + tmap[puri.hostname].append((topic, ttype, [p])) return tmap uricache = {} diff --git a/utilities/roswtf/src/roswtf/plugins.py b/utilities/roswtf/src/roswtf/plugins.py --- a/utilities/roswtf/src/roswtf/plugins.py +++ b/utilities/roswtf/src/roswtf/plugins.py @@ -89,7 +89,7 @@ def load_plugins(): else: print("Loaded plugin", p_module) - except Exception: - print("Unable to load plugin [%s] from package [%s]"%(p_module, pkg), file=sys.stderr) + except Exception as e: + print("Unable to load plugin [%s] from package [%s]. 
Exception thrown: [%s]"%(p_module, pkg, str(e)), file=sys.stderr) return static_plugins, online_plugins diff --git a/utilities/roswtf/src/roswtf/roslaunchwtf.py b/utilities/roswtf/src/roswtf/roslaunchwtf.py --- a/utilities/roswtf/src/roswtf/roslaunchwtf.py +++ b/utilities/roswtf/src/roswtf/roslaunchwtf.py @@ -293,6 +293,8 @@ def _load_online_ctx(ctx): def wtf_check_online(ctx): _load_online_ctx(ctx) + if not ctx.roslaunch_uris: + return for r in online_roslaunch_warnings: warning_rule(r, r[0](ctx), ctx) for r in online_roslaunch_errors:
diff --git a/test/test_roscpp/test/src/params.cpp b/test/test_roscpp/test/src/params.cpp --- a/test/test_roscpp/test/src/params.cpp +++ b/test/test_roscpp/test/src/params.cpp @@ -566,12 +566,23 @@ TEST(Params, getParamNames) { EXPECT_LT(10, test_params.size()); } +TEST(Params, getParamCachedSetParamLoop) { + NodeHandle nh; + const std::string name = "changeable_int"; + for (int i = 0; i < 100; i++) { + nh.setParam(name, i); + int v = 0; + ASSERT_TRUE(nh.getParamCached(name, v)); + ASSERT_EQ(i, v); + } +} + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); - ros::init( argc, argv, "params" ); -// ros::NodeHandle nh; + ros::init(argc, argv, "params"); + ros::NodeHandle nh; return RUN_ALL_TESTS(); } diff --git a/test/test_rospy/test/rostest/sub_to_multiple_pubs.test b/test/test_rospy/test/rostest/sub_to_multiple_pubs.test --- a/test/test_rospy/test/rostest/sub_to_multiple_pubs.test +++ b/test/test_rospy/test/rostest/sub_to_multiple_pubs.test @@ -1,4 +1,5 @@ <launch> + <node name="listener" pkg="test_rospy" type="listener.py" /> <node name="talker1" pkg="test_rospy" type="talker.py" /> <node name="talker2" pkg="test_rospy" type="talker.py" /> <node name="talker3" pkg="test_rospy" type="talker.py" /> @@ -98,6 +99,5 @@ <node name="talker97" pkg="test_rospy" type="talker.py" /> <node name="talker98" pkg="test_rospy" type="talker.py" /> <node name="talker99" pkg="test_rospy" type="talker.py" /> - <node name="listener" pkg="test_rospy" type="listener.py" /> <test test-name="sub_to_multiple_pubs" pkg="test_rospy" type="test_sub_to_multiple_pubs.py" /> </launch> diff --git a/tools/roslaunch/test/xml/test-arg-valid-include.xml b/tools/roslaunch/test/xml/test-arg-valid-include.xml --- a/tools/roslaunch/test/xml/test-arg-valid-include.xml +++ b/tools/roslaunch/test/xml/test-arg-valid-include.xml @@ -1,4 +1,5 @@ <launch> + <arg name="another_parameter_not_used" value="dummy"/> <arg name="grounded" value="not_set"/> <include file="$(find roslaunch)/test/xml/test-arg-invalid-included.xml" pass_all_args="true"/> </launch>
ros::Timer::isValid() is not a const operation I believe `ros::Timer::isValid()` could be made `const`. It seems to only rely on `ros::Duration::isZero()`, which is a `const` operation. Or am I missing something here? Source: https://github.com/ros/ros_comm/blob/melodic-devel/clients/roscpp/include/ros/timer.h#L75 https://github.com/ros/ros_comm/blob/melodic-devel/clients/roscpp/src/libros/timer.cpp#L52 This seems to be dead code https://github.com/ros/ros_comm/blob/7b69fc9e439c8e5b28a4f6a8e1c451ca7c96a8e8/tools/rosbag/src/rosbag/migration.py#L841-L842 due to https://github.com/ros/ros_comm/blob/7b69fc9e439c8e5b28a4f6a8e1c451ca7c96a8e8/tools/rosbag/src/rosbag/migration.py#L830-L840 roslaunch: pass_all_args exception if there are extra args defined I just tried using this attribute on a fairly complex launch file that has multiple `<arg>`s declared, and it leads to an exception because not all of the `<arg>`s are defined in the `<include>`d file. For example, in the `drcsim_gazebo` package, [atlas.launch](https://bitbucket.org/osrf/drcsim/src/194be8500fef81593f79607a21ee2badd9700a0e/drcsim_gazebo/launch/atlas.launch?at=default&fileviewer=file-view-default#atlas.launch-14) includes [atlas_no_controllers.launch](https://bitbucket.org/osrf/drcsim/src/194be8500fef81593f79607a21ee2badd9700a0e/drcsim_gazebo/launch/atlas_no_controllers.launch?at=default&fileviewer=file-view-default) and passes three arguments. I removed some of the duplicate argument definitions in [this commit](https://bitbucket.org/osrf/drcsim/commits/d5c93d7db649ca4df6c07d6fcba6f9e77953913a), but roslaunch gives an exception since there are extra `<arg>`s in `atlas.launch` that aren't part of `atlas_no_controllers.launch` (for example, [inertia_args](https://bitbucket.org/osrf/drcsim/src/194be8500fef81593f79607a21ee2badd9700a0e/drcsim_gazebo/launch/atlas.launch?at=default&fileviewer=file-view-default#atlas.launch-8)). I'm guessing this will be closed as a "won't fix", but I wanted to mention it since I was excited when @gerkey added this feature in #710, but now there are quite a few instances where I won't be able to use it. Little error in function description Should probably be "Unregister the caller as a subscriber of the topic" https://github.com/ros/ros_comm/blob/fb6a6bd2839206ab99994a0d0beb89a6ffab1223/tools/rosmaster/src/rosmaster/master_api.py#L711 gdb debugging generates error messages "[ERROR] [<TIME_STAMP>]: poll failed with error Interrupted system call" summary: -------- In the current version of ros_comm, when debugging with gdb using either single-steps or breakpoints, the error message "[ERROR] [<TIME_STAMP>]: poll failed with error Interrupted system call" is generated repeatedly (by the ros macro ROS_ERROR_STREAM). The error messages were not generated in older ros_comm versions; the problem only seems to have appeared around the end of February / beginning of March 2018. severity of problem: -------------------- A lot of error messages are generated during debugging. Important error messages are therefore harder to see and may be overlooked. setup: ------ Ubuntu 16.04.4 LTS ros-kinetic ros-kinetic-roscpp_1.12.13-0xenial-20180222-205956-0800_amd64 gdb 7.11.1 gdbserver 7.11.1 details to reproduce problem: ----------------------------- Debugging was performed with two simple publisher/subscriber nodes similar to the talker/listener nodes of roscpp_tutorials. Eclipse was used as the main IDE. See reference [1] for how to set up an Eclipse project from a catkin workspace. (Note: the problem can also be reproduced with command-line gdb without Eclipse).
The behaviour of error message generation depends on the debugging setup. The following list provides an overview of debugging setups (gdb with Eclipse) and the occurrence of error messages. Local C/C++ Application: - all-stop-mode -- error messages on single-step - non-stop-mode -- no error messages on single-step nor on breakpoints -- error messages when thread #2 is suspended during poll and then resumed Remote C/C++ Application on localhost (with gdbserver): - non-stop-mode -- error messages on single-steps and on breakpoints Remote C/C++ Application on remote machine (with gdbserver): - non-stop-mode --- error messages on single-steps and on breakpoints hints: ------ Building the ros_comm package in debug mode, the source of the error messages could be located. See reference [2] for how to build ROS from source. The library libroscpp.so generates the error messages. The problem is located in the following files, lines and functions (as of writing 2018-04-20 16:00): - https://github.com/ros/ros_comm/blob/kinetic-devel/clients/roscpp/src/libros/io.cpp -- line: --- ~165 --- 304 - 316 -- function: --- pollfd_vector_ptr poll_sockets(int epfd, socket_pollfd *fds, nfds_t nfds, int timeout) - https://github.com/ros/ros_comm/blob/kinetic-devel/clients/roscpp/src/libros/poll_set.cpp -- line: --- ~184 -- function: --- void PollSet::update(int poll_timeout) In the simple publisher/subscriber test applications, the above-mentioned functions were executed in thread number 2. While debugging the main thread, in io.cpp, function poll_sockets, the system call epoll_wait is interrupted. - io.cpp line 306: int fd_cnt = ::epoll_wait(epfd, ev, nfds, timeout); A comment indicates that an interrupted system call is not an error. - io.cpp line 310: // EINTR means that we got interrupted by a signal, and is not an error The gdb online documentation also states that interrupted system calls are not errors, see reference [3]. The ROS_ERROR macro is NOT called in io.cpp. But in poll_set.cpp, function PollSet::update (the caller of io.cpp, function poll_sockets), the return value of poll_sockets in the case of an interrupted system call is still handled as an error and the macro ROS_ERROR_STREAM is called. This leads to many error messages with "poll failed with error Interrupted system call" during debugging. The issue may be fixable by wrapping the system call epoll_wait in a while loop, see references [4][5]. references: ----------- references for setup: [1] http://wiki.ros.org/IDEs#Eclipse [2] http://wiki.ros.org/Installation/Source reference about interrupted system calls can be found in gdb's online documentation: [3] https://sourceware.org/gdb/current/onlinedocs/gdb/Interrupted-System-Calls.html#Interrupted-System-Calls similar issue of interrupted system calls and approach to solve: [4] https://stackoverflow.com/questions/6813629/how-do-i-use-gdb-for-multi-threaded-networking-program [5] https://stackoverflow.com/a/6815333
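The retry-on-EINTR pattern that references [4] and [5] point to is simple; a sketch in Python (around `select.poll()`, purely for illustration) is shown below. The helper name and the Python 3 focus are mine; in roscpp the equivalent change would retry `::epoll_wait`, or have `PollSet::update` treat the EINTR case as non-fatal instead of logging ROS_ERROR. Note that Python 3.5+ already retries EINTR internally (PEP 475), so the explicit loop is mainly there to show the idea.

```python
# Sketch of the retry-on-EINTR idea, assuming Python 3 (select.error is OSError there).
import errno
import select

def poll_ignoring_eintr(poller, timeout_ms):
    """Poll, retrying when a signal (e.g. from the debugger) interrupts the call."""
    while True:
        try:
            return poller.poll(timeout_ms)
        except OSError as err:
            if err.errno == errno.EINTR:
                continue   # interrupted system call: not an error, just poll again
            raise

if __name__ == '__main__':
    p = select.poll()
    # With nothing registered this simply returns [] after ~100 ms.
    print(poll_ignoring_eintr(p, 100))
```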
Oh, that's interesting. I hadn't considered that case. Can you tell me which exception is thrown, to help me track down the code path that leads to it and have a look at what can be done? ``` [roslaunch][INFO] 2016-09-06 17:25:55,495: Checking log directory for disk usage. This may take awhile. Press Ctrl-C to interrupt [roslaunch][INFO] 2016-09-06 17:25:55,518: Done checking log file disk usage. Usage is <1GB. [roslaunch][INFO] 2016-09-06 17:25:55,518: roslaunch starting with args ['/opt/ros/indigo/bin/roslaunch', 'drcsim_gazebo', 'atlas.launch', 'gzname:=gzserver'] [roslaunch][INFO] 2016-09-06 17:25:55,519: roslaunch env is ... [roslaunch][INFO] 2016-09-06 17:25:55,519: starting in server mode [roslaunch.parent][INFO] 2016-09-06 17:25:55,519: starting roslaunch parent run [roslaunch][INFO] 2016-09-06 17:25:55,519: loading roscore config file /opt/ros/indigo/etc/ros/roscore.xml [roslaunch][INFO] 2016-09-06 17:25:55,607: Added core node of type [rosout/rosout] in namespace [/] [roslaunch.config][INFO] 2016-09-06 17:25:55,608: loading config file /data_fast/scpeters/ws/drcsim/install/share/drcsim_gazebo/launch/atlas.launch [roslaunch][INFO] 2016-09-06 17:25:55,638: Added node of type [drcsim_gazebo/run_gzserver] in namespace [/] [roslaunch][ERROR] 2016-09-06 17:25:55,638: unused args [hand_suffix, inertia_args, model_args] for include of [/data_fast/scpeters/ws/drcsim/install/share/drcsim_gazebo/launch/atlas_no_controllers.launch] [roslaunch][ERROR] 2016-09-06 17:25:55,638: The traceback for the exception was written to the log file [roslaunch][ERROR] 2016-09-06 17:25:55,639: Traceback (most recent call last): File "/opt/ros/indigo/lib/python2.7/dist-packages/roslaunch/__init__.py", line 307, in main p.start() File "/opt/ros/indigo/lib/python2.7/dist-packages/roslaunch/parent.py", line 268, in start self._start_infrastructure() File "/opt/ros/indigo/lib/python2.7/dist-packages/roslaunch/parent.py", line 217, in _start_infrastructure self._load_config() File "/opt/ros/indigo/lib/python2.7/dist-packages/roslaunch/parent.py", line 132, in _load_config roslaunch_strs=self.roslaunch_strs, verbose=self.verbose) File "/opt/ros/indigo/lib/python2.7/dist-packages/roslaunch/config.py", line 455, in load_config_default raise RLException(e) RLException: unused args [hand_suffix, inertia_args, model_args] for include of [/data_fast/scpeters/ws/drcsim/install/share/drcsim_gazebo/launch/atlas_no_controllers.launch] [rospy.core][INFO] 2016-09-06 17:25:55,639: signal_shutdown [atexit] ``` I'm also having this problem, and with nested launch files, it makes the `pass_all_args` unfortunately useless, where it should be able to greatly reduce the amount of repeated text all over the place. Thanks for the bump. I'll look into this. Any new on this? I actually never understood why it throws an exception just because some arguments are not used. This workaround excludes the check when `pass_all_args` is set: ``` if not pass_all_args: # check for unused args loader.post_process_include_args(child_ns) ``` Instead of the following: https://github.com/ros/ros_comm/blob/2c8a3f65628d0362d97c29d333d5669b2c7803a6/tools/roslaunch/src/roslaunch/xmlloader.py#L630-L631 I've not created any pull request yet since I don't know if this behavior is what you want. @scpeters @gerkey, if you guys give me some feedback on the intended behavior I can make a PR. great point
2020-08-03T22:34:16Z
[]
[]
ros/ros_comm
2,038
ros__ros_comm-2038
[ "2006" ]
84d413fa5689891d871680c18f217a6a16d12731
diff --git a/tools/rosbag/src/rosbag/rosbag_main.py b/tools/rosbag/src/rosbag/rosbag_main.py --- a/tools/rosbag/src/rosbag/rosbag_main.py +++ b/tools/rosbag/src/rosbag/rosbag_main.py @@ -72,6 +72,12 @@ def _stop_process(signum, frame, old_handler, process): old_handler(signum, frame) +def _send_process_sigint(signum, frame, old_handler, process): + process.send_signal(signal.SIGINT) + if old_handler: + old_handler(signum, frame) + + def record_cmd(argv): parser = optparse.OptionParser(usage="rosbag record TOPIC1 [TOPIC2 TOPIC3 ...]", description="Record a bag file with the contents of specified topics.", @@ -143,6 +149,12 @@ def record_cmd(argv): signal.SIGTERM, lambda signum, frame: _stop_process(signum, frame, old_handler, process) ) + + old_handler = signal.signal( + signal.SIGINT, + lambda signum, frame: _send_process_sigint(signum, frame, old_handler, process) + ) + # Better way of handling it than os.execv # This makes sure stdin handles are passed to the process. process = subprocess.Popen(cmd)
diff --git a/test/test_rosbag/CMakeLists.txt b/test/test_rosbag/CMakeLists.txt --- a/test/test_rosbag/CMakeLists.txt +++ b/test/test_rosbag/CMakeLists.txt @@ -50,6 +50,8 @@ if(CATKIN_ENABLE_TESTING) add_rostest(${PROJECT_BINARY_DIR}/test/latched_sub.test) add_rostest(test/record_two_publishers.test) add_rostest(test/record_one_publisher_two_topics.test) + add_rostest(test/record_sigint_cleanup.test) + add_rostest(test/record_sigterm_cleanup.test) include_directories(${GTEST_INCLUDE_DIRS}) add_executable(double_pub EXCLUDE_FROM_ALL test/double_pub.cpp) diff --git a/test/test_rosbag/test/record_sigint_cleanup.py b/test/test_rosbag/test/record_sigint_cleanup.py new file mode 100755 --- /dev/null +++ b/test/test_rosbag/test/record_sigint_cleanup.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# Software License Agreement (BSD License) +# +# Copyright (c) 2020 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Willow Garage, Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +import roslib +roslib.load_manifest('rosbag') + +import os +import unittest +import rostest +import sys +import signal +from record_signal_cleanup_helper import test_signal_cleanup + +TEST_BAG_FILE_NAME = '/tmp/record_sigint_cleanup_test.bag' + +class RecordSigintCleanup(unittest.TestCase): + + def test_sigint_cleanup(self): + """ + Test that rosbag cleans up after handling SIGINT + """ + test_signal_cleanup(TEST_BAG_FILE_NAME, signal.SIGINT) + + # check that the recorded file is no longer active + self.assertTrue(os.path.isfile(TEST_BAG_FILE_NAME)) + self.assertFalse(os.path.isfile(TEST_BAG_FILE_NAME+ '.active')) + + +if __name__ == '__main__': + rostest.unitrun('test_rosbag', 'test_sigint_cleanup', RecordSigintCleanup, sys.argv) diff --git a/test/test_rosbag/test/record_sigint_cleanup.test b/test/test_rosbag/test/record_sigint_cleanup.test new file mode 100644 --- /dev/null +++ b/test/test_rosbag/test/record_sigint_cleanup.test @@ -0,0 +1,5 @@ +<launch> + <node pkg="rostopic" type="rostopic" name="rostopic_pub1" + args="pub -r 10 chatter std_msgs/String chatter1"/> + <test test-name="test_sigint_cleanup" pkg="test_rosbag" type="record_sigint_cleanup.py"/> +</launch> diff --git a/test/test_rosbag/test/record_signal_cleanup_helper.py b/test/test_rosbag/test/record_signal_cleanup_helper.py new file mode 100644 --- /dev/null +++ b/test/test_rosbag/test/record_signal_cleanup_helper.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +# Software License Agreement (BSD License) +# +# Copyright (c) 2020 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Willow Garage, Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import time +import subprocess + +RECORD_COMMAND = ['rosbag', + 'record', + 'chatter', + '-O', + '--duration=5'] +SLEEP_TIME_SEC = 10 + +def test_signal_cleanup(test_bag_file_name, test_signal): + """ + Run rosbag record and send a signal to it after some time. 
+ + :param test_bag_file_name: bag name for recorded output + :param test_signal: signal to send to rosbag + """ + test_command = list(RECORD_COMMAND) + test_command.insert(4, test_bag_file_name) + + p = subprocess.Popen(test_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + # wait while the recorder creates a bag for us to examine + time.sleep(SLEEP_TIME_SEC) + p.send_signal(test_signal) + p.wait() diff --git a/test/test_rosbag/test/record_sigterm_cleanup.py b/test/test_rosbag/test/record_sigterm_cleanup.py new file mode 100755 --- /dev/null +++ b/test/test_rosbag/test/record_sigterm_cleanup.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# Software License Agreement (BSD License) +# +# Copyright (c) 2020 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Willow Garage, Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import roslib +roslib.load_manifest('rosbag') + +import os +import unittest +import rostest +import sys +import signal +from record_signal_cleanup_helper import test_signal_cleanup + +TEST_BAG_FILE_NAME = '/tmp/record_sigterm_cleanup_test.bag' + +class RecordSigtermCleanup(unittest.TestCase): + + def test_sigterm_cleanup(self): + """ + Test that rosbag cleans up after handling SIGTERM + """ + test_signal_cleanup(TEST_BAG_FILE_NAME, signal.SIGTERM) + + # check that the recorded file is no longer active + self.assertTrue(os.path.isfile(TEST_BAG_FILE_NAME)) + self.assertFalse(os.path.isfile(TEST_BAG_FILE_NAME+ '.active')) + + +if __name__ == '__main__': + rostest.unitrun('test_rosbag', 'test_sigterm_cleanup', RecordSigtermCleanup, sys.argv) diff --git a/test/test_rosbag/test/record_sigterm_cleanup.test b/test/test_rosbag/test/record_sigterm_cleanup.test new file mode 100644 --- /dev/null +++ b/test/test_rosbag/test/record_sigterm_cleanup.test @@ -0,0 +1,5 @@ +<launch> + <node pkg="rostopic" type="rostopic" name="rostopic_pub1" + args="pub -r 10 chatter std_msgs/String chatter1"/> + <test test-name="test_sigterm_cleanup" pkg="test_rosbag" type="record_sigterm_cleanup.py"/> +</launch>
rosbag leaves an ".active" file dangling when receiving SIGTERM Currently, rosbag leaves a dangling ".active" file when stopped with SIGTERM. 1. Can we update `rosbag` so that the behavior of SIGTERM and SIGINT is the same? 2. Would it be acceptable to backport this behavior to Kinetic, and Melodic?
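The general pattern that makes SIGTERM behave like SIGINT is to forward the signal to the recording child process so it can run its normal shutdown path and finalize the bag. The sketch below illustrates that idea only; it is not the PR's code, and the recorded topic and bag path are placeholders.

```python
# Sketch: translate SIGTERM into SIGINT for a child recorder so the bag is
# closed cleanly instead of being left behind as a dangling .active file.
# The command line and bag path are placeholders.
import signal
import subprocess

child = subprocess.Popen(['rosbag', 'record', 'chatter', '-O', '/tmp/demo.bag'])

def forward_as_sigint(signum, frame):
    # Let the recorder handle the interrupt itself rather than being killed outright.
    child.send_signal(signal.SIGINT)

signal.signal(signal.SIGTERM, forward_as_sigint)
child.wait()
```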
Extra note: SIGINT on the main process has the same behavior. SIGINT on the process group works as expected. Please consider contributing a pull request for this. > Would it be acceptable to backport this behavior to Kinetic, and Melodic? That depends on the patch necessary. The intended behavior change (to clean up a dangling file) sounds reasonable.
2020-09-04T21:20:47Z
[]
[]
ros/ros_comm
2,152
ros__ros_comm-2152
[ "2128" ]
f4ab23fe6d9e64fcc2d08cbd88ba368f5188b131
diff --git a/clients/rospy/src/rospy/core.py b/clients/rospy/src/rospy/core.py --- a/clients/rospy/src/rospy/core.py +++ b/clients/rospy/src/rospy/core.py @@ -509,7 +509,7 @@ def _add_shutdown_thread(t): # last thread may not get reaped until shutdown, but this is # relatively minor for other in _shutdown_threads[:]: - if not other.isAlive(): + if not other.is_alive(): _shutdown_threads.remove(other) _shutdown_threads.append(t) @@ -595,7 +595,7 @@ def signal_shutdown(reason): threads = _shutdown_threads[:] for t in threads: - if t.isAlive(): + if t.is_alive(): t.join(_TIMEOUT_SHUTDOWN_JOIN) del _shutdown_threads[:] try: diff --git a/clients/rospy/src/rospy/impl/tcpros_base.py b/clients/rospy/src/rospy/impl/tcpros_base.py --- a/clients/rospy/src/rospy/impl/tcpros_base.py +++ b/clients/rospy/src/rospy/impl/tcpros_base.py @@ -817,8 +817,8 @@ def receive_loop(self, msgs_callback): except DeserializationError as e: #TODO: how should we handle reconnect in this case? - logerr("[%s] error deserializing incoming request: %s"%self.name, str(e)) - rospyerr("[%s] error deserializing incoming request: %s"%self.name, traceback.format_exc()) + logerr("[%s] error deserializing incoming request: %s"%(self.name, str(e))) + rospyerr("[%s] error deserializing incoming request: %s"%(self.name, traceback.format_exc())) except: # in many cases this will be a normal hangup, but log internally try: @@ -847,4 +847,3 @@ def close(self): finally: self.socket = self.read_buff = self.write_buff = self.protocol = None super(TCPROSTransport, self).close() - diff --git a/tools/rosbag/src/rosbag/rosbag_main.py b/tools/rosbag/src/rosbag/rosbag_main.py --- a/tools/rosbag/src/rosbag/rosbag_main.py +++ b/tools/rosbag/src/rosbag/rosbag_main.py @@ -306,6 +306,12 @@ def play_cmd(argv): signal.SIGTERM, lambda signum, frame: _stop_process(signum, frame, old_handler, process) ) + + old_handler = signal.signal( + signal.SIGINT, + lambda signum, frame: _send_process_sigint(signum, frame, old_handler, process) + ) + # Better way of handling it than os.execv # This makes sure stdin handles are passed to the process. process = subprocess.Popen(cmd) diff --git a/tools/rosgraph/src/rosgraph/xmlrpc.py b/tools/rosgraph/src/rosgraph/xmlrpc.py --- a/tools/rosgraph/src/rosgraph/xmlrpc.py +++ b/tools/rosgraph/src/rosgraph/xmlrpc.py @@ -44,6 +44,7 @@ import errno import logging +import platform import select import socket @@ -76,9 +77,32 @@ def isstring(s): except NameError: return isinstance(s, str) -class SilenceableXMLRPCRequestHandler(SimpleXMLRPCRequestHandler): - protocol_version = 'HTTP/1.1' +def _support_http_1_1(): + """ + Determine whether HTTP 1.1 should be enabled for XMLRPC communications. + + This will be true on non-Linux systems, and on Linux kernels at least as + new as 4.16. 
Linux kernels 4.15 and older cause significant performance + degradation in the roscore when using HTTP 1.1 + """ + if platform.system() != 'Linux': + return True + minimum_supported_major, minimum_supported_minor = (4, 16) + release = platform.release().split('.') + platform_major = int(release[0]) + platform_minor = int(release[1]) + if platform_major < minimum_supported_major: + return False + if (platform_major == minimum_supported_major and + platform_minor < minimum_supported_minor): + return False + return True + + +class SilenceableXMLRPCRequestHandler(SimpleXMLRPCRequestHandler): + if _support_http_1_1(): + protocol_version = 'HTTP/1.1' def log_message(self, format, *args): if 0: @@ -90,7 +114,8 @@ class ThreadingXMLRPCServer(socketserver.ThreadingMixIn, SimpleXMLRPCServer): requests via threading. Also makes logging toggleable. """ - daemon_threads = True + if _support_http_1_1(): + daemon_threads = True def __init__(self, addr, log_requests=1): """ diff --git a/tools/roslaunch/src/roslaunch/pmon.py b/tools/roslaunch/src/roslaunch/pmon.py --- a/tools/roslaunch/src/roslaunch/pmon.py +++ b/tools/roslaunch/src/roslaunch/pmon.py @@ -113,7 +113,7 @@ def shutdown_process_monitor(process_monitor): process_monitor.shutdown() #logger.debug("shutdown_process_monitor: joining ProcessMonitor") process_monitor.join(20.0) - if process_monitor.isAlive(): + if process_monitor.is_alive(): logger.error("shutdown_process_monitor: ProcessMonitor shutdown failed!") return False else: diff --git a/utilities/roswtf/src/roswtf/graph.py b/utilities/roswtf/src/roswtf/graph.py --- a/utilities/roswtf/src/roswtf/graph.py +++ b/utilities/roswtf/src/roswtf/graph.py @@ -166,7 +166,7 @@ def unconnected_subscriptions(ctx): ] graph_errors = [ - (simtime_check, "/use_simtime is set but no publisher of /clock is present"), + (simtime_check, "/use_sim_time is set but no publisher of /clock is present"), (ping_check, "Could not contact the following nodes:"), (missing_edges, "The following nodes should be connected but aren't:"), (probe_all_services, "Errors connecting to the following services:"),
diff --git a/tools/roslaunch/test/unit/test_roslaunch_pmon.py b/tools/roslaunch/test/unit/test_roslaunch_pmon.py --- a/tools/roslaunch/test/unit/test_roslaunch_pmon.py +++ b/tools/roslaunch/test/unit/test_roslaunch_pmon.py @@ -54,9 +54,14 @@ def __init__(self): self.alive = False self.is_shutdown = False + # The preferred method is 'is_alive' + # Keep this method around for backwards compatibility def isAlive(self): return self.alive + def is_alive(self): + return self.alive + def join(self, *args): return @@ -302,9 +307,9 @@ def failer(): # Test with a real process monitor pmon = roslaunch.pmon.start_process_monitor() - self.assert_(pmon.isAlive()) + self.assert_(pmon.is_alive()) self.assert_(roslaunch.pmon.shutdown_process_monitor(pmon)) - self.failIf(pmon.isAlive()) + self.failIf(pmon.is_alive()) # fiddle around with some state that would shouldn't be roslaunch.pmon._shutting_down = True @@ -321,13 +326,13 @@ def test_pmon_shutdown(self): # pmon_shutdown pmon1 = roslaunch.pmon.start_process_monitor() pmon2 = roslaunch.pmon.start_process_monitor() - self.assert_(pmon1.isAlive()) - self.assert_(pmon2.isAlive()) + self.assert_(pmon1.is_alive()) + self.assert_(pmon2.is_alive()) roslaunch.pmon.pmon_shutdown() - self.failIf(pmon1.isAlive()) - self.failIf(pmon2.isAlive()) + self.failIf(pmon1.is_alive()) + self.failIf(pmon2.is_alive()) def test_add_process_listener(self): # coverage test, not a functionality test as that would be much more difficult to simulate diff --git a/tools/rosmsg/test/DiagnosticStatus_raw.txt b/tools/rosmsg/test/DiagnosticStatus_raw.txt new file mode 100644 --- /dev/null +++ b/tools/rosmsg/test/DiagnosticStatus_raw.txt @@ -0,0 +1,11 @@ +byte OK=0 +byte WARN=1 +byte ERROR=2 +byte STALE=3 +byte level +string name +string message +string hardware_id +diagnostic_msgs/KeyValue[] values + string key + string value diff --git a/tools/rosmsg/test/RosmsgC_raw.txt b/tools/rosmsg/test/RosmsgC_raw.txt deleted file mode 100644 --- a/tools/rosmsg/test/RosmsgC_raw.txt +++ /dev/null @@ -1,4 +0,0 @@ -test_rosmaster/String s1 - string data -test_rosmaster/String s2 - string data diff --git a/tools/rosmsg/test/msg/DiagnosticStatus.msg b/tools/rosmsg/test/msg/DiagnosticStatus.msg new file mode 100644 --- /dev/null +++ b/tools/rosmsg/test/msg/DiagnosticStatus.msg @@ -0,0 +1,9 @@ +byte OK=0 +byte WARN=1 +byte ERROR=2 +byte STALE=3 +byte level +string name +string message +string hardware_id +diagnostic_msgs/KeyValue[] values diff --git a/tools/rosmsg/test/msg/KeyValue.msg b/tools/rosmsg/test/msg/KeyValue.msg new file mode 100644 --- /dev/null +++ b/tools/rosmsg/test/msg/KeyValue.msg @@ -0,0 +1,2 @@ +string key +string value diff --git a/tools/rosmsg/test/msg/RosmsgA.msg b/tools/rosmsg/test/msg/RosmsgA.msg deleted file mode 100644 --- a/tools/rosmsg/test/msg/RosmsgA.msg +++ /dev/null @@ -1 +0,0 @@ -int32 a diff --git a/tools/rosmsg/test/msg/RosmsgB.msg b/tools/rosmsg/test/msg/RosmsgB.msg deleted file mode 100644 --- a/tools/rosmsg/test/msg/RosmsgB.msg +++ /dev/null @@ -1 +0,0 @@ -test_rosmaster/Empty empty diff --git a/tools/rosmsg/test/srv/AddDiagnostics.srv b/tools/rosmsg/test/srv/AddDiagnostics.srv new file mode 100644 --- /dev/null +++ b/tools/rosmsg/test/srv/AddDiagnostics.srv @@ -0,0 +1,4 @@ +string load_namespace +--- +bool success +string message diff --git a/tools/rosmsg/test/srv/RossrvA.srv b/tools/rosmsg/test/srv/RossrvA.srv deleted file mode 100644 --- a/tools/rosmsg/test/srv/RossrvA.srv +++ /dev/null @@ -1,3 +0,0 @@ -int32 areq ---- -int32 aresp diff --git 
a/tools/rosmsg/test/srv/RossrvB.srv b/tools/rosmsg/test/srv/RossrvB.srv deleted file mode 100644 --- a/tools/rosmsg/test/srv/RossrvB.srv +++ /dev/null @@ -1,3 +0,0 @@ -test_rosmaster/Empty empty ---- -test_rosmaster/Empty empty diff --git a/tools/rosmsg/test/srv/SelfTest.srv b/tools/rosmsg/test/srv/SelfTest.srv new file mode 100644 --- /dev/null +++ b/tools/rosmsg/test/srv/SelfTest.srv @@ -0,0 +1,15 @@ +--- +string id +byte passed +diagnostic_msgs/DiagnosticStatus[] status + byte OK=0 + byte WARN=1 + byte ERROR=2 + byte STALE=3 + byte level + string name + string message + string hardware_id + diagnostic_msgs/KeyValue[] values + string key + string value diff --git a/tools/rosmsg/test/test_rosmsg.py b/tools/rosmsg/test/test_rosmsg.py --- a/tools/rosmsg/test/test_rosmsg.py +++ b/tools/rosmsg/test/test_rosmsg.py @@ -69,21 +69,21 @@ def test_get_msg_text(self): d = get_test_path() msg_d = os.path.join(d, 'msg') - test_message_package = 'test_rosmaster' + test_message_package = 'diagnostic_msgs' rospack = rospkg.RosPack() msg_raw_d = os.path.join(rospack.get_path(test_message_package), 'msg') - for t in ['RosmsgA', 'RosmsgB']: - with open(os.path.join(msg_d, '%s.msg'%t), 'r') as f: - text = f.read() - with open(os.path.join(msg_raw_d, '%s.msg'%t), 'r') as f: - text_raw = f.read() - - type_ = test_message_package+'/'+t - self.assertEquals(text, rosmsg.get_msg_text(type_, raw=False)) - self.assertEquals(text_raw, rosmsg.get_msg_text(type_, raw=True)) + t = 'KeyValue' + with open(os.path.join(msg_d, '%s.msg'%t), 'r') as f: + text = f.read() + with open(os.path.join(msg_raw_d, '%s.msg'%t), 'r') as f: + text_raw = f.read() + + type_ = test_message_package+'/'+t + self.assertEquals(text, rosmsg.get_msg_text(type_, raw=False)) + self.assertEquals(text_raw, rosmsg.get_msg_text(type_, raw=True)) # test recursive types - t = 'RosmsgC' + t = 'DiagnosticStatus' with open(os.path.join(d, '%s_raw.txt'%t), 'r') as f: text = f.read() with open(os.path.join(msg_raw_d, '%s.msg'%t), 'r') as f: @@ -120,24 +120,26 @@ def test_list_types(self): # test msgs l = rosmsg.list_types('rospy', mode='.msg') self.assertEquals([], l) - l = rosmsg.list_types('test_rosmaster', mode='.msg') - for t in ['test_rosmaster/RosmsgA', 'test_rosmaster/RosmsgB', 'test_rosmaster/RosmsgC']: + l = rosmsg.list_types('diagnostic_msgs', mode='.msg') + for t in ['diagnostic_msgs/DiagnosticArray', + 'diagnostic_msgs/DiagnosticStatus', + 'diagnostic_msgs/KeyValue']: assert t in l l = rosmsg.list_types('rospy', mode='.srv') self.assertEquals([], l) - l = rosmsg.list_types('test_rosmaster', mode='.srv') - for t in ['test_rosmaster/RossrvA', 'test_rosmaster/RossrvB']: + l = rosmsg.list_types('diagnostic_msgs', mode='.srv') + for t in ['diagnostic_msgs/AddDiagnostics', 'diagnostic_msgs/SelfTest']: assert t in l def test_get_srv_text(self): d = get_test_path() srv_d = os.path.join(d, 'srv') - test_srv_package = 'test_rosmaster' + test_srv_package = 'diagnostic_msgs' rospack = rospkg.RosPack() srv_raw_d = os.path.join(rospack.get_path(test_srv_package), 'srv') - for t in ['RossrvA', 'RossrvB']: + for t in ['AddDiagnostics', 'SelfTest']: with open(os.path.join(srv_d, '%s.srv'%t), 'r') as f: text = f.read() with open(os.path.join(srv_raw_d, '%s.srv'%t), 'r') as f: diff --git a/tools/rosmsg/test/test_rosmsg_command_line.py b/tools/rosmsg/test/test_rosmsg_command_line.py --- a/tools/rosmsg/test/test_rosmsg_command_line.py +++ b/tools/rosmsg/test/test_rosmsg_command_line.py @@ -75,7 +75,7 @@ def test_cmd_packages(self): l1 = [x for x in output1.split() if 
x] l2 = [x.strip() for x in output2.split('\n') if x.strip()] self.assertEquals(l1, l2) - for p in ['std_msgs', 'test_rosmaster']: + for p in ['std_msgs', 'diagnostic_msgs']: self.assert_(p in l1) for p in ['std_srvs', 'rosmsg']: self.assert_(p not in l1) @@ -85,7 +85,7 @@ def test_cmd_packages(self): l1 = [x for x in output1.split() if x] l2 = [x.strip() for x in output2.split('\n') if x.strip()] self.assertEquals(l1, l2) - for p in ['std_srvs', 'test_rosmaster']: + for p in ['std_srvs', 'diagnostic_msgs']: self.assert_(p in l1) for p in ['std_msgs', 'rospy']: self.assert_(p not in l1) @@ -94,7 +94,7 @@ def test_cmd_list(self): # - multi-line output1 = Popen([sys.executable, os.path.join(_SCRIPT_FOLDER,'rosmsg'), 'list'], stdout=PIPE).communicate()[0].decode() l1 = [x.strip() for x in output1.split('\n') if x.strip()] - for p in ['std_msgs/String', 'test_rosmaster/Floats']: + for p in ['std_msgs/String', 'diagnostic_msgs/DiagnosticArray']: self.assert_(p in l1) for p in ['std_srvs/Empty', 'roscpp/Empty']: self.assert_(p not in l1) @@ -103,31 +103,31 @@ def test_cmd_list(self): l1 = [x.strip() for x in output1.split('\n') if x.strip()] for p in ['std_srvs/Empty', 'roscpp/Empty']: self.assert_(p in l1) - for p in ['std_msgs/String', 'test_rosmaster/Floats']: + for p in ['std_msgs/String', 'diagnostic_msgs/DiagnosticStatus']: self.assert_(p not in l1) def test_cmd_package(self): # this test is obviously very brittle, but should stabilize as the tests stabilize # - single line output - output1 = Popen(['rosmsg', 'package', '-s', 'test_rosmaster'], stdout=PIPE).communicate()[0].decode() + output1 = Popen(['rosmsg', 'package', '-s', 'diagnostic_msgs'], stdout=PIPE).communicate()[0].decode() # - multi-line output - output2 = Popen(['rosmsg', 'package', 'test_rosmaster'], stdout=PIPE).communicate()[0].decode() + output2 = Popen(['rosmsg', 'package', 'diagnostic_msgs'], stdout=PIPE).communicate()[0].decode() l = set([x for x in output1.split() if x]) l2 = set([x.strip() for x in output2.split('\n') if x.strip()]) self.assertEquals(l, l2) - for m in ['test_rosmaster/RosmsgA', - 'test_rosmaster/RosmsgB', - 'test_rosmaster/RosmsgC']: + for m in ['diagnostic_msgs/DiagnosticArray', + 'diagnostic_msgs/DiagnosticStatus', + 'diagnostic_msgs/KeyValue']: self.assertTrue(m in l, l) - output = Popen(['rossrv', 'package', '-s', 'test_rosmaster'], stdout=PIPE).communicate()[0].decode() - output2 = Popen(['rossrv', 'package','test_rosmaster'], stdout=PIPE).communicate()[0].decode() + output = Popen(['rossrv', 'package', '-s', 'diagnostic_msgs'], stdout=PIPE).communicate()[0].decode() + output2 = Popen(['rossrv', 'package','diagnostic_msgs'], stdout=PIPE).communicate()[0].decode() l = set([x for x in output.split() if x]) l2 = set([x.strip() for x in output2.split('\n') if x.strip()]) self.assertEquals(l, l2) - for m in ['test_rosmaster/RossrvA', 'test_rosmaster/RossrvB']: + for m in ['diagnostic_msgs/AddDiagnostics', 'diagnostic_msgs/SelfTest']: self.assertTrue(m in l, l) ## test that the rosmsg/rossrv show command works @@ -139,41 +139,41 @@ def test_cmd_show(self): self.assertEquals('---', output.strip()) output = Popen(['rossrv', 'show', 'std_srvs/Empty'], stdout=PIPE).communicate()[0].decode() self.assertEquals('---', output.strip()) - output = Popen(['rossrv', 'show', 'test_rosmaster/AddTwoInts'], stdout=PIPE).communicate()[0].decode() - self.assertEquals(os.linesep.join(['int64 a', 'int64 b', '---', 'int64 sum']), output.strip()) + output = Popen(['rossrv', 'show', 'diagnostic_msgs/AddDiagnostics'], 
stdout=PIPE).communicate()[0].decode() + self.assertEquals('string load_namespace\n---\nbool success\nstring message', output.strip()) # test against test_rosmsg package d = os.path.abspath(os.path.dirname(__file__)) msg_d = os.path.join(d, 'msg') - test_message_package = 'test_rosmaster' + test_message_package = 'diagnostic_msgs' rospack = rospkg.RosPack() msg_raw_d = os.path.join(rospack.get_path(test_message_package), 'msg') # - test with non-recursive types - for t in ['RosmsgA', 'RosmsgB']: - with open(os.path.join(msg_d, '%s.msg'%t), 'r') as f: - text = f.read() - with open(os.path.join(msg_raw_d, '%s.msg'%t), 'r') as f: - text_raw = f.read() - text = text.strip() - text_raw = text_raw.strip() - type_ =test_message_package+'/'+t - output = Popen(['rosmsg', 'show', type_], stdout=PIPE).communicate()[0].decode() - self.assertEquals(text, output.strip()) - output = Popen(['rosmsg', 'show', '-r',type_], stdout=PIPE).communicate()[0].decode() - self.assertEquals(text_raw, output.strip()) - output = Popen(['rosmsg', 'show', '--raw', type_], stdout=PIPE).communicate()[0].decode() - self.assertEquals(text_raw, output.strip()) + t = 'KeyValue' + with open(os.path.join(msg_d, '%s.msg'%t), 'r') as f: + text = f.read() + with open(os.path.join(msg_raw_d, '%s.msg'%t), 'r') as f: + text_raw = f.read() + text = text.strip() + text_raw = text_raw.strip() + type_ =test_message_package+'/'+t + output = Popen(['rosmsg', 'show', type_], stdout=PIPE).communicate()[0].decode() + self.assertEquals(text, output.strip()) + output = Popen(['rosmsg', 'show', '-r',type_], stdout=PIPE).communicate()[0].decode() + self.assertEquals(text_raw, output.strip()) + output = Popen(['rosmsg', 'show', '--raw', type_], stdout=PIPE).communicate()[0].decode() + self.assertEquals(text_raw, output.strip()) - # test as search - type_ = t - text_prefix = "[test_rosmaster/%s]:"%t - text = os.linesep.join([text_prefix, text]) - text_raw = os.linesep.join([text_prefix, text_raw]) - output = Popen(['rosmsg', 'show', type_], stdout=PIPE).communicate()[0].decode() - self.assertEquals(text, output.strip()) - output = Popen(['rosmsg', 'show', '-r',type_], stdout=PIPE, stderr=PIPE).communicate() - self.assertEquals(text_raw, output[0].decode().strip(), "Failed: %s"%(str(output))) - output = Popen(['rosmsg', 'show', '--raw', type_], stdout=PIPE).communicate()[0].decode() - self.assertEquals(text_raw, output.strip()) + # test as search + type_ = t + text_prefix = "[diagnostic_msgs/%s]:"%t + text = os.linesep.join([text_prefix, text]) + text_raw = os.linesep.join([text_prefix, text_raw]) + output = Popen(['rosmsg', 'show', type_], stdout=PIPE).communicate()[0].decode() + self.assertEquals(text, output.strip()) + output = Popen(['rosmsg', 'show', '-r',type_], stdout=PIPE, stderr=PIPE).communicate() + self.assertEquals(text_raw, output[0].decode().strip(), "Failed: %s"%(str(output))) + output = Popen(['rosmsg', 'show', '--raw', type_], stdout=PIPE).communicate()[0].decode() + self.assertEquals(text_raw, output.strip()) diff --git a/utilities/xmlrpcpp/test/TestValues.cpp b/utilities/xmlrpcpp/test/TestValues.cpp --- a/utilities/xmlrpcpp/test/TestValues.cpp +++ b/utilities/xmlrpcpp/test/TestValues.cpp @@ -26,6 +26,7 @@ #include <stdlib.h> #include <string> +#include <climits> #include "xmlrpcpp/XmlRpcValue.h" #include "xmlrpcpp/XmlRpcException.h" @@ -213,7 +214,7 @@ TEST(XmlRpc, testString) { TEST(XmlRpc, testOversizeString) { try { std::string xml = "<tag><nexttag>"; - xml += std::string(__INT_MAX__, 'a'); + xml += std::string(INT_MAX, 'a'); 
xml += "a</nextag></tag>"; int offset; diff --git a/utilities/xmlrpcpp/test/test_client.cpp b/utilities/xmlrpcpp/test/test_client.cpp --- a/utilities/xmlrpcpp/test/test_client.cpp +++ b/utilities/xmlrpcpp/test/test_client.cpp @@ -26,6 +26,7 @@ #include "mock_socket.h" #include <errno.h> +#include <climits> #include <gtest/gtest.h> @@ -935,7 +936,7 @@ TEST_F(MockSocketTest, readHeader_oversize) { // Add a large content-length to the standard header std::string header_cl = header3; - header_cl += std::to_string(size_t(__INT_MAX__) + 1); + header_cl += std::to_string(size_t(INT_MAX) + 1); header_cl += "\r\n\r\n "; Expect_nbRead(7, header_cl, false, true);
When I raise a "DeserializationError" exception, I find this error: "TypeError: not enough arguments for format string". This is a format-string error in the calls to logerr and rospyerr:

```
        self.socket.close()
    except:
        pass
    self.socket = None
except DeserializationError as e:
    #TODO: how should we handle reconnect in this case?
    logerr("[%s] error deserializing incoming request: %s"%self.name, str(e))
    rospyerr("[%s] error deserializing incoming request: %s"%self.name, traceback.format_exc())
except:
```
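For reference, the TypeError occurs because the format string has two %s placeholders but `% self.name` supplies only one value; `str(e)` ends up as a separate positional argument rather than a format argument. A minimal, self-contained sketch of the usual correction (hypothetical stand-in names, not necessarily the wording of any merged fix) is to give the % operator both values as a tuple:

```python
import traceback

name = "/listener_connection"           # hypothetical stand-in for self.name
err = ValueError("bad message field")   # hypothetical stand-in for the caught exception

# Buggy shape: "[%s] ... %s" % name -> TypeError: not enough arguments for format string
# Corrected shape: supply both values as a tuple.
msg = "[%s] error deserializing incoming request: %s" % (name, str(err))
print(msg)
```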
2021-04-06T19:19:09Z
[]
[]
ros/ros_comm
2173
ros__ros_comm-2173
[ "1487" ]
c7c5bcb22780e7f6dbbff9bc923afca34fd612b6
diff --git a/tools/roslaunch/src/roslaunch/xmlloader.py b/tools/roslaunch/src/roslaunch/xmlloader.py --- a/tools/roslaunch/src/roslaunch/xmlloader.py +++ b/tools/roslaunch/src/roslaunch/xmlloader.py @@ -613,6 +613,7 @@ def _include_tag(self, tag, context, ros_config, default_machine, is_core, verbo # error on) attempts to set the same arg twice. child_ns.pass_all_args = True + child_ns.filename = context.filename # evaluate substitutions w.r.t. parent filename for t in [c for c in tag.childNodes if c.nodeType == DomNode.ELEMENT_NODE]: tag_name = t.tagName.lower() if tag_name == 'env': @@ -621,6 +622,7 @@ def _include_tag(self, tag, context, ros_config, default_machine, is_core, verbo self._arg_tag(t, child_ns, ros_config, verbose=verbose) else: print("WARN: unrecognized '%s' tag in <%s> tag"%(t.tagName, tag.tagName), file=sys.stderr) + child_ns.filename = inc_filename # restore filename # setup arg passing loader.process_include_args(child_ns)
diff --git a/tools/roslaunch/test/unit/test_xmlloader.py b/tools/roslaunch/test/unit/test_xmlloader.py --- a/tools/roslaunch/test/unit/test_xmlloader.py +++ b/tools/roslaunch/test/unit/test_xmlloader.py @@ -1080,5 +1080,7 @@ def test_dirname(self): for p in mock.params: param_d[p.key] = p.value - self.assertEquals(param_d['/foo'], self.xml_dir + '/bar') - self.assertEquals(param_d['/bar'], self.xml_dir + '/test-dirname/baz') + self.assertEqual(param_d['/base'], self.xml_dir) + self.assertEqual(param_d['/foo'], self.xml_dir + '/bar') + self.assertEqual(param_d['/baz'], self.xml_dir + '/baz') + self.assertEqual(param_d['/bar'], self.xml_dir + '/test-dirname/baz') diff --git a/tools/roslaunch/test/xml/test-dirname.xml b/tools/roslaunch/test/xml/test-dirname.xml --- a/tools/roslaunch/test/xml/test-dirname.xml +++ b/tools/roslaunch/test/xml/test-dirname.xml @@ -1,4 +1,7 @@ <launch> + <arg name="base" value="$(dirname)" /> <param name="foo" value="$(dirname)/bar" /> - <include file="$(dirname)/test-dirname/included.xml" /> + <include file="$(dirname)/test-dirname/included.xml" pass_all_args="True"> + <arg name="baz" value="$(dirname)/baz" /> + </include> </launch> diff --git a/tools/roslaunch/test/xml/test-dirname/included.xml b/tools/roslaunch/test/xml/test-dirname/included.xml --- a/tools/roslaunch/test/xml/test-dirname/included.xml +++ b/tools/roslaunch/test/xml/test-dirname/included.xml @@ -1,3 +1,6 @@ <launch> + <arg name="baz" /> + <param name="base" value="$(arg base)" /> <param name="bar" value="$(dirname)/baz" /> + <param name="baz" value="$(arg baz)" /> </launch>
Unexpected $(dirname) behaviour when value assigned to arg.

The existing test is here, added as part of the overall feature in #1103: https://github.com/ros/ros_comm/blob/e96c407c64e1c17b0dd2bb85b67f388380527097/tools/roslaunch/test/xml/test-dirname.xml

If the test is augmented to be:
```
<launch>
  <param name="foo" value="$(dirname)/bar" />
  <include file="$(dirname)/test-dirname/included.xml">
    <arg name="baz" value="$(dirname)/baz" />
  </include>
</launch>
```
And then the `included.xml` file is changed to:
```
<launch>
  <arg name="baz" />
  <param name="bar" value="$(dirname)/baz" />
  <param name="baz" value="$(arg baz)" />
</launch>
```
You'd expect the `baz` parameter to have `tools/roslaunch/test/xml/baz` as its path, but instead it's `tools/roslaunch/test/xml/test-dirname/baz`. That is, dirname is being lazily resolved at the point where the value is assigned to the parameter, rather than when the arg value is assigned. Historically this hasn't mattered, as no other substitutions have this kind of context sensitivity. I'm not sure how much effort this will be to fix, but I had an instance recently where it would have been nice to be able to pass a "back path" into an included file, and wasn't able to do so due to this problem.
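The patch above addresses this in roslaunch's xmlloader `_include_tag` by resolving $(dirname) in the `<include>`'s `<arg>` children against the including file rather than the included one. A minimal, simplified sketch of that idea (hypothetical names; not the actual roslaunch code):

```python
# Hypothetical, simplified stand-in for roslaunch's loader context.
class Ctx(object):
    def __init__(self, filename):
        self.filename = filename  # file against which $(dirname) is resolved

def eval_include_args(parent_ctx, child_ctx, included_file, arg_tags, resolve_arg):
    """Evaluate the <arg> children of an <include> tag."""
    # Substitutions in the <arg> values (including $(dirname)) should see the
    # *including* file's directory...
    child_ctx.filename = parent_ctx.filename
    for tag in arg_tags:
        resolve_arg(tag, child_ctx)
    # ...and the included file's own path is restored before it is parsed.
    child_ctx.filename = included_file
```

This matches the test change in the test_patch above: `$(dirname)` passed as an `<arg>` value resolves to the outer launch file's directory, while `$(dirname)` used inside included.xml still resolves to test-dirname/.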
This indeed is an issue.

This is entirely unintuitive behavior demoting the use of the (in ROS terms) relatively new `$(dirname)`. Sad to see this unresolved three years after Mike went through the trouble to document it...

@v4hn In fairness to the maintainers of this repo, I am in fact the author of the original `$(dirname)` patch, so this ticket was as much about self-disclosure as it was anything else. :) We ended up reworking our launchfiles to not require this fixed, but I left the ticket up in case someone else was interested in tackling it.

> I am in fact the author of the original `$(dirname)` patch,

Thanks! Without this bug, it would be an even more useful command, though it already helps with relative includes. :sunglasses:

> We ended up reworking our launchfiles to not require this fixed, but I left the ticket up in case someone else was interested in tackling it.

Yeah, the usual lot of bugs that are relatively easy to work around... They just never disappear unless some good soul has the interest and time to fix it.
2021-07-17T08:48:38Z
[]
[]
ros/ros_comm
2219
ros__ros_comm-2219
[ "2123" ]
9bec9d113196a4389d1dfa574dee882172823f71
diff --git a/clients/rospy/src/rospy/impl/tcpros_base.py b/clients/rospy/src/rospy/impl/tcpros_base.py --- a/clients/rospy/src/rospy/impl/tcpros_base.py +++ b/clients/rospy/src/rospy/impl/tcpros_base.py @@ -63,6 +63,7 @@ from rospy.service import ServiceException from rospy.impl.transport import Transport, BIDIRECTIONAL +from errno import EAGAIN, EWOULDBLOCK logger = logging.getLogger('rospy.tcpros') @@ -101,12 +102,18 @@ def recv_buff(sock, b, buff_size): @return: number of bytes read @rtype: int """ - d = sock.recv(buff_size) - if d: - b.write(d) - return len(d) - else: #bomb out - raise TransportTerminated("unable to receive data from sender, check sender's logs for details") + try: + d = sock.recv(buff_size) + if d: + b.write(d) + return len(d) + else: #bomb out + raise TransportTerminated("unable to receive data from sender, check sender's logs for details") + except socket.error as ex: + if ex.errno not in (EAGAIN, EWOULDBLOCK): + raise TransportTerminated("unable to receive data from sender, check sender's logs for details") + else: + return 0 class TCPServer(object): """ @@ -800,6 +807,11 @@ def receive_loop(self, msgs_callback): else: self._reconnect() + except TransportTerminated as e: + logdebug("[%s] failed to receive incoming message : %s" % (self.name, str(e))) + rospydebug("[%s] failed to receive incoming message: %s" % (self.name, traceback.format_exc())) + break + except TransportException as e: # set socket to None so we reconnect try: diff --git a/clients/rospy/src/rospy/impl/tcpros_service.py b/clients/rospy/src/rospy/impl/tcpros_service.py --- a/clients/rospy/src/rospy/impl/tcpros_service.py +++ b/clients/rospy/src/rospy/impl/tcpros_service.py @@ -510,7 +510,8 @@ def call(self, *args, **kwds): except TransportInitError as e: # can be a connection or md5sum mismatch raise ServiceException("unable to connect to service: %s"%e) - self.transport = transport + if self.persistent: + self.transport = transport else: transport = self.transport diff --git a/clients/rospy/src/rospy/timer.py b/clients/rospy/src/rospy/timer.py --- a/clients/rospy/src/rospy/timer.py +++ b/clients/rospy/src/rospy/timer.py @@ -173,6 +173,9 @@ class TimerEvent(object): @type last_real: rospy.Time @param current_expected: in a perfect world, this is when the current callback should have been called @type current_expected: rospy.Time + @param current_real: when the current callback is actually being called + (rospy.Time.now() as of immediately before calling the callback) + @type current_real: rospy.Time @param last_duration: contains the duration of the last callback (end time minus start time) in seconds. Note that this is always in wall-clock time. @type last_duration: float diff --git a/tools/rosgraph/src/rosgraph/xmlrpc.py b/tools/rosgraph/src/rosgraph/xmlrpc.py --- a/tools/rosgraph/src/rosgraph/xmlrpc.py +++ b/tools/rosgraph/src/rosgraph/xmlrpc.py @@ -114,8 +114,7 @@ class ThreadingXMLRPCServer(socketserver.ThreadingMixIn, SimpleXMLRPCServer): requests via threading. Also makes logging toggleable. 
""" - if _support_http_1_1(): - daemon_threads = True + daemon_threads = True def __init__(self, addr, log_requests=1): """ diff --git a/tools/roslaunch/src/roslaunch/__init__.py b/tools/roslaunch/src/roslaunch/__init__.py --- a/tools/roslaunch/src/roslaunch/__init__.py +++ b/tools/roslaunch/src/roslaunch/__init__.py @@ -68,6 +68,7 @@ DEFAULT_MASTER_PORT = 11311 from rosmaster.master_api import NUM_WORKERS +from roslaunch.nodeprocess import DEFAULT_TIMEOUT_SIGINT, DEFAULT_TIMEOUT_SIGTERM NAME = 'roslaunch' @@ -193,6 +194,16 @@ def _get_optparse(): parser.add_option("--master-logger-level", dest="master_logger_level", default=False, type=str, help="set rosmaster.master logger level ('debug', 'info', 'warn', 'error', 'fatal')") + parser.add_option("--sigint-timeout", + dest="sigint_timeout", + default=DEFAULT_TIMEOUT_SIGINT, type=float, + help="the SIGINT timeout used when killing nodes (in seconds).", + metavar="SIGINT_TIMEOUT") + parser.add_option("--sigterm-timeout", + dest="sigterm_timeout", + default=DEFAULT_TIMEOUT_SIGTERM, type=float, + help="the SIGTERM timeout used when killing nodes if SIGINT does not stop the node (in seconds).", + metavar="SIGTERM_TIMEOUT") return parser @@ -298,7 +309,9 @@ def main(argv=sys.argv): # client spins up an XML-RPC server that waits for # commands and configuration from the server. from . import child as roslaunch_child - c = roslaunch_child.ROSLaunchChild(uuid, options.child_name, options.server_uri) + c = roslaunch_child.ROSLaunchChild(uuid, options.child_name, options.server_uri, + sigint_timeout=options.sigint_timeout, + sigterm_timeout=options.sigterm_timeout) c.run() else: logger.info('starting in server mode') @@ -328,7 +341,9 @@ def main(argv=sys.argv): num_workers=options.num_workers, timeout=options.timeout, master_logger_level=options.master_logger_level, show_summary=not options.no_summary, - force_required=options.force_required) + force_required=options.force_required, + sigint_timeout=options.sigint_timeout, + sigterm_timeout=options.sigterm_timeout) p.start() p.spin() diff --git a/tools/roslaunch/src/roslaunch/child.py b/tools/roslaunch/src/roslaunch/child.py --- a/tools/roslaunch/src/roslaunch/child.py +++ b/tools/roslaunch/src/roslaunch/child.py @@ -49,6 +49,7 @@ import roslaunch.core import roslaunch.pmon import roslaunch.server +from roslaunch.nodeprocess import DEFAULT_TIMEOUT_SIGINT, DEFAULT_TIMEOUT_SIGTERM class ROSLaunchChild(object): """ @@ -57,7 +58,7 @@ class ROSLaunchChild(object): This must be called from the Python Main thread due to signal registration. """ - def __init__(self, run_id, name, server_uri): + def __init__(self, run_id, name, server_uri, sigint_timeout=DEFAULT_TIMEOUT_SIGINT, sigterm_timeout=DEFAULT_TIMEOUT_SIGTERM): """ Startup roslaunch remote client XML-RPC services. Blocks until shutdown @param run_id: UUID of roslaunch session @@ -66,6 +67,11 @@ def __init__(self, run_id, name, server_uri): @type name: str @param server_uri: XML-RPC URI of roslaunch server @type server_uri: str + @param sigint_timeout: The SIGINT timeout used when killing nodes (in seconds). + @type sigint_timeout: float + @param sigterm_timeout: The SIGTERM timeout used when killing nodes if SIGINT does not stop the node ( + in seconds). 
+ @type sigterm_timeout: float @return: XML-RPC URI @rtype: str """ @@ -77,6 +83,8 @@ def __init__(self, run_id, name, server_uri): self.server_uri = server_uri self.child_server = None self.pm = None + self.sigint_timeout = sigint_timeout + self.sigterm_timeout = sigterm_timeout roslaunch.pmon._init_signal_handlers() @@ -102,7 +110,9 @@ def run(self): try: self.logger.info("starting roslaunch child process [%s], server URI is [%s]", self.name, self.server_uri) self._start_pm() - self.child_server = roslaunch.server.ROSLaunchChildNode(self.run_id, self.name, self.server_uri, self.pm) + self.child_server = roslaunch.server.ROSLaunchChildNode(self.run_id, self.name, self.server_uri, + self.pm, sigint_timeout=self.sigint_timeout, + sigterm_timeout=self.sigterm_timeout) self.logger.info("... creating XMLRPC server for child") self.child_server.start() self.logger.info("... started XMLRPC server for child") diff --git a/tools/roslaunch/src/roslaunch/launch.py b/tools/roslaunch/src/roslaunch/launch.py --- a/tools/roslaunch/src/roslaunch/launch.py +++ b/tools/roslaunch/src/roslaunch/launch.py @@ -52,7 +52,7 @@ from roslaunch.core import * #from roslaunch.core import setup_env -from roslaunch.nodeprocess import create_master_process, create_node_process +from roslaunch.nodeprocess import create_master_process, create_node_process, DEFAULT_TIMEOUT_SIGINT, DEFAULT_TIMEOUT_SIGTERM from roslaunch.pmon import start_process_monitor, ProcessListener from roslaunch.rlutil import update_terminal_name @@ -235,7 +235,8 @@ class ROSLaunchRunner(object): monitored. """ - def __init__(self, run_id, config, server_uri=None, pmon=None, is_core=False, remote_runner=None, is_child=False, is_rostest=False, num_workers=NUM_WORKERS, timeout=None, master_logger_level=False): + def __init__(self, run_id, config, server_uri=None, pmon=None, is_core=False, remote_runner=None, is_child=False, is_rostest=False, num_workers=NUM_WORKERS, timeout=None, + master_logger_level=False, sigint_timeout=DEFAULT_TIMEOUT_SIGINT, sigterm_timeout=DEFAULT_TIMEOUT_SIGTERM): """ @param run_id: /run_id for this launch. If the core is not running, this value will be used to initialize /run_id. If @@ -266,9 +267,19 @@ def __init__(self, run_id, config, server_uri=None, pmon=None, is_core=False, re @type timeout: Float or None @param master_logger_level: Specify roscore's rosmaster.master logger level, use default if it is False. @type master_logger_level: str or False + @param sigint_timeout: The SIGINT timeout used when killing nodes (in seconds). + @type sigint_timeout: float + @param sigterm_timeout: The SIGTERM timeout used when killing nodes if SIGINT does not stop the node (in seconds). + @type sigterm_timeout: float + @raise RLException: If sigint_timeout or sigterm_timeout are nonpositive. 
""" if run_id is None: raise RLException("run_id is None") + if sigint_timeout <= 0: + raise RLException("sigint_timeout must be a positive number, received %f" % sigint_timeout) + if sigterm_timeout <= 0: + raise RLException("sigterm_timeout must be a positive number, received %f" % sigterm_timeout) + self.run_id = run_id # In the future we should can separate the notion of a core @@ -286,6 +297,8 @@ def __init__(self, run_id, config, server_uri=None, pmon=None, is_core=False, re self.master_logger_level = master_logger_level self.logger = logging.getLogger('roslaunch') self.pm = pmon or start_process_monitor() + self.sigint_timeout = sigint_timeout + self.sigterm_timeout = sigterm_timeout # wire in ProcessMonitor events to our listeners # aggregator. We similarly wire in the remote events when we @@ -402,7 +415,8 @@ def _launch_master(self): printlog("auto-starting new master") p = create_master_process( self.run_id, m.type, get_ros_root(), m.get_port(), self.num_workers, - self.timeout, master_logger_level=self.master_logger_level) + self.timeout, master_logger_level=self.master_logger_level, + sigint_timeout=self.sigint_timeout, sigterm_timeout=self.sigterm_timeout) self.pm.register_core_proc(p) success = p.start() if not success: @@ -541,7 +555,7 @@ def launch_node(self, node, core=False): master = self.config.master import roslaunch.node_args try: - process = create_node_process(self.run_id, node, master.uri) + process = create_node_process(self.run_id, node, master.uri, sigint_timeout=self.sigint_timeout, sigterm_timeout=self.sigterm_timeout) except roslaunch.node_args.NodeParamsException as e: self.logger.error(e) printerrlog("ERROR: cannot launch node of type [%s/%s]: %s"%(node.package, node.type, str(e))) diff --git a/tools/roslaunch/src/roslaunch/nodeprocess.py b/tools/roslaunch/src/roslaunch/nodeprocess.py --- a/tools/roslaunch/src/roslaunch/nodeprocess.py +++ b/tools/roslaunch/src/roslaunch/nodeprocess.py @@ -55,8 +55,8 @@ import logging _logger = logging.getLogger("roslaunch") -_TIMEOUT_SIGINT = 15.0 #seconds -_TIMEOUT_SIGTERM = 2.0 #seconds +DEFAULT_TIMEOUT_SIGINT = 15.0 #seconds +DEFAULT_TIMEOUT_SIGTERM = 2.0 #seconds _counter = 0 def _next_counter(): @@ -64,7 +64,7 @@ def _next_counter(): _counter += 1 return _counter -def create_master_process(run_id, type_, ros_root, port, num_workers=NUM_WORKERS, timeout=None, master_logger_level=False): +def create_master_process(run_id, type_, ros_root, port, num_workers=NUM_WORKERS, timeout=None, master_logger_level=False, sigint_timeout=DEFAULT_TIMEOUT_SIGINT, sigterm_timeout=DEFAULT_TIMEOUT_SIGTERM): """ Launch a master @param type_: name of master executable (currently just Master.ZENMASTER) @@ -77,10 +77,14 @@ def create_master_process(run_id, type_, ros_root, port, num_workers=NUM_WORKERS @type num_workers: int @param timeout: socket timeout for connections. @type timeout: float - @raise RLException: if type_ or port is invalid @param master_logger_level: rosmaster.master logger debug level @type master_logger_level=: str or False - """ + @param sigint_timeout: The SIGINT timeout used when killing nodes (in seconds). + @type sigint_timeout: float + @param sigterm_timeout: The SIGTERM timeout used when killing nodes if SIGINT does not stop the node (in seconds). + @type sigterm_timeout: float + @raise RLException: if type_ or port is invalid or sigint_timeout or sigterm_timeout are nonpositive. 
+ """ if port < 1 or port > 65535: raise RLException("invalid port assignment: %s"%port) @@ -100,9 +104,10 @@ def create_master_process(run_id, type_, ros_root, port, num_workers=NUM_WORKERS _logger.info("process[master]: launching with args [%s]"%args) log_output = False - return LocalProcess(run_id, package, 'master', args, os.environ, log_output, None, required=True) + return LocalProcess(run_id, package, 'master', args, os.environ, log_output, None, required=True, + sigint_timeout=sigint_timeout, sigterm_timeout=sigterm_timeout) -def create_node_process(run_id, node, master_uri): +def create_node_process(run_id, node, master_uri, sigint_timeout=DEFAULT_TIMEOUT_SIGINT, sigterm_timeout=DEFAULT_TIMEOUT_SIGTERM): """ Factory for generating processes for launching local ROS nodes. Also registers the process with the L{ProcessMonitor} so that @@ -114,9 +119,14 @@ def create_node_process(run_id, node, master_uri): @type node: L{Node} @param master_uri: API URI for master node @type master_uri: str + @param sigint_timeout: The SIGINT timeout used when killing nodes (in seconds). + @type sigint_timeout: float + @param sigterm_timeout: The SIGTERM timeout used when killing nodes if SIGINT does not stop the node (in seconds). + @type sigterm_timeout: float @return: local process instance @rtype: L{LocalProcess} @raise NodeParamsException: If the node's parameters are improperly specific + @raise RLException: If sigint_timeout or sigterm_timeout are nonpositive. """ _logger.info("create_node_process: package[%s] type[%s] machine[%s] master_uri[%s]", node.package, node.type, node.machine, master_uri) # check input args @@ -150,7 +160,8 @@ def create_node_process(run_id, node, master_uri): _logger.debug('process[%s]: returning LocalProcess wrapper') return LocalProcess(run_id, node.package, name, args, env, log_output, \ respawn=node.respawn, respawn_delay=node.respawn_delay, \ - required=node.required, cwd=node.cwd) + required=node.required, cwd=node.cwd, \ + sigint_timeout=sigint_timeout, sigterm_timeout=sigterm_timeout) class LocalProcess(Process): @@ -160,7 +171,7 @@ class LocalProcess(Process): def __init__(self, run_id, package, name, args, env, log_output, respawn=False, respawn_delay=0.0, required=False, cwd=None, - is_node=True): + is_node=True, sigint_timeout=DEFAULT_TIMEOUT_SIGINT, sigterm_timeout=DEFAULT_TIMEOUT_SIGTERM): """ @param run_id: unique run ID for this roslaunch. Used to generate log directory location. run_id may be None if this @@ -184,9 +195,20 @@ def __init__(self, run_id, package, name, args, env, log_output, @type cwd: str @param is_node: (optional) if True, process is ROS node and accepts ROS node command-line arguments. Default: True @type is_node: False + @param sigint_timeout: The SIGINT timeout used when killing nodes (in seconds). + @type sigint_timeout: float + @param sigterm_timeout: The SIGTERM timeout used when killing nodes if SIGINT does not stop the node (in seconds). + @type sigterm_timeout: float + @raise RLException: If sigint_timeout or sigterm_timeout are nonpositive. 
""" super(LocalProcess, self).__init__(package, name, args, env, respawn, respawn_delay, required) + + if sigint_timeout <= 0: + raise RLException("sigint_timeout must be a positive number, received %f" % sigint_timeout) + if sigterm_timeout <= 0: + raise RLException("sigterm_timeout must be a positive number, received %f" % sigterm_timeout) + self.run_id = run_id self.popen = None self.log_output = log_output @@ -196,6 +218,8 @@ def __init__(self, run_id, package, name, args, env, log_output, self.log_dir = None self.pid = -1 self.is_node = is_node + self.sigint_timeout = sigint_timeout + self.sigterm_timeout = sigterm_timeout # NOTE: in the future, info() is going to have to be sufficient for relaunching a process def get_info(self): @@ -406,7 +430,7 @@ def _stop_unix(self, errors): _logger.info("[%s] sending SIGINT to pgid [%s]", self.name, pgid) os.killpg(pgid, signal.SIGINT) _logger.info("[%s] sent SIGINT to pgid [%s]", self.name, pgid) - timeout_t = time.time() + _TIMEOUT_SIGINT + timeout_t = time.time() + self.sigint_timeout retcode = self.popen.poll() while time.time() < timeout_t and retcode is None: time.sleep(0.1) @@ -414,7 +438,7 @@ def _stop_unix(self, errors): # Escalate non-responsive process if retcode is None: printerrlog("[%s] escalating to SIGTERM"%self.name) - timeout_t = time.time() + _TIMEOUT_SIGTERM + timeout_t = time.time() + self.sigterm_timeout os.killpg(pgid, signal.SIGTERM) _logger.info("[%s] sent SIGTERM to pgid [%s]"%(self.name, pgid)) retcode = self.popen.poll() @@ -474,7 +498,7 @@ def _stop_win32(self, errors): _logger.info("[%s] running taskkill pid tree [%s]", self.name, pid) subprocess.call(['taskkill', '/F', '/T', '/PID', str(pid)]) _logger.info("[%s] run taskkill pid tree [%s]", self.name, pid) - timeout_t = time.time() + _TIMEOUT_SIGINT + timeout_t = time.time() + self.sigint_timeout retcode = self.popen.poll() while time.time() < timeout_t and retcode is None: time.sleep(0.1) diff --git a/tools/roslaunch/src/roslaunch/parent.py b/tools/roslaunch/src/roslaunch/parent.py --- a/tools/roslaunch/src/roslaunch/parent.py +++ b/tools/roslaunch/src/roslaunch/parent.py @@ -55,6 +55,7 @@ import roslaunch.xmlloader from rosmaster.master_api import NUM_WORKERS +from roslaunch.nodeprocess import DEFAULT_TIMEOUT_SIGINT, DEFAULT_TIMEOUT_SIGTERM #TODO: probably move process listener infrastructure into here @@ -73,7 +74,8 @@ class ROSLaunchParent(object): """ def __init__(self, run_id, roslaunch_files, is_core=False, port=None, local_only=False, process_listeners=None, - verbose=False, force_screen=False, force_log=False, is_rostest=False, roslaunch_strs=None, num_workers=NUM_WORKERS, timeout=None, master_logger_level=False, show_summary=True, force_required=False): + verbose=False, force_screen=False, force_log=False, is_rostest=False, roslaunch_strs=None, num_workers=NUM_WORKERS, timeout=None, master_logger_level=False, show_summary=True, force_required=False, + sigint_timeout=DEFAULT_TIMEOUT_SIGINT, sigterm_timeout=DEFAULT_TIMEOUT_SIGTERM): """ @param run_id: UUID of roslaunch session @type run_id: str @@ -109,7 +111,16 @@ def __init__(self, run_id, roslaunch_files, is_core=False, port=None, local_only @type master_logger_level: str or False @param force_required: (optional) whether to make all nodes required @type force_required: boolean + @param sigint_timeout: The SIGINT timeout used when killing nodes (in seconds). + @type sigint_timeout: float + @param sigterm_timeout: The SIGTERM timeout used when killing nodes if SIGINT does not stop the node (in seconds). 
+ @type sigterm_timeout: float + @raise RLException: If sigint_timeout or sigterm_timeout are nonpositive. """ + if sigint_timeout <= 0: + raise RLException("sigint_timeout must be a positive number, received %f" % sigint_timeout) + if sigterm_timeout <= 0: + raise RLException("sigterm_timeout must be a positive number, received %f" % sigterm_timeout) self.logger = logging.getLogger('roslaunch.parent') self.run_id = run_id @@ -126,6 +137,8 @@ def __init__(self, run_id, roslaunch_files, is_core=False, port=None, local_only self.num_workers = num_workers self.timeout = timeout self.master_logger_level = master_logger_level + self.sigint_timeout = sigint_timeout + self.sigterm_timeout = sigterm_timeout # I don't think we should have to pass in so many options from # the outside into the roslaunch parent. One possibility is to @@ -173,7 +186,8 @@ def _init_runner(self): raise RLException("pm is not initialized") if self.server is None: raise RLException("server is not initialized") - self.runner = roslaunch.launch.ROSLaunchRunner(self.run_id, self.config, server_uri=self.server.uri, pmon=self.pm, is_core=self.is_core, remote_runner=self.remote_runner, is_rostest=self.is_rostest, num_workers=self.num_workers, timeout=self.timeout, master_logger_level=self.master_logger_level) + self.runner = roslaunch.launch.ROSLaunchRunner(self.run_id, self.config, server_uri=self.server.uri, pmon=self.pm, is_core=self.is_core, remote_runner=self.remote_runner, is_rostest=self.is_rostest, num_workers=self.num_workers, timeout=self.timeout, master_logger_level=self.master_logger_level, + sigint_timeout=self.sigint_timeout, sigterm_timeout=self.sigterm_timeout) # print runner info to user, put errors last to make the more visible if self.is_core: @@ -215,7 +229,9 @@ def _init_remote(self): if not self.local_only and self.config.has_remote_nodes(): # keep the remote package lazy-imported import roslaunch.remote - self.remote_runner = roslaunch.remote.ROSRemoteRunner(self.run_id, self.config, self.pm, self.server) + self.remote_runner = roslaunch.remote.ROSRemoteRunner(self.run_id, self.config, self.pm, self.server, + sigint_timeout=self.sigint_timeout, + sigterm_timeout=self.sigterm_timeout) elif self.local_only: printlog_bold("LOCAL\nlocal only launch specified, will not launch remote nodes\nLOCAL\n") diff --git a/tools/roslaunch/src/roslaunch/remote.py b/tools/roslaunch/src/roslaunch/remote.py --- a/tools/roslaunch/src/roslaunch/remote.py +++ b/tools/roslaunch/src/roslaunch/remote.py @@ -48,6 +48,7 @@ import roslaunch.launch import roslaunch.server #ROSLaunchParentNode hidden dep from roslaunch.core import RLException, is_machine_local, printerrlog, printlog +from roslaunch.nodeprocess import DEFAULT_TIMEOUT_SIGINT, DEFAULT_TIMEOUT_SIGTERM _CHILD_REGISTER_TIMEOUT = 10.0 #seconds @@ -56,17 +57,23 @@ class ROSRemoteRunner(roslaunch.launch.ROSRemoteRunnerIF): Manages the running of remote roslaunch children """ - def __init__(self, run_id, rosconfig, pm, server): + def __init__(self, run_id, rosconfig, pm, server, sigint_timeout=DEFAULT_TIMEOUT_SIGINT, sigterm_timeout=DEFAULT_TIMEOUT_SIGTERM): """ :param run_id: roslaunch run_id of this runner, ``str`` :param config: launch configuration, ``ROSConfig`` :param pm process monitor, ``ProcessMonitor`` :param server: roslaunch parent server, ``ROSLaunchParentNode`` + :param sigint_timeout: The SIGINT timeout used when killing nodes (in seconds). 
+ :type sigint_timeout: float + :param sigterm_timeout: The SIGTERM timeout used when killing nodes if SIGINT does not stop the node (in seconds). + :type sigterm_timeout: float """ self.run_id = run_id self.rosconfig = rosconfig self.server = server self.pm = pm + self.sigint_timeout = sigint_timeout + self.sigterm_timeout = sigterm_timeout self.logger = logging.getLogger('roslaunch.remote') self.listeners = [] @@ -90,7 +97,8 @@ def _start_child(self, server_node_uri, machine, counter): self.logger.info("remote[%s] starting roslaunch", name) printlog("remote[%s] starting roslaunch"%name) - p = SSHChildROSLaunchProcess(self.run_id, name, server_node_uri, machine, self.rosconfig.master.uri) + p = SSHChildROSLaunchProcess(self.run_id, name, server_node_uri, machine, self.rosconfig.master.uri, + sigint_timeout=self.sigint_timeout, sigterm_timeout=self.sigterm_timeout) success = p.start() self.pm.register(p) if not success: #treat as fatal diff --git a/tools/roslaunch/src/roslaunch/remoteprocess.py b/tools/roslaunch/src/roslaunch/remoteprocess.py --- a/tools/roslaunch/src/roslaunch/remoteprocess.py +++ b/tools/roslaunch/src/roslaunch/remoteprocess.py @@ -48,6 +48,7 @@ from roslaunch.core import printlog, printerrlog import roslaunch.pmon import roslaunch.server +from roslaunch.nodeprocess import DEFAULT_TIMEOUT_SIGINT, DEFAULT_TIMEOUT_SIGTERM import logging _logger = logging.getLogger("roslaunch.remoteprocess") @@ -126,18 +127,25 @@ class SSHChildROSLaunchProcess(roslaunch.server.ChildROSLaunchProcess): """ Process wrapper for launching and monitoring a child roslaunch process over SSH """ - def __init__(self, run_id, name, server_uri, machine, master_uri=None): + def __init__(self, run_id, name, server_uri, machine, master_uri=None, sigint_timeout=DEFAULT_TIMEOUT_SIGINT, sigterm_timeout=DEFAULT_TIMEOUT_SIGTERM): """ :param machine: Machine instance. Must be fully configured. machine.env_loader is required to be set. + :param sigint_timeout: The SIGINT timeout used when killing nodes (in seconds). + :type sigint_timeout: float + :param sigterm_timeout: The SIGTERM timeout used when killing nodes if SIGINT does not stop the node (in seconds). 
+ :type sigterm_timeout: float """ if not machine.env_loader: raise ValueError("machine.env_loader must have been assigned before creating ssh child instance") - args = [machine.env_loader, 'roslaunch', '-c', name, '-u', server_uri, '--run_id', run_id] + args = [machine.env_loader, 'roslaunch', '-c', name, '-u', server_uri, '--run_id', run_id, + '--sigint-timeout', str(sigint_timeout), '--sigterm-timeout', str(sigterm_timeout)] # env is always empty dict because we only use env_loader super(SSHChildROSLaunchProcess, self).__init__(name, args, {}) self.machine = machine self.master_uri = master_uri + self.sigint_timeout = sigint_timeout + self.sigterm_timeout = sigterm_timeout self.ssh = self.sshin = self.sshout = self.ssherr = None self.started = False self.uri = None diff --git a/tools/roslaunch/src/roslaunch/server.py b/tools/roslaunch/src/roslaunch/server.py --- a/tools/roslaunch/src/roslaunch/server.py +++ b/tools/roslaunch/src/roslaunch/server.py @@ -70,6 +70,7 @@ import roslaunch.config from roslaunch.pmon import ProcessListener, Process import roslaunch.xmlloader +from roslaunch.nodeprocess import DEFAULT_TIMEOUT_SIGINT, DEFAULT_TIMEOUT_SIGTERM from roslaunch.launch import ROSLaunchRunner from roslaunch.core import RLException, \ @@ -243,12 +244,17 @@ class ROSLaunchChildHandler(ROSLaunchBaseHandler): it can track processes across requests """ - def __init__(self, run_id, name, server_uri, pm): + def __init__(self, run_id, name, server_uri, pm, sigint_timeout=DEFAULT_TIMEOUT_SIGINT, sigterm_timeout=DEFAULT_TIMEOUT_SIGTERM): """ @param server_uri: XML-RPC URI of server @type server_uri: str @param pm: process monitor to use @type pm: L{ProcessMonitor} + @param sigint_timeout: The SIGINT timeout used when killing nodes (in seconds). + @type sigint_timeout: float + @param sigterm_timeout: The SIGTERM timeout used when killing nodes if SIGINT does not stop the node ( + in seconds). + @type sigterm_timeout: float @raise RLException: If parameters are invalid """ super(ROSLaunchChildHandler, self).__init__(pm) @@ -264,6 +270,8 @@ def __init__(self, run_id, name, server_uri, pm): self.name = name self.pm = pm self.server_uri = server_uri + self.sigint_timeout = sigint_timeout + self.sigterm_timeout = sigterm_timeout self.server = ServerProxy(server_uri) def _shutdown(self, reason): @@ -328,7 +336,8 @@ def launch(self, launch_xml): # mainly the responsibility of the roslaunch server to not give us any XML that might # cause conflict (e.g. master tags, param tags, etc...). self._log(Log.INFO, "launching nodes...") - runner = ROSLaunchRunner(self.run_id, rosconfig, server_uri=self.server_uri, pmon=self.pm) + runner = ROSLaunchRunner(self.run_id, rosconfig, server_uri=self.server_uri, pmon=self.pm, + sigint_timeout=self.sigint_timeout, sigterm_timeout=self.sigterm_timeout) succeeded, failed = runner.launch() self._log(Log.INFO, "... done launching nodes") # enable the process monitor to exit of all processes die @@ -475,13 +484,18 @@ class ROSLaunchChildNode(ROSLaunchNode): XML-RPC server for roslaunch child processes """ - def __init__(self, run_id, name, server_uri, pm): + def __init__(self, run_id, name, server_uri, pm, sigint_timeout=DEFAULT_TIMEOUT_SIGINT, sigterm_timeout=DEFAULT_TIMEOUT_SIGTERM): """ ## Startup roslaunch remote client XML-RPC services. 
Blocks until shutdown ## @param name: name of remote client ## @type name: str ## @param server_uri: XML-RPC URI of roslaunch server ## @type server_uri: str + ## @param sigint_timeout: The SIGINT timeout used when killing nodes (in seconds). + ## @type sigint_timeout: float + ## @param sigterm_timeout: The SIGTERM timeout used when killing nodes if SIGINT does not stop the node ( + ## in seconds). + ## @type sigterm_timeout: float ## @return: XML-RPC URI ## @rtype: str """ @@ -493,7 +507,8 @@ def __init__(self, run_id, name, server_uri, pm): if self.pm is None: raise RLException("cannot create child node: pm is not initialized") - handler = ROSLaunchChildHandler(self.run_id, self.name, self.server_uri, self.pm) + handler = ROSLaunchChildHandler(self.run_id, self.name, self.server_uri, self.pm, + sigint_timeout=sigint_timeout, sigterm_timeout=sigterm_timeout) super(ROSLaunchChildNode, self).__init__(handler) def _register_with_server(self):
diff --git a/tools/roslaunch/test/manual-test-remote-timeouts.sh b/tools/roslaunch/test/manual-test-remote-timeouts.sh new file mode 100755 --- /dev/null +++ b/tools/roslaunch/test/manual-test-remote-timeouts.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" + +if [ $# -lt 5 ] || [ "$1" == "--help" ] || [ "$1" == "-h" ]; then + echo "Usage: manual-test-remote-timeouts.sh sigint_timeout sigterm_timeout address env_loader user [roslaunch_timeout]" + echo "Run this script set up to connect to a remote machine (or your own one, if you have self-ssh enabled), and after a while, break the program with Ctrl-C." + echo "Observe, if SIGINT and SIGTERM are issued approximately after the time you have given in sigint_timeout and sigterm_timeout" + echo "Make sure the remote machine also has the same version of ros_comm as this one!" + exit 1 +fi + +sigint=$1 +sigterm=$2 +address=$3 +env_loader=$4 +user=$5 +if [ $# -gt 6 ]; then + timeout=$6 +else + timeout=10.0 +fi + +bold="\033[1m" +normal="\033[0m" +echo -e "${bold}A while after you see '... done launching nodes', break the program with Ctrl-C." +echo -e "Observe, if SIGINT and SIGTERM are issued approximately after ${sigint} and ${sigterm} seconds" +echo -e "Make sure the remote machine also has the same version of ros_comm as this one!${normal}" + +sleep 5 + +"${THIS_DIR}/../scripts/roslaunch" --sigint-timeout ${sigint} --sigterm-timeout ${sigterm} \ + "${THIS_DIR}/xml/manual-test-remote-timeouts.launch" \ + address:=${address} env_loader:=${env_loader} user:=${user} timeout:=${timeout} \ No newline at end of file diff --git a/tools/roslaunch/test/signal_logger.py b/tools/roslaunch/test/signal_logger.py new file mode 100755 --- /dev/null +++ b/tools/roslaunch/test/signal_logger.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 + +import os +import signal +import tempfile +import time + +if __name__ == '__main__': + LOG_FILE = os.path.join(tempfile.gettempdir(), "signal.log") + log_stream = open(LOG_FILE, 'w') + + + def handler(signum, _): + log_stream.write("%i %s\n" % (signum, str(time.time()))) + log_stream.flush() + + if signum == signal.SIGTERM: + log_stream.close() + + + signal.signal(signal.SIGINT, handler) + signal.signal(signal.SIGTERM, handler) + + while True: + time.sleep(10) diff --git a/tools/roslaunch/test/unit/test_nodeprocess.py b/tools/roslaunch/test/unit/test_nodeprocess.py --- a/tools/roslaunch/test/unit/test_nodeprocess.py +++ b/tools/roslaunch/test/unit/test_nodeprocess.py @@ -78,6 +78,14 @@ def test_create_master_process(self): self.assertEquals(p.package, 'rosmaster') p = create_master_process(run_id, type, ros_root, port) + self.assertEquals(create_master_process(run_id, type, ros_root, port, sigint_timeout=3).sigint_timeout, 3) + self.assertEquals(create_master_process(run_id, type, ros_root, port, sigint_timeout=1).sigint_timeout, 1) + self.assertRaises(RLException, create_master_process, run_id, type, ros_root, port, sigint_timeout=0) + + self.assertEquals(create_master_process(run_id, type, ros_root, port, sigterm_timeout=3).sigterm_timeout, 3) + self.assertEquals(create_master_process(run_id, type, ros_root, port, sigterm_timeout=1).sigterm_timeout, 1) + self.assertRaises(RLException, create_master_process, run_id, type, ros_root, port, sigterm_timeout=0) + # TODO: have to think more as to the correct environment for the master process @@ -164,6 +172,16 @@ def test_create_node_process(self): n.cwd = 'node' self.assertEquals(create_node_process(run_id, n, 
master_uri).cwd, 'node') + # sigint timeout + self.assertEquals(create_node_process(run_id, n, master_uri).sigint_timeout, 15) + self.assertEquals(create_node_process(run_id, n, master_uri, sigint_timeout=1).sigint_timeout, 1) + self.assertRaises(RLException, create_node_process, run_id, n, master_uri, sigint_timeout=0) + + # sigterm timeout + self.assertEquals(create_node_process(run_id, n, master_uri).sigterm_timeout, 2) + self.assertEquals(create_node_process(run_id, n, master_uri, sigterm_timeout=1).sigterm_timeout, 1) + self.assertRaises(RLException, create_node_process, run_id, n, master_uri, sigterm_timeout=0) + # test args # - simplest test (no args) @@ -201,7 +219,69 @@ def test_create_node_process(self): self.failIf('SUB_TEST' in p.args) self.assert_('foo' in p.args) self.assert_('subtest' in p.args) - self.assert_('subtest2' in p.args) + self.assert_('subtest2' in p.args) + + def test_local_process_stop_timeouts(self): + from roslaunch.core import Node, Machine + + # have to use real ROS configuration for these tests + ros_root = os.environ['ROS_ROOT'] + rpp = os.environ.get('ROS_PACKAGE_PATH', None) + master_uri = 'http://masteruri:1234' + m = Machine('name1', ros_root, rpp, '1.2.3.4') + + run_id = 'id' + + n = Node('roslaunch', 'signal_logger.py') + n.name = 'logger' + n.machine = m + self.check_stop_timeouts(master_uri, n, run_id, 1.0, 1.0) + self.check_stop_timeouts(master_uri, n, run_id, 0.00001, 1.0) + # shorter sigterm times are risky in the test - the signal file might not get written; but in the wild, it's ok + self.check_stop_timeouts(master_uri, n, run_id, 1.0, 0.001) + self.check_stop_timeouts(master_uri, n, run_id, 2.0, 3.0) + + def check_stop_timeouts(self, master_uri, n, run_id, sigint_timeout, sigterm_timeout): + from roslaunch.nodeprocess import create_node_process, LocalProcess + + import time + import tempfile + import signal + + signal_log_file = os.path.join(tempfile.gettempdir(), "signal.log") + + try: + os.remove(signal_log_file) + except OSError: + pass + + p = create_node_process(run_id, n, master_uri, sigint_timeout=sigint_timeout, sigterm_timeout=sigterm_timeout) + self.assert_(isinstance(p, LocalProcess)) + + p.start() + time.sleep(3) # give it time to start + + before_stop_call_time = time.time() + p.stop() + after_stop_call_time = time.time() + + signals = dict() + + try: + with open(signal_log_file, 'r') as f: + lines = f.readlines() + for line in lines: + sig, timestamp = line.split(" ") + sig = int(sig) + timestamp = float(timestamp) + signals[sig] = timestamp + except IOError: + self.fail("Could not open %s" % signal_log_file) + + self.assertSetEqual({signal.SIGINT, signal.SIGTERM}, set(signals.keys())) + self.assertAlmostEqual(before_stop_call_time, signals[signal.SIGINT], delta=1) + self.assertAlmostEqual(before_stop_call_time, signals[signal.SIGTERM] - sigint_timeout, delta=1) + self.assertAlmostEqual(before_stop_call_time, after_stop_call_time - sigint_timeout - sigterm_timeout, delta=1) def test__cleanup_args(self): # #1595 diff --git a/tools/roslaunch/test/unit/test_roslaunch_parent.py b/tools/roslaunch/test/unit/test_roslaunch_parent.py --- a/tools/roslaunch/test/unit/test_roslaunch_parent.py +++ b/tools/roslaunch/test/unit/test_roslaunch_parent.py @@ -236,6 +236,89 @@ def ftrue(): p.shutdown() self.assert_(pmon.is_shutdown) + +## Test sigint_timeout and sigterm_timeout +# We need real ProcessMonitor here, and we need to spawn real threads +class TestRoslaunchTimeouts(unittest.TestCase): + def setUp(self): + from roslaunch.pmon import 
ProcessMonitor + self.pmon = ProcessMonitor() + + def test_roslaunchTimeouts(self): + try: + self._subtestTimeouts() + finally: + self.pmon.shutdown() + + def _subtestTimeouts(self): + from roslaunch.parent import ROSLaunchParent + from roslaunch.server import ROSLaunchParentNode + import signal + import tempfile + from threading import Thread + + pmon = self.pmon + pmon.start() + try: + # if there is a core up, we have to use its run id + run_id = get_param('/run_id') + except: + run_id = 'test-rl-parent-timeout-%s' % time.time() + + rl_dir = rospkg.RosPack().get_path('roslaunch') + rl_file = os.path.join(rl_dir, 'resources', 'timeouts.launch') + + sigint_timeout = 2 + sigterm_timeout = 3 + + p = ROSLaunchParent(run_id, [rl_file], is_core=False, port=11312, local_only=True, + sigint_timeout=sigint_timeout, sigterm_timeout=sigterm_timeout) + p._load_config() + p.pm = pmon + p.server = ROSLaunchParentNode(p.config, pmon) + + signal_log_file = os.path.join(tempfile.gettempdir(), "signal.log") + try: + os.remove(signal_log_file) + except OSError: + pass + + def kill_launch(times): + time.sleep(3) # give it time to start + + times.append(time.time()) + p.shutdown() + times.append(time.time()) + + p.start() + + times = [] + t = Thread(target=kill_launch, args=(times,)) + t.start() + + p.spin() + t.join() + + before_stop_call_time, after_stop_call_time = times + + signals = dict() + try: + with open(signal_log_file, 'r') as f: + lines = f.readlines() + for line in lines: + sig, timestamp = line.split(" ") + sig = int(sig) + timestamp = float(timestamp) + signals[sig] = timestamp + except IOError: + self.fail("Could not open %s" % signal_log_file) + + self.assertSetEqual({signal.SIGINT, signal.SIGTERM}, set(signals.keys())) + self.assertAlmostEqual(before_stop_call_time, signals[signal.SIGINT], delta=1.0) + self.assertAlmostEqual(before_stop_call_time, signals[signal.SIGTERM] - sigint_timeout, delta=1) + self.assertAlmostEqual(before_stop_call_time, after_stop_call_time - sigint_timeout - sigterm_timeout, delta=1) + + def kill_parent(p, delay=1.0): # delay execution so that whatever pmon method we're calling has time to enter time.sleep(delay) diff --git a/tools/roslaunch/test/xml/manual-test-remote-timeouts.launch b/tools/roslaunch/test/xml/manual-test-remote-timeouts.launch new file mode 100644 --- /dev/null +++ b/tools/roslaunch/test/xml/manual-test-remote-timeouts.launch @@ -0,0 +1,14 @@ +<launch> + + <!-- To be used with roslaunch/test/manual-test-remote-timeouts.sh --> + + <arg name="address" /> + <arg name="env_loader" /> + <arg name="user" /> + <arg name="timeout" default="10.0" /> + + <machine name="remote" address="$(arg address)" user="$(arg user)" env-loader="$(arg env_loader)" timeout="$(arg timeout)" /> + + <node name="signal_logger" pkg="roslaunch" type="signal_logger.py" machine="remote" output="screen" /> + +</launch> diff --git a/utilities/xmlrpcpp/test/CMakeLists.txt b/utilities/xmlrpcpp/test/CMakeLists.txt --- a/utilities/xmlrpcpp/test/CMakeLists.txt +++ b/utilities/xmlrpcpp/test/CMakeLists.txt @@ -104,16 +104,18 @@ if(NOT WIN32) ../src/XmlRpcSocket.cpp ../src/XmlRpcUtil.cpp ) - if(APPLE) - set_target_properties(test_socket PROPERTIES - LINK_FLAGS - "-Wl,-alias,___wrap_accept,_accept -Wl,-alias,___wrap_bind,_bind -Wl,-alias,___wrap_close,_close -Wl,-alias,___wrap_connect,_connect -Wl,-alias,___wrap_getaddrinfo,_getaddrinfo -Wl,-alias,___wrap_getsockname,_getsockname -Wl,-alias,___wrap_listen,_listen -Wl,-alias,___wrap_read,_read -Wl,-alias,___wrap_setsockopt,_setsockopt 
-Wl,-alias,___wrap_select,_select -Wl,-alias,___wrap_select,_select$1050 -Wl,-alias,___wrap_socket,_socket -Wl,-alias,___wrap_write,_write -Wl,-alias,___wrap_fcntl,_fcntl -Wl,-alias,___wrap_freeaddrinfo,_freeaddrinfo" - ) - elseif(UNIX) - set_target_properties(test_socket PROPERTIES - LINK_FLAGS - "-Wl,--wrap=accept -Wl,--wrap=bind -Wl,--wrap=close -Wl,--wrap=connect -Wl,--wrap=getaddrinfo -Wl,--wrap=getsockname -Wl,--wrap=listen -Wl,--wrap=read -Wl,--wrap=setsockopt -Wl,--wrap=select -Wl,--wrap=socket -Wl,--wrap=write -Wl,--wrap=fcntl -Wl,--wrap=freeaddrinfo" - ) + if(TARGET test_socket) + if(APPLE) + set_target_properties(test_socket PROPERTIES + LINK_FLAGS + "-Wl,-alias,___wrap_accept,_accept -Wl,-alias,___wrap_bind,_bind -Wl,-alias,___wrap_close,_close -Wl,-alias,___wrap_connect,_connect -Wl,-alias,___wrap_getaddrinfo,_getaddrinfo -Wl,-alias,___wrap_getsockname,_getsockname -Wl,-alias,___wrap_listen,_listen -Wl,-alias,___wrap_read,_read -Wl,-alias,___wrap_setsockopt,_setsockopt -Wl,-alias,___wrap_select,_select -Wl,-alias,___wrap_select,_select$1050 -Wl,-alias,___wrap_socket,_socket -Wl,-alias,___wrap_write,_write -Wl,-alias,___wrap_fcntl,_fcntl -Wl,-alias,___wrap_freeaddrinfo,_freeaddrinfo" + ) + elseif(UNIX) + set_target_properties(test_socket PROPERTIES + LINK_FLAGS + "-Wl,--wrap=accept -Wl,--wrap=bind -Wl,--wrap=close -Wl,--wrap=connect -Wl,--wrap=getaddrinfo -Wl,--wrap=getsockname -Wl,--wrap=listen -Wl,--wrap=read -Wl,--wrap=setsockopt -Wl,--wrap=select -Wl,--wrap=socket -Wl,--wrap=write -Wl,--wrap=fcntl -Wl,--wrap=freeaddrinfo" + ) + endif() endif() endif()
Function recv_buff(sock, b, buff_size): abnormal reconnection when the remote end is closed

When I use rosbridge, after I send a message I close the client connected through rosbridge, which results in `d = sock.recv(buff_size)` returning an empty string:

```python
def recv(self, buffersize, flags=None): # real signature unknown; restored from __doc__
    """
    recv(buffersize[, flags]) -> data

    Receive up to buffersize bytes from the socket.  For the optional flags
    argument, see the Unix manual.  When no data is available, block until
    at least one byte is available or until the remote end is closed.  When
    the remote end is closed and all data is read, return the empty string.
    """
    pass
```

An exception is thrown and the socket is reconnected; the retry wait eventually exceeds 32 s, it becomes difficult to connect, and a port is wasted on every reconnect. Looking at the C++ code, I find that the processing logic is different:

```cpp
int32_t TransportTCP::read(uint8_t* buffer, uint32_t size)
{
  {
    boost::recursive_mutex::scoped_lock lock(close_mutex_);

    if (closed_)
    {
      ROSCPP_LOG_DEBUG("Tried to read on a closed socket [%d]", sock_);
      return -1;
    }
  }

  ROS_ASSERT(size > 0);

  // never read more than INT_MAX since this is the maximum we can report back with the current return type
  uint32_t read_size = std::min(size, static_cast<uint32_t>(INT_MAX));
  int num_bytes = ::recv(sock_, reinterpret_cast<char*>(buffer), read_size, 0);
  if (num_bytes < 0)
  {
    if ( !last_socket_error_is_would_block() ) // !WSAWOULDBLOCK / !EAGAIN && !EWOULDBLOCK
    {
      ROSCPP_LOG_DEBUG("recv() on socket [%d] failed with error [%s]", sock_, last_socket_error_string());
      close();
    }
    else
    {
      num_bytes = 0;
    }
  }
  else if (num_bytes == 0)
  {
    ROSCPP_LOG_DEBUG("Socket [%d] received 0/%u bytes, closing", sock_, size);
    close();
    return -1;
  }

  return num_bytes;
}
```

The C++ code receives zero bytes and closes the connection directly instead of reconnecting. Can we consider closing it as well, like this:

```python
# add break here
if str(e) == "unable to receive data from sender, check sender's logs for details":
    break
```

```python
def receive_loop(self, msgs_callback):
    """
    Receive messages until shutdown
    @param msgs_callback: callback to invoke for new messages received
    @type  msgs_callback: fn([msg])
    """
    # - use assert here as this would be an internal error, aka bug
    logger.debug("receive_loop for [%s]", self.name)
    try:
        while not self.done and not is_shutdown():
            try:
                if self.socket is not None:
                    msgs = self.receive_once()
                    if not self.done and not is_shutdown():
                        msgs_callback(msgs, self)
                else:
                    self._reconnect()

            except TransportException as e:
                # set socket to None so we reconnect
                try:
                    if self.socket is not None:
                        try:
                            self.socket.shutdown()
                        except:
                            pass
                        finally:
                            self.socket.close()
                except:
                    pass
                self.socket = None
                # add break here
                if str(e) == "unable to receive data from sender, check sender's logs for details":
                    break
                # pass

            except DeserializationError as e:
                #TODO: how should we handle reconnect in this case?
                logerr("[%s] error deserializing incoming request: %s"%self.name, str(e))
                rospyerr("[%s] error deserializing incoming request: %s"%self.name, traceback.format_exc())
            except:
                # in many cases this will be a normal hangup, but log internally
                try:
                    #1467 sometimes we get exceptions due to
                    #interpreter shutdown, so blanket ignore those if
                    #the reporting fails
                    rospydebug("exception in receive loop for [%s], may be normal. Exception is %s",self.name, traceback.format_exc())
                except: pass

        rospydebug("receive_loop[%s]: done condition met, exited loop"%self.name)
    finally:
        if not self.done:
            self.close()
```

The test is OK after I modify it.
This sounds reasonable to me. Since the transport is terminated with `TransportTerminated`, we would not want to reconnect. (In roscpp, if the error is neither `EAGAIN` nor `EWOULDBLOCK`, the socket is closed and done.)

> if str(e) == "unable to receive data from sender, check sender's logs for details":

Instead of that, I would add an `except` clause catching `TransportTerminated` before the `TransportException` one.

@fujitatomoya Thank you. After you fix it, I will pull the code again. I like ROS and hope that the UDP part of ROS 1 can be updated later.
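For illustration only, here is a minimal, self-contained sketch of the ordering suggested above. The exception classes are stand-ins mirroring rospy's names (the assumption being that `TransportTerminated` derives from `TransportException`, which is why clause order matters), and the loop body is reduced to stubs rather than being the actual patch:

```python
# Stand-in exception hierarchy mirroring rospy's names (assumption: in rospy,
# TransportTerminated is a subclass of TransportException).
class TransportException(Exception):
    pass


class TransportTerminated(TransportException):
    pass


def receive_loop(receive_once, is_shutdown):
    """Keep receiving until shutdown; stop instead of reconnecting on termination."""
    while not is_shutdown():
        try:
            receive_once()
        except TransportTerminated:
            # The remote end closed the connection for good: leave the loop
            # instead of tearing the socket down and retrying with growing backoff.
            break
        except TransportException:
            # Transient transport error: the real code resets the socket and
            # retries here; the sketch simply continues the loop.
            continue
```

Because the more specific subclass is caught first, a clean remote close exits the loop, while other transport errors still reach the reconnect path.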
2022-01-06T18:44:15Z
[]
[]
ros/ros_comm
2286
ros__ros_comm-2286
[ "2282" ]
dd78ac8af128bb8eb992d6431bb9f994658ea6ab
diff --git a/tools/rosbag/src/rosbag/bag.py b/tools/rosbag/src/rosbag/bag.py --- a/tools/rosbag/src/rosbag/bag.py +++ b/tools/rosbag/src/rosbag/bag.py @@ -2472,6 +2472,7 @@ def reindex(self): if trunc_pos and trunc_pos < total_bytes: f.truncate(trunc_pos) + f.seek(trunc_pos) def _reindex_read_chunk(self, f, chunk_pos, total_bytes): # Read the chunk header
diff --git a/test/test_rosbag/test/test_bag.py b/test/test_rosbag/test/test_bag.py --- a/test/test_rosbag/test/test_bag.py +++ b/test/test_rosbag/test/test_bag.py @@ -261,6 +261,57 @@ def test_reindex_works(self): msgs = list(rosbag.Bag(reindex_filename, 'r')) + def _read_all_records(self, bag_path): + """ + Throw exception if malformed records found, otherwise return number of records in file. + """ + with rosbag.Bag(bag_path, 'r') as b: + file_header_pos = b._file_header_pos + + total_records_read = 0 + with open(bag_path, 'r+b') as f: + f.seek(0, os.SEEK_END) + total_bytes = f.tell() + f.seek(file_header_pos) + while f.tell() < total_bytes: + bag._read_sized(f) + total_records_read += 1 + + return total_records_read + + def test_reindex_leaves_valid_file(self): + orig_filename = '/tmp/test_reindex_leaves_valid_file.bag' + chunk_threshold = 1024 + with rosbag.Bag(orig_filename, 'w', chunk_threshold=chunk_threshold) as b: + for i in range(100): + for j in range(5): + msg = Int32() + msg.data = i + b.write('/topic%d' % j, msg) + file_header_pos = b._file_header_pos + + trunc_filename = '%s.trunc%s' % os.path.splitext(orig_filename) + reindex_filename = '%s.reindex%s' % os.path.splitext(orig_filename) + + num_original_records = self._read_all_records(orig_filename) + + shutil.copy(orig_filename, trunc_filename) + with open(trunc_filename, 'r+b') as f: + # truncating to arbitrary length that is in the middle of a record + f.seek(0, os.SEEK_END) + total_bytes = f.tell() + f.seek(0) + f.truncate(int(total_bytes / 2)) + with self.assertRaises(rosbag.ROSBagException): + self._read_all_records(trunc_filename) + + shutil.copy(trunc_filename, reindex_filename) + with rosbag.Bag(reindex_filename, 'a', allow_unindexed=True) as b: + for _ in b.reindex(): + pass + num_reindexed_records = self._read_all_records(reindex_filename) + + def test_future_version_works(self): fn = '/tmp/test_future_version_2.1.bag'
rosbag reindex outputs invalid bags when run on truncated files

The rosbag reindex command is documented as "repairing broken bag files", but when run on a broken bag file it will output a bag that is indexed, but illegal per the ROS bag spec.

This script can be used to demonstrate the issue: https://gist.github.com/wkalt/836cd80f1c6a9b0e87e5e24771c7b8e0

Steps to reproduce:
```
wget https://assets.foxglove.dev/demo.bag
truncate -s 30000000 demo.bag
rosbag reindex demo.bag

// using go program above
$ go run main.go demo.bag
2022/09/27 09:18:10 got zero length header at offset 29578569
exit status 1
```

The resulting bag violates the spec in that it is not a sequence of back-to-back `<header length><header><data length><data>` records, with each header containing an "op" key, as the bag spec mandates. However, ROS tooling accepts the bag due to reliance on the message index, which is well-formed. The index simply does not point at the junk data present in the file, which occurs after the final chunk and its message indexes.

This causes problems for tooling that does not use the index and instead performs a linear scan. It is possible this issue does not present if you are lucky enough to truncate the file on a message boundary.
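As a rough illustration of such an index-free linear scan, the sketch below mirrors the `_read_all_records` helper added in the test patch above. The record layout is the `<header length><header><data length><data>` sequence described here, with 4-byte little-endian length fields; `start_offset` is assumed to be the position just past the bag's file-header record, and the function name is illustrative, not rosbag's:

```python
import struct


def scan_records(path, start_offset):
    """Walk <header len><header><data len><data> records and count them.

    Raises ValueError on a malformed record (for example a zero-length header
    or a truncated length/body), which is the kind of junk a reindexed,
    previously-truncated bag can end up containing.
    """
    with open(path, 'rb') as f:
        f.seek(0, 2)                      # seek to end to learn the file size
        total_bytes = f.tell()
        f.seek(start_offset)
        records = 0
        while f.tell() < total_bytes:
            for part in ('header', 'data'):
                raw_len = f.read(4)
                if len(raw_len) < 4:
                    raise ValueError('truncated %s length at offset %d' % (part, f.tell()))
                (size,) = struct.unpack('<L', raw_len)
                if part == 'header' and size == 0:
                    raise ValueError('zero length header at offset %d' % (f.tell() - 4))
                if len(f.read(size)) < size:
                    raise ValueError('truncated %s at offset %d' % (part, f.tell()))
            records += 1
        return records
```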
2022-09-30T01:49:19Z
[]
[]
getpelican/pelican
199
getpelican__pelican-199
[ "153" ]
2de789325f3c3cd6224e71da70b7649ac6bbf74c
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -1,5 +1,6 @@ import argparse import os, sys +import re import time from pelican.generators import (ArticlesGenerator, PagesGenerator, @@ -26,6 +27,42 @@ def __init__(self, settings=None, path=None, theme=None, output_path=None, if self.path.endswith('/'): self.path = self.path[:-1] + if settings.get('CLEAN_URLS', False): + log.warning('Found deprecated `CLEAN_URLS` in settings. Modifing' + ' the following settings for the same behaviour.') + + settings['ARTICLE_URL'] = '{slug}/' + settings['ARTICLE_LANG_URL'] = '{slug}-{lang}/' + settings['PAGE_URL'] = 'pages/{slug}/' + settings['PAGE_LANG_URL'] = 'pages/{slug}-{lang}/' + + for setting in ('ARTICLE_URL', 'ARTICLE_LANG_URL', 'PAGE_URL', + 'PAGE_LANG_URL'): + log.warning("%s = '%s'" % (setting, settings[setting])) + + if settings.get('ARTICLE_PERMALINK_STRUCTURE', False): + log.warning('Found deprecated `ARTICLE_PERMALINK_STRUCTURE` in' + ' settings. Modifing the following settings for' + ' the same behaviour.') + + structure = settings['ARTICLE_PERMALINK_STRUCTURE'] + + # Convert %(variable) into {variable}. + structure = re.sub('%\((\w+)\)s', '{\g<1>}', structure) + + # Convert %x into {date:%x} for strftime + structure = re.sub('(%[A-z])', '{date:\g<1>}', structure) + + # Strip a / prefix + structure = re.sub('^/', '', structure) + + for setting in ('ARTICLE_URL', 'ARTICLE_LANG_URL', 'PAGE_URL', + 'PAGE_LANG_URL', 'ARTICLE_SAVE_AS', + 'ARTICLE_LANG_SAVE_AS', 'PAGE_SAVE_AS', + 'PAGE_LANG_SAVE_AS'): + settings[setting] = os.path.join(structure, settings[setting]) + log.warning("%s = '%s'" % (setting, settings[setting])) + # define the default settings self.settings = settings self.theme = theme or settings['THEME'] diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -24,6 +24,7 @@ def __init__(self, content, metadata=None, settings=None, if not settings: settings = _DEFAULT_CONFIG + self.settings = settings self._content = content self.translations = [] @@ -37,9 +38,9 @@ def __init__(self, content, metadata=None, settings=None, # default author to the one in settings if not defined if not hasattr(self, 'author'): if 'AUTHOR' in settings: - self.author = settings['AUTHOR'] + self.author = Author(settings['AUTHOR'], settings) else: - self.author = getenv('USER', 'John Doe') + self.author = Author(getenv('USER', 'John Doe'), settings) warning(u"Author of `{0}' unknow, assuming that his name is `{1}'".format(filename or self.title, self.author)) # manage languages @@ -55,29 +56,6 @@ def __init__(self, content, metadata=None, settings=None, if not hasattr(self, 'slug') and hasattr(self, 'title'): self.slug = slugify(self.title) - # create save_as from the slug (+lang) - if not hasattr(self, 'save_as') and hasattr(self, 'slug'): - if self.in_default_lang: - if settings.get('CLEAN_URLS', False): - self.save_as = '%s/index.html' % self.slug - else: - self.save_as = '%s.html' % self.slug - - clean_url = '%s/' % self.slug - else: - if settings.get('CLEAN_URLS', False): - self.save_as = '%s-%s/index.html' % (self.slug, self.lang) - else: - self.save_as = '%s-%s.html' % (self.slug, self.lang) - - clean_url = '%s-%s/' % (self.slug, self.lang) - - # change the save_as regarding the settings - if settings.get('CLEAN_URLS', False): - self.url = clean_url - elif hasattr(self, 'save_as'): - self.url = self.save_as - if filename: self.filename = filename @@ -115,6 +93,30 @@ def 
check_properties(self): if not hasattr(self, prop): raise NameError(prop) + @property + def url_format(self): + return { + 'slug': getattr(self, 'slug', ''), + 'lang': getattr(self, 'lang', 'en'), + 'date': getattr(self, 'date', datetime.datetime.now()), + 'author': self.author, + 'category': getattr(self, 'category', 'misc'), + } + + @property + def url(self): + if self.in_default_lang: + return self.settings.get('PAGE_URL', u'pages/{slug}.html').format(**self.url_format) + + return self.settings.get('PAGE_LANG_URL', u'pages/{slug}-{lang}.html').format(**self.url_format) + + @property + def save_as(self): + if self.in_default_lang: + return self.settings.get('PAGE_SAVE_AS', u'pages/{slug}.html').format(**self.url_format) + + return self.settings.get('PAGE_LANG_SAVE_AS', u'pages/{slug}-{lang}.html').format(**self.url_format) + @property def content(self): if hasattr(self, "_get_content"): @@ -138,10 +140,74 @@ def _set_summary(self, summary): class Article(Page): mandatory_properties = ('title', 'date', 'category') + @property + def url(self): + if self.in_default_lang: + return self.settings.get('ARTICLE_URL', u'{slug}.html').format(**self.url_format) + + return self.settings.get('ARTICLE_LANG_URL', u'{slug}-{lang}.html').format(**self.url_format) + + @property + def save_as(self): + if self.in_default_lang: + return self.settings.get('ARTICLE_SAVE_AS', u'{slug}.html').format(**self.url_format) + + return self.settings.get('ARTICLE_LANG_SAVE_AS', u'{slug}-{lang}.html').format(**self.url_format) + class Quote(Page): base_properties = ('author', 'date') +class URLWrapper(object): + def __init__(self, name, settings): + self.name = unicode(name) + self.settings = settings + + def __hash__(self): + return hash(self.name) + + def __eq__(self, other): + return self.name == unicode(other) + + def __str__(self): + return str(self.name) + + def __unicode__(self): + return self.name + + @property + def url(self): + return '%s.html' % self.name + +class Category(URLWrapper): + @property + def url(self): + return self.settings.get('CATEGORY_URL', u'category/{name}.html').format(name=self.name) + + @property + def save_as(self): + return self.settings.get('CATEGORY_SAVE_AS', u'category/{name}.html').format(name=self.name) + +class Tag(URLWrapper): + def __init__(self, name, *args, **kwargs): + super(Tag, self).__init__(unicode.strip(name), *args, **kwargs) + + @property + def url(self): + return self.settings.get('TAG_URL', u'tag/{name}.html').format(name=self.name) + + @property + def save_as(self): + return self.settings.get('TAG_SAVE_AS', u'tag/{name}.html').format(name=self.name) + +class Author(URLWrapper): + @property + def url(self): + return self.settings.get('AUTHOR_URL', u'author/{name}.html').format(name=self.name) + + @property + def save_as(self): + return self.settings.get('AUTHOR_SAVE_AS', u'author/{name}.html').format(name=self.name) def is_valid_content(content, f): try: diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -13,7 +13,7 @@ from jinja2 import Environment, FileSystemLoader, PrefixLoader, ChoiceLoader from jinja2.exceptions import TemplateNotFound -from pelican.contents import Article, Page, is_valid_content +from pelican.contents import Article, Page, Category, is_valid_content from pelican.log import * from pelican.readers import read_file from pelican.utils import copy, process_translations, open @@ -179,26 +179,26 @@ def generate_pages(self, writer): for tag, articles in self.tags.items(): 
articles.sort(key=attrgetter('date'), reverse=True) dates = [article for article in self.dates if article in articles] - write('tag/%s.html' % tag, tag_template, self.context, tag=tag, + write(tag.save_as, tag_template, self.context, tag=tag, articles=articles, dates=dates, paginated={'articles': articles, 'dates': dates}, - page_name='tag/%s' % tag) + page_name=u'tag/%s' % tag) category_template = self.get_template('category') for cat, articles in self.categories: dates = [article for article in self.dates if article in articles] - write('category/%s.html' % cat, category_template, self.context, + write(cat.save_as, category_template, self.context, category=cat, articles=articles, dates=dates, paginated={'articles': articles, 'dates': dates}, - page_name='category/%s' % cat) + page_name=u'category/%s' % cat) author_template = self.get_template('author') for aut, articles in self.authors: dates = [article for article in self.dates if article in articles] - write('author/%s.html' % aut, author_template, self.context, + write(aut.save_as, author_template, self.context, author=aut, articles=articles, dates=dates, paginated={'articles': articles, 'dates': dates}, - page_name='author/%s' % aut) + page_name=u'author/%s' % aut) for article in self.drafts: write('drafts/%s.html' % article.slug, article_template, self.context, @@ -212,7 +212,6 @@ def generate_context(self): files = self.get_files(self.path, exclude=['pages',]) all_articles = [] for f in files: - try: content, metadata = read_file(f, settings=self.settings) except Exception, e: @@ -228,7 +227,7 @@ def generate_context(self): category = os.path.basename(os.path.dirname(f)).decode('utf-8') if category != '': - metadata['category'] = unicode(category) + metadata['category'] = Category(category, self.settings) if 'date' not in metadata.keys()\ and self.settings['FALLBACK_ON_FS_DATE']: @@ -239,21 +238,6 @@ def generate_context(self): if not is_valid_content(article, f): continue - add_to_url = u'' - if 'ARTICLE_PERMALINK_STRUCTURE' in self.settings: - article_permalink_structure = self.settings['ARTICLE_PERMALINK_STRUCTURE'] - article_permalink_structure = article_permalink_structure.lstrip('/').replace('%(', "%%(") - - # try to substitute any python datetime directive - add_to_url = article.date.strftime(article_permalink_structure) - # try to substitute any article metadata in rest file - add_to_url = add_to_url % article.__dict__ - add_to_url = [slugify(i) for i in add_to_url.split('/')] - add_to_url = os.path.join(*add_to_url) - - article.url = urlparse.urljoin(add_to_url, article.url) - article.save_as = urlparse.urljoin(add_to_url, article.save_as) - if article.status == "published": if hasattr(article, 'tags'): for tag in article.tags: @@ -348,7 +332,7 @@ def generate_context(self): def generate_output(self, writer): for page in chain(self.translations, self.pages): - writer.write_file('pages/%s' % page.save_as, self.get_template('page'), + writer.write_file(page.save_as, self.get_template('page'), self.context, page=page, relative_urls = self.settings.get('RELATIVE_URLS')) diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -15,25 +15,30 @@ Markdown = False import re +from pelican.contents import Category, Tag, Author, URLWrapper from pelican.utils import get_date, open _METADATA_PROCESSORS = { - 'tags': lambda x: map(unicode.strip, unicode(x).split(',')), - 'date': lambda x: get_date(x), - 'status': unicode.strip, + 'tags': lambda x, y: [Tag(tag, y) for tag in 
unicode(x).split(',')], + 'date': lambda x, y: get_date(x), + 'status': lambda x,y: unicode.strip(x), + 'category': Category, + 'author': Author, } -def _process_metadata(name, value): - if name.lower() in _METADATA_PROCESSORS: - return _METADATA_PROCESSORS[name.lower()](value) - return value - - class Reader(object): enabled = True extensions = None + def __init__(self, settings): + self.settings = settings + + def process_metadata(self, name, value): + if name.lower() in _METADATA_PROCESSORS: + return _METADATA_PROCESSORS[name.lower()](value, self.settings) + return value + class _FieldBodyTranslator(HTMLTranslator): def astext(self): @@ -51,29 +56,25 @@ def render_node_to_html(document, node): node.walkabout(visitor) return visitor.astext() -def get_metadata(document): - """Return the dict containing document metadata""" - output = {} - for docinfo in document.traverse(docutils.nodes.docinfo): - for element in docinfo.children: - if element.tagname == 'field': # custom fields (e.g. summary) - name_elem, body_elem = element.children - name = name_elem.astext() - value = render_node_to_html(document, body_elem) - else: # standard fields (e.g. address) - name = element.tagname - value = element.astext() - - output[name] = _process_metadata(name, value) - return output - - class RstReader(Reader): enabled = bool(docutils) extension = "rst" def _parse_metadata(self, document): - return get_metadata(document) + """Return the dict containing document metadata""" + output = {} + for docinfo in document.traverse(docutils.nodes.docinfo): + for element in docinfo.children: + if element.tagname == 'field': # custom fields (e.g. summary) + name_elem, body_elem = element.children + name = name_elem.astext() + value = render_node_to_html(document, body_elem) + else: # standard fields (e.g. 
address) + name = element.tagname + value = element.astext() + + output[name] = self.process_metadata(name, value) + return output def _get_publisher(self, filename): extra_params = {'initial_header_level': '2'} @@ -110,7 +111,7 @@ def read(self, filename): metadata = {} for name, value in md.Meta.items(): name = name.lower() - metadata[name] = _process_metadata(name, value[0]) + metadata[name] = self.process_metadata(name, value[0]) return content, metadata @@ -126,7 +127,7 @@ def read(self, filename): key = i.split(':')[0][5:].strip() value = i.split(':')[-1][:-3].strip() name = key.lower() - metadata[name] = _process_metadata(name, value) + metadata[name] = self.process_metadata(name, value) return content, metadata @@ -140,7 +141,7 @@ def read_file(filename, fmt=None, settings=None): fmt = filename.split('.')[-1] if fmt not in _EXTENSIONS.keys(): raise TypeError('Pelican does not know how to parse %s' % filename) - reader = _EXTENSIONS[fmt]() + reader = _EXTENSIONS[fmt](settings) settings_key = '%s_EXTENSIONS' % fmt.upper() if settings and settings_key in settings: reader.extensions = settings[settings_key] diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -26,7 +26,14 @@ 'REVERSE_ARCHIVE_ORDER': False, 'REVERSE_CATEGORY_ORDER': False, 'DELETE_OUTPUT_DIRECTORY': False, - 'CLEAN_URLS': False, # use /blah/ instead /blah.html in urls + 'ARTICLE_URL': '{slug}.html', + 'ARTICLE_SAVE_AS': '{slug}.html', + 'ARTICLE_LANG_URL': '{slug}-{lang}.html', + 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html', + 'PAGE_URL': 'pages/{slug}.html', + 'PAGE_SAVE_AS': 'pages/{slug}.html', + 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html', + 'PAGE_LANG_SAVE_AS': 'pages/{slug}-{lang}.html', 'RELATIVE_URLS': True, 'DEFAULT_LANG': 'en', 'TAG_CLOUD_STEPS': 4,
diff --git a/tests/test_contents.py b/tests/test_contents.py --- a/tests/test_contents.py +++ b/tests/test_contents.py @@ -60,12 +60,12 @@ def test_save_as(self): """ # if a title is defined, save_as should be set page = Page(**self.page_kwargs) - page.save_as = 'foo-bar.html' + self.assertEqual(page.save_as, "pages/foo-bar.html") # if a language is defined, save_as should include it accordingly self.page_kwargs['metadata'].update({'lang': 'fr', }) page = Page(**self.page_kwargs) - self.assertEqual(page.save_as, "foo-bar-fr.html") + self.assertEqual(page.save_as, "pages/foo-bar-fr.html") def test_datetime(self): """If DATETIME is set to a tuple, it should be used to override LOCALE diff --git a/tests/test_readers.py b/tests/test_readers.py --- a/tests/test_readers.py +++ b/tests/test_readers.py @@ -19,7 +19,7 @@ def _filename(*args): class RstReaderTest(unittest2.TestCase): def test_article_with_metadata(self): - reader = readers.RstReader() + reader = readers.RstReader({}) content, metadata = reader.read(_filename('article_with_metadata.rst')) expected = { 'category': 'yeah',
clean urls feature without needing a proxy

Hi Alexis,

I have already developed the CLEAN_URLS_NO_PROXY option. These are my commits:

* Fixed categories urls and save_as filename. Also, I've moved the url generator snippet into a new local function called 'generate_urls'. Issue #147
* Cleaning pages url and category items into the menu. Issue #147
* Keeping the simple theme up-to-date for using CLEAN_URLS_NO_PROXY option. Issue #147
* Cleaning the archives, categories, and tags urls. Issue #145
* Including documentation about the CLEAN_URLS_NO_PROXY option. Issue #147

Hope you find it useful and looking forward to your feedback :-) Now, it's time to sleep for me :-D

Thanks and best regards,
Manuel Viera.
I already [have a branch](https://github.com/borgar/pelican/compare/configurable-urls) that solves <del>all</del><ins>some</ins> of these, and a few more related issues, with a lot less code. It clashes with this on a few design issues. I think it would be best if we join our efforts here. Anyway, here are some thoughts on this pull, and the direction this is going:

I think CLEAN_URLS_NO_PROXY doesn't make any sense. Either the user has set a preferred permalink structure or he hasn't. If the user has set up a permalink structure, why should he _also_ need to specify that he wants the permalinks to work?

Why `article.author_url`? Why not `article.author.url`? Same goes for tags. I think it would be better if tags and authors inherited from a simple class that has save_as/url resolution methods. _Note: This is not solved on my branch._

Also related: I think `ARTICLE_PERMALINK_STRUCTURE` is a bad setting name. Why not just `ARTICLE_URL` (which is consistent with the feed URL settings)? I want to be able to separately configure tags, archives, pages... `CATEGORY_URL` is way nicer than `CATEGORY_PERMALINK_STRUCTURE`, especially when you have 6 of these. ;-)

I don't like how it is assumed that I always want the slug to come last. In fact, on my branch the code is simpler because I have removed this limit. However, I have decided to use Jekyll-style URL templates rather than native Python style. My opinion is that while the Python-style ones are more flexible (and faster?), they will cause more problems in the long run, because when a user does this: `PAGE_URL = "/%(lang)s/%(slug)s/"`, Pelican will crash if a page doesn't have `lang` set.

"I think CLEAN_URLS_NO_PROXY doesn't make any sense. Either the user has set preferred permalink structure or he hasn't. If the user has set up a permalink structure, why should he also need to specify that he wants the permalinks to work?" → This was mainly done for compatibility reasons: I didn't want pelican to change its behaviour in new versions, to keep old configurations working. After giving it some thought, I finally think it's probably better to break compatibility in this version and put a warning in the release notes.

About `article.author.url`, and URL resolution in general, it's a problem on its own, as we discussed on the IRC channel. We should probably go for something like in flask or django, or any other framework, which have a URL resolution mechanism and a `url_for` function passed to the templates. This would allow us to define the URL scheme in one place, and easily. This does not have to be as complicated as in django/flask, but that's IMO the way to go. With this in mind, `article.author_url` sounds good to me to temporarily solve the problem. Your solution using objects is good as well but does not really resolve it completely (what about pages? what about categories? etc.)

+1 on `ARTICLE_URL`. About the Jekyll way to specify URLs, you probably can use a defaultdict or something similar to avoid raising an exception when stuff is not defined. Anyway, I'm +1 on having you both working on this.

I was looking at the issues in the issue list and I see there is a duplicate between this and issue #147. Shouldn't these be merged?

Any news on this one? Both of your solutions seem good to me; I think the best approach is to take some implementation ideas from @borgar and update the pull request @mviera made. What do you think?

I did a really dirty prototype of a Django-like `url` tag where any item can be passed into the tag. I'm not sold on this method, mostly because Pelican doesn't have "views" so the API is really strange. The templates get a whole bunch of redundant stuff like this:

```
<a href="{% url "article" article %}">...
<a href="{% url "tag" tag %}">...
```

My guess is that Django started to move away from this and push the `{{ article.get_absolute_url }}` methods because of this redundancy. :-)

We could also implement this as a filter:

```
<a href="{% article|url %}">...
```

But it seems to me like we might as well just attach a URL property to any of the objects that require them and make sure that their `__str__` (or whatever Jinja is using) is the same as before (so doing `{{ tag }}` would still just print `"mytag"`). This would mean backwards compatibility for the themes until people can update their themes with the new property: `{{ tag.url }}`

I've also changed my mind on the formatting issue. I really like the full control over the date format and think we should use @mviera-style URL strings. I'll see if I can dedicate some time to work on merging what we have.

I definitely like `{{ obj.url }}`, but the problem I can see is that currently `{{ obj.url }}` returns the last bit of the url (see https://github.com/ametaireau/pelican/blob/master/pelican/themes/notmyidea/templates/base.html#L34) and not the full path to it, so that would change its behaviour and thus make it backward incompatible. Or, in the new version of pelican, we can use a url filter which takes care of all this, keeping the `url` attribute as is for now and having internally another `full_url` attribute, for instance.

That's nice you can find some time to work on this :)

Not a fan of using tags to retrieve URLs. +1 on just having `.url` properties.
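As a side note on the `defaultdict` idea floated above for the old `%(...)s`-style URL templates, here is a tiny, purely illustrative sketch (the merged code took a different route, supplying defaults through `getattr` in a `url_format` dict and `str.format`):

```python
from collections import defaultdict

# %-formatting against a defaultdict: a missing piece of metadata (here the
# page has no 'lang') becomes an empty string instead of raising KeyError.
PAGE_URL = "/%(lang)s/%(slug)s/"

metadata = defaultdict(str, {'slug': 'my-first-page'})  # 'lang' never set
print(PAGE_URL % metadata)  # -> "//my-first-page/"
```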
2011-12-24T00:54:05Z
[]
[]
getpelican/pelican
262
getpelican__pelican-262
[ "177" ]
352a67d162e9bb9b94d466774aafdf3291e4b565
diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -22,6 +22,7 @@ 'STATIC_PATHS': ['images', ], 'THEME_STATIC_PATHS': ['static', ], 'FEED': 'feeds/all.atom.xml', + 'FEED_MAIN_URL': 'feeds/all.atom.xml', 'CATEGORY_FEED': 'feeds/%s.atom.xml', 'TRANSLATION_FEED': 'feeds/all-%s.atom.xml', 'FEED_MAX_ITEMS': '', @@ -71,26 +72,45 @@ def read_settings(filename=None): + if filename: + local_settings = get_settings_from_file(filename) + else: + local_settings = _DEFAULT_CONFIG + configured_settings = configure_settings(local_settings, None, filename) + return configured_settings + + +def get_settings_from_file(filename, default_settings=None): """Load a Python file into a dictionary. """ - context = _DEFAULT_CONFIG.copy() + if default_settings == None: + default_settings = _DEFAULT_CONFIG + context = default_settings.copy() if filename: tempdict = {} execfile(filename, tempdict) for key in tempdict: if key.isupper(): context[key] = tempdict[key] + return context + - # Make the paths relative to the settings file +def configure_settings(settings, default_settings=None, filename=None): + """Provide optimizations, error checking, and warnings for loaded settings""" + if default_settings is None: + default_settings = _DEFAULT_CONFIG + + # Make the paths relative to the settings file + if filename: for path in ['PATH', 'OUTPUT_PATH']: - if path in context: - if context[path] is not None and not isabs(context[path]): - context[path] = os.path.abspath(os.path.normpath( - os.path.join(os.path.dirname(filename), context[path])) + if path in settings: + if settings[path] is not None and not isabs(settings[path]): + settings[path] = os.path.abspath(os.path.normpath( + os.path.join(os.path.dirname(filename), settings[path])) ) # if locales is not a list, make it one - locales = context['LOCALE'] + locales = settings['LOCALE'] if isinstance(locales, basestring): locales = [locales] @@ -108,11 +128,20 @@ def read_settings(filename=None): else: logger.warn("LOCALE option doesn't contain a correct value") - if not 'TIMEZONE' in context: + # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL + if ('SITEURL' in settings) and (not 'FEED_DOMAIN' in settings): + settings['FEED_DOMAIN'] = settings['SITEURL'] + + # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined + if (('FEED' in settings) or ('FEED_RSS' in settings)) and (not 'FEED_DOMAIN' in settings): + logger.warn("Since feed URLs should always be absolute, you should specify " + "FEED_DOMAIN in your settings. (e.g., 'FEED_DOMAIN = " + "http://www.example.com')") + + if not 'TIMEZONE' in settings: logger.warn("No timezone information specified in the settings. Assuming" " your timezone is UTC for feed generation. Check " "http://docs.notmyidea.org/alexis/pelican/settings.html#timezone " "for more information") - # set the locale - return context + return settings diff --git a/samples/pelican.conf.py b/samples/pelican.conf.py --- a/samples/pelican.conf.py +++ b/samples/pelican.conf.py @@ -29,7 +29,7 @@ DEFAULT_METADATA = (('yeah', 'it is'),) # static paths will be copied under the same name -STATIC_PATHS = ["pictures",] +STATIC_PATHS = ["pictures", ] # A list of files to copy from the source to the destination FILES_TO_COPY = (('extra/robots.txt', 'robots.txt'),) @@ -37,4 +37,3 @@ # foobar will not be used, because it's not in caps. All configuration keys # have to be in caps foobar = "barbaz" -
diff --git a/tests/test_settings.py b/tests/test_settings.py --- a/tests/test_settings.py +++ b/tests/test_settings.py @@ -1,12 +1,13 @@ from os.path import dirname, abspath, join -from pelican.settings import read_settings, _DEFAULT_CONFIG +from pelican.settings import read_settings, configure_settings, _DEFAULT_CONFIG from .support import unittest -class TestSettingsFromFile(unittest.TestCase): - """Providing a file, it should read it, replace the default values and - append new values to the settings, if any +class TestSettingsConfiguration(unittest.TestCase): + """Provided a file, it should read it, replace the default values, + append new values to the settings (if any), and apply basic settings + optimizations. """ def setUp(self): self.PATH = abspath(dirname(__file__)) @@ -31,3 +32,15 @@ def test_read_empty_settings(self): """providing no file should return the default values.""" settings = read_settings(None) self.assertDictEqual(settings, _DEFAULT_CONFIG) + + def test_configure_settings(self): + """Manipulations to settings should be applied correctly.""" + + # FEED_DOMAIN, if undefined, should default to SITEURL + settings = {'SITEURL': 'http://blog.notmyidea.org', 'LOCALE': ''} + configure_settings(settings) + self.assertEqual(settings['FEED_DOMAIN'], 'http://blog.notmyidea.org') + + settings = {'FEED_DOMAIN': 'http://feeds.example.com', 'LOCALE': ''} + configure_settings(settings) + self.assertEqual(settings['FEED_DOMAIN'], 'http://feeds.example.com')
Add support for Google feedburner

Hi all,

Do you have any plan to add support for [Google feedburner](http://feedburner.google.com/)? It is very useful for tracking feed subscribers, providing an HTMLized feed page, and so on. To use this, Pelican needs to show the feedburner address to visitors while still providing a feed to feedburner as before.

Regards.
It seems really simple to do: add a setting in your settings file and update the theme to use it if defined. Do you want to give it a try? I would be glad to merge it.

Any news?

In thinking about potential ways of implementing this capability, it occurred to me that it probably can and should be accomplished in a way that's not specific to FeedBurner. For example, let's say you want to host your feed(s) on a sub-domain, such as http://feeds.example.com/ -- as it stands now, the existing Jinja templates assume you always want to use the same fully-qualified domain name (FQDN) as your main site (e.g., www.example.com). Even for those who are planning to use FeedBurner, there are [good reasons](http://www.scottw.com/feedburner-cname) to use your own sub-domain for this purpose.

To that end, I have added the following entries to my settings file:

```
FEED_DOMAIN = "http://feeds.example.com"  # default -> SITEURL
FEED_MAIN_URL = "main"                    # default -> FEED
FEED_RSS_MAIN_URL = "main-rss"            # default -> FEED_RSS
```

I refer to the latter two as "main" feed URLs in order to distinguish the `all.atom.xml` and `all.rss.xml` feeds from the other tag, category, and language feed types. So in this particular case, the goal is for the main Atom feed to be available at: http://feeds.example.com/main

The relevant `<head>` links in the `base.html` template look like:

```
<link href="{{ FEED_DOMAIN }}/{{ FEED_MAIN_URL }}" type="application/atom+xml" rel="alternate" title="{{ SITENAME }} Atom Feed" />
{% if FEED_RSS %}
<link href="{{ FEED_DOMAIN }}/{{ FEED_RSS_MAIN_URL }}" type="application/atom+xml" rel="alternate" title="{{ SITENAME }} RSS Feed" />
{% endif %}
```

... producing, in our example case, the following rendered link:

```
<link href="http://feeds.example.com/main" type="application/atom+xml" rel="alternate" title="ExampleBlog Atom Feed" />
```

Any thoughts on this before I issue a pull request?
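For what it's worth, a minimal sketch of the fallback that the settings patch above introduces (simplified from `configure_settings`; this is not the complete function, just the FEED_DOMAIN handling):

```python
def apply_feed_domain_default(settings):
    """Feed URLs should be absolute, so fall back to SITEURL for FEED_DOMAIN."""
    if 'SITEURL' in settings and 'FEED_DOMAIN' not in settings:
        settings['FEED_DOMAIN'] = settings['SITEURL']
    return settings


# With only SITEURL configured, feeds stay on the main domain...
print(apply_feed_domain_default({'SITEURL': 'http://blog.example.com'}))
# ...while an explicit FEED_DOMAIN (FeedBurner, feeds.example.com, ...) is kept as-is.
print(apply_feed_domain_default({'SITEURL': 'http://blog.example.com',
                                 'FEED_DOMAIN': 'http://feeds.example.com'}))
```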
2012-03-17T03:39:55Z
[]
[]
getpelican/pelican
270
getpelican__pelican-270
[ "129", "154", "171", "233" ]
c522ce7fbcb37a8ca141c6b2604ba85bfef00125
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -1,6 +1,10 @@ # -*- coding: utf-8 -*- import sys, os +sys.path.append(os.path.abspath('..')) + +from pelican import __version__, __major__ + # -- General configuration ----------------------------------------------------- templates_path = ['_templates'] extensions = ['sphinx.ext.autodoc',] @@ -9,12 +13,11 @@ project = u'Pelican' copyright = u'2010, Alexis Metaireau and contributors' exclude_patterns = ['_build'] -version = "2" -release = version +version = __version__ +release = __major__ # -- Options for HTML output --------------------------------------------------- -sys.path.append(os.path.abspath('_themes')) html_theme_path = ['_themes'] html_theme = 'pelican' @@ -40,7 +43,7 @@ ('index', 'pelican', u'pelican documentation', [u'Alexis Métaireau'], 1), ('pelican-themes', 'pelican-themes', u'A theme manager for Pelican', - [u'Mickaël Raybaud'], 'en.1'), - ('fr/pelican-themes', 'pelican-themes', u'Un gestionnaire de thèmes pour Pelican', - [u'Mickaël Raybaud'], 'fr.1') + [u'Mickaël Raybaud'], 1), + ('themes', 'pelican-theming', u'How to create themes for Pelican', + [u'The Pelican contributors'], 1) ] diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -1,17 +1,21 @@ import argparse import os +import sys +import re import time from pelican import signals from pelican.generators import (ArticlesGenerator, PagesGenerator, StaticGenerator, PdfGenerator) -from pelican.settings import read_settings +from pelican.settings import read_settings, _DEFAULT_CONFIG from pelican.utils import clean_output_dir, files_changed from pelican.writers import Writer from pelican import log -VERSION = "2.7.2" +__major__ = 3 +__minor__ = 0 +__version__ = "{0}.{1}".format(__major__, __minor__) class Pelican(object): @@ -20,9 +24,12 @@ def __init__(self, settings=None, path=None, theme=None, output_path=None, """Read the settings, and performs some checks on the environment before doing anything else. 
""" + if settings is None: + settings = _DEFAULT_CONFIG + self.path = path or settings['PATH'] if not self.path: - raise Exception('you need to specify a path containing the content' + raise Exception('You need to specify a path containing the content' ' (see pelican --help for more information)') if self.path.endswith('/'): @@ -30,11 +37,15 @@ def __init__(self, settings=None, path=None, theme=None, output_path=None, # define the default settings self.settings = settings + + self._handle_deprecation() + self.theme = theme or settings['THEME'] output_path = output_path or settings['OUTPUT_PATH'] self.output_path = os.path.realpath(output_path) self.markup = markup or settings['MARKUP'] - self.delete_outputdir = delete_outputdir or settings['DELETE_OUTPUT_DIRECTORY'] + self.delete_outputdir = delete_outputdir \ + or settings['DELETE_OUTPUT_DIRECTORY'] # find the theme in pelican.theme if the given one does not exists if not os.path.exists(self.theme): @@ -44,7 +55,7 @@ def __init__(self, settings=None, path=None, theme=None, output_path=None, self.theme = theme_path else: raise Exception("Impossible to find the theme %s" % theme) - + self.init_plugins() signals.initialized.send(self) @@ -52,13 +63,52 @@ def init_plugins(self): self.plugins = self.settings['PLUGINS'] for plugin in self.plugins: # if it's a string, then import it - if isinstance(plugin, str): + if isinstance(plugin, basestring): log.debug("Loading plugin `{0}' ...".format(plugin)) plugin = __import__(plugin, globals(), locals(), 'module') log.debug("Registering plugin `{0}' ...".format(plugin.__name__)) plugin.register() + def _handle_deprecation(self): + + if self.settings.get('CLEAN_URLS', False): + log.warning('Found deprecated `CLEAN_URLS` in settings. Modifing' + ' the following settings for the same behaviour.') + + self.settings['ARTICLE_URL'] = '{slug}/' + self.settings['ARTICLE_LANG_URL'] = '{slug}-{lang}/' + self.settings['PAGE_URL'] = 'pages/{slug}/' + self.settings['PAGE_LANG_URL'] = 'pages/{slug}-{lang}/' + + for setting in ('ARTICLE_URL', 'ARTICLE_LANG_URL', 'PAGE_URL', + 'PAGE_LANG_URL'): + log.warning("%s = '%s'" % (setting, self.settings[setting])) + + if self.settings.get('ARTICLE_PERMALINK_STRUCTURE', False): + log.warning('Found deprecated `ARTICLE_PERMALINK_STRUCTURE` in' + ' settings. Modifing the following settings for' + ' the same behaviour.') + + structure = self.settings['ARTICLE_PERMALINK_STRUCTURE'] + + # Convert %(variable) into {variable}. 
+ structure = re.sub('%\((\w+)\)s', '{\g<1>}', structure) + + # Convert %x into {date:%x} for strftime + structure = re.sub('(%[A-z])', '{date:\g<1>}', structure) + + # Strip a / prefix + structure = re.sub('^/', '', structure) + + for setting in ('ARTICLE_URL', 'ARTICLE_LANG_URL', 'PAGE_URL', + 'PAGE_LANG_URL', 'ARTICLE_SAVE_AS', + 'ARTICLE_LANG_SAVE_AS', 'PAGE_SAVE_AS', + 'PAGE_LANG_SAVE_AS'): + self.settings[setting] = os.path.join(structure, + self.settings[setting]) + log.warning("%s = '%s'" % (setting, self.settings[setting])) + def run(self): """Run the generators and return""" @@ -79,9 +129,9 @@ def run(self): if hasattr(p, 'generate_context'): p.generate_context() - # erase the directory if it is not the source and if that's + # erase the directory if it is not the source and if that's # explicitely asked - if (self.delete_outputdir and + if (self.delete_outputdir and not os.path.realpath(self.path).startswith(self.output_path)): clean_output_dir(self.output_path) @@ -91,7 +141,6 @@ def run(self): if hasattr(p, 'generate_output'): p.generate_output(writer) - def get_generator_classes(self): generators = [ArticlesGenerator, PagesGenerator, StaticGenerator] if self.settings['PDF_GENERATOR']: @@ -100,45 +149,51 @@ def get_generator_classes(self): def get_writer(self): return Writer(self.output_path, settings=self.settings) - def main(): parser = argparse.ArgumentParser(description="""A tool to generate a - static blog, with restructured text input files.""") + static blog, with restructured text input files.""", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument(dest='path', nargs='?', - help='Path where to find the content files') + help='Path where to find the content files.') parser.add_argument('-t', '--theme-path', dest='theme', help='Path where to find the theme templates. If not specified, it' 'will use the default one included with pelican.') parser.add_argument('-o', '--output', dest='output', - help='Where to output the generated files. If not specified, a directory' - ' will be created, named "output" in the current path.') - parser.add_argument('-m', '--markup', default=None, dest='markup', - help='the list of markup language to use (rst or md). Please indicate ' - 'them separated by commas') - parser.add_argument('-s', '--settings', dest='settings', default='', - help='the settings of the application.') - parser.add_argument('-d', '--delete-output-directory', dest='delete_outputdir', + help='Where to output the generated files. If not specified, a ' + 'directory will be created, named "output" in the current path.') + parser.add_argument('-m', '--markup', dest='markup', + help='The list of markup language to use (rst or md). 
Please indicate ' + 'them separated by commas.') + parser.add_argument('-s', '--settings', dest='settings', + help='The settings of the application.') + parser.add_argument('-d', '--delete-output-directory', + dest='delete_outputdir', action='store_true', help='Delete the output directory.') - parser.add_argument('-v', '--verbose', action='store_const', const=log.INFO, dest='verbosity', - help='Show all messages') - parser.add_argument('-q', '--quiet', action='store_const', const=log.CRITICAL, dest='verbosity', - help='Show only critical errors') - parser.add_argument('-D', '--debug', action='store_const', const=log.DEBUG, dest='verbosity', - help='Show all message, including debug messages') - parser.add_argument('--version', action='version', version=VERSION, - help='Print the pelican version and exit') - parser.add_argument('-r', '--autoreload', dest='autoreload', action='store_true', - help="Relaunch pelican each time a modification occurs on the content" - "files") + parser.add_argument('-v', '--verbose', action='store_const', + const=log.INFO, dest='verbosity', + help='Show all messages.') + parser.add_argument('-q', '--quiet', action='store_const', + const=log.CRITICAL, dest='verbosity', + help='Show only critical errors.') + parser.add_argument('-D', '--debug', action='store_const', + const=log.DEBUG, dest='verbosity', + help='Show all message, including debug messages.') + parser.add_argument('--version', action='version', version=__version__, + help='Print the pelican version and exit.') + parser.add_argument('-r', '--autoreload', dest='autoreload', + action='store_true', + help="Relaunch pelican each time a modification occurs" + " on the content files.") args = parser.parse_args() log.init(args.verbosity) - # Split the markup languages only if some have been given. Otherwise, populate - # the variable with None. - markup = [a.strip().lower() for a in args.markup.split(',')] if args.markup else None + # Split the markup languages only if some have been given. Otherwise, + # populate the variable with None. 
+ markup = [a.strip().lower() for a in args.markup.split(',')]\ + if args.markup else None settings = read_settings(args.settings) @@ -168,8 +223,9 @@ def main(): else: pelican.run() except Exception, e: - log.critical(str(e)) + log.critical(unicode(e)) - -if __name__ == '__main__': - main() + if (args.verbosity == log.DEBUG): + raise + else: + sys.exit(getattr(e, 'exitcode', 1)) diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -1,7 +1,14 @@ # -*- coding: utf-8 -*- -from pelican.utils import slugify, truncate_html_words -from pelican.log import * +from datetime import datetime +from os import getenv +from sys import platform, stdin +import functools +import locale + +from pelican.log import warning, error from pelican.settings import _DEFAULT_CONFIG +from pelican.utils import slugify, truncate_html_words + class Page(object): """Represents a page @@ -11,29 +18,34 @@ class Page(object): """ mandatory_properties = ('title',) - def __init__(self, content, metadata=None, settings=None, filename=None): + def __init__(self, content, metadata=None, settings=None, + filename=None): # init parameters if not metadata: metadata = {} if not settings: settings = _DEFAULT_CONFIG + self.settings = settings self._content = content self.translations = [] - self.status = "published" # default value - local_metadata = dict(settings.get('DEFAULT_METADATA', ())) local_metadata.update(metadata) # set metadata as attributes for key, value in local_metadata.items(): setattr(self, key.lower(), value) - + # default author to the one in settings if not defined if not hasattr(self, 'author'): if 'AUTHOR' in settings: - self.author = settings['AUTHOR'] + self.author = Author(settings['AUTHOR'], settings) + else: + title = filename.decode('utf-8') if filename else self.title + self.author = Author(getenv('USER', 'John Doe'), settings) + warning(u"Author of `{0}' unknown, assuming that his name is " + "`{1}'".format(title, self.author)) # manage languages self.in_default_lang = True @@ -48,21 +60,6 @@ def __init__(self, content, metadata=None, settings=None, filename=None): if not hasattr(self, 'slug') and hasattr(self, 'title'): self.slug = slugify(self.title) - # create save_as from the slug (+lang) - if not hasattr(self, 'save_as') and hasattr(self, 'slug'): - if self.in_default_lang: - self.save_as = '%s.html' % self.slug - clean_url = '%s/' % self.slug - else: - self.save_as = '%s-%s.html' % (self.slug, self.lang) - clean_url = '%s-%s/' % (self.slug, self.lang) - - # change the save_as regarding the settings - if settings.get('CLEAN_URLS', False): - self.url = clean_url - elif hasattr(self, 'save_as'): - self.url = self.save_as - if filename: self.filename = filename @@ -73,16 +70,29 @@ def __init__(self, content, metadata=None, settings=None, filename=None): else: self.date_format = settings['DEFAULT_DATE_FORMAT'] + if isinstance(self.date_format, tuple): + locale.setlocale(locale.LC_ALL, self.date_format[0]) + self.date_format = self.date_format[1] + if hasattr(self, 'date'): - self.locale_date = self.date.strftime(self.date_format.encode('ascii','xmlcharrefreplace')).decode('utf') + encoded_date = self.date.strftime( + self.date_format.encode('ascii', 'xmlcharrefreplace')) - # manage summary - if not hasattr(self, 'summary'): - self.summary = property(lambda self: truncate_html_words(self.content, 50)).__get__(self, Page) + if platform == 'win32': + self.locale_date = encoded_date.decode(stdin.encoding) + else: + self.locale_date = 
encoded_date.decode('utf') # manage status if not hasattr(self, 'status'): self.status = settings['DEFAULT_STATUS'] + if not settings['WITH_FUTURE_DATES']: + if hasattr(self, 'date') and self.date > datetime.now(): + self.status = 'draft' + + # store the summary metadata if it is set + if 'summary' in metadata: + self._summary = metadata['summary'] def check_properties(self): """test that each mandatory property is set.""" @@ -90,6 +100,24 @@ def check_properties(self): if not hasattr(self, prop): raise NameError(prop) + @property + def url_format(self): + return { + 'slug': getattr(self, 'slug', ''), + 'lang': getattr(self, 'lang', 'en'), + 'date': getattr(self, 'date', datetime.now()), + 'author': self.author, + 'category': getattr(self, 'category', 'misc'), + } + + def _expand_settings(self, key): + fq_key = ('%s_%s' % (self.__class__.__name__, key)).upper() + return self.settings[fq_key].format(**self.url_format) + + def get_url_setting(self, key): + key = key if self.in_default_lang else 'lang_%s' % key + return self._expand_settings(key) + @property def content(self): if hasattr(self, "_get_content"): @@ -98,6 +126,24 @@ def content(self): content = self._content return content + def _get_summary(self): + """Returns the summary of an article, based on the summary metadata + if it is set, else troncate the content.""" + if hasattr(self, '_summary'): + return self._summary + else: + return truncate_html_words(self.content, 50) + + def _set_summary(self, summary): + """Dummy function""" + pass + + summary = property(_get_summary, _set_summary, "Summary of the article." + "Based on the content. Can't be set") + + url = property(functools.partial(get_url_setting, key='url')) + save_as = property(functools.partial(get_url_setting, key='save_as')) + class Article(Page): mandatory_properties = ('title', 'date', 'category') @@ -107,10 +153,53 @@ class Quote(Page): base_properties = ('author', 'date') +class URLWrapper(object): + def __init__(self, name, settings): + self.name = unicode(name) + self.slug = slugify(self.name) + self.settings = settings + + def as_dict(self): + return self.__dict__ + + def __hash__(self): + return hash(self.name) + + def __eq__(self, other): + return self.name == unicode(other) + + def __str__(self): + return str(self.name) + + def __unicode__(self): + return self.name + + def _from_settings(self, key): + setting = "%s_%s" % (self.__class__.__name__.upper(), key) + return self.settings[setting].format(**self.as_dict()) + + url = property(functools.partial(_from_settings, key='URL')) + save_as = property(functools.partial(_from_settings, key='SAVE_AS')) + + +class Category(URLWrapper): + pass + + +class Tag(URLWrapper): + def __init__(self, name, *args, **kwargs): + super(Tag, self).__init__(unicode.strip(name), *args, **kwargs) + + +class Author(URLWrapper): + pass + + def is_valid_content(content, f): try: content.check_properties() return True except NameError, e: - error(u"Skipping %s: impossible to find informations about '%s'" % (f, e)) + error(u"Skipping %s: impossible to find informations about '%s'"\ + % (f, e)) return False diff --git a/pelican/generators.py b/pelican/generators.py old mode 100755 new mode 100644 --- a/pelican/generators.py +++ b/pelican/generators.py @@ -1,20 +1,21 @@ # -*- coding: utf-8 -*- -from operator import attrgetter, itemgetter -from itertools import chain -from functools import partial -from datetime import datetime -from collections import defaultdict import os +import datetime import math import random -from jinja2 import 
Environment, FileSystemLoader +from collections import defaultdict +from functools import partial +from itertools import chain +from operator import attrgetter, itemgetter + +from jinja2 import Environment, FileSystemLoader, PrefixLoader, ChoiceLoader from jinja2.exceptions import TemplateNotFound -from pelican.utils import copy, get_relative_path, process_translations, open -from pelican.contents import Article, Page, is_valid_content +from pelican.contents import Article, Page, Category, is_valid_content +from pelican.log import warning, error, debug, info from pelican.readers import read_file -from pelican.log import * +from pelican.utils import copy, process_translations, open from pelican import signals @@ -31,12 +32,24 @@ def __init__(self, *args, **kwargs): # templates cache self._templates = {} - self._templates_path = os.path.expanduser(os.path.join(self.theme, 'templates')) + self._templates_path = os.path.expanduser( + os.path.join(self.theme, 'templates')) + + theme_path = os.path.dirname(os.path.abspath(__file__)) + + simple_loader = FileSystemLoader(os.path.join(theme_path, + "themes", "simple", "templates")) self._env = Environment( - loader=FileSystemLoader(self._templates_path), + loader=ChoiceLoader([ + FileSystemLoader(self._templates_path), + simple_loader, # implicit inheritance + PrefixLoader({'!simple': simple_loader}) # explicit one + ]), extensions=self.settings.get('JINJA_EXTENSIONS', []), ) + debug('template list: {0}'.format(self._env.list_templates())) + # get custom Jinja filters from user settings custom_filters = self.settings.get('JINJA_FILTERS', {}) self._env.filters.update(custom_filters) @@ -50,8 +63,8 @@ def get_template(self, name): try: self._templates[name] = self._env.get_template(name + '.html') except TemplateNotFound: - raise Exception('[templates] unable to load %s.html from %s' % ( - name, self._templates_path)) + raise Exception('[templates] unable to load %s.html from %s' \ + % (name, self._templates_path)) return self._templates[name] def get_files(self, path, exclude=[], extensions=None): @@ -67,7 +80,7 @@ def get_files(self, path, exclude=[], extensions=None): try: iter = os.walk(path, followlinks=True) - except TypeError: # python 2.5 does not support followlinks + except TypeError: # python 2.5 does not support followlinks iter = os.walk(path) for root, dirs, temp_files in iter: @@ -94,11 +107,12 @@ class ArticlesGenerator(Generator): def __init__(self, *args, **kwargs): """initialize properties""" - self.articles = [] # only articles in default language + self.articles = [] # only articles in default language self.translations = [] self.dates = {} self.tags = defaultdict(list) self.categories = defaultdict(list) + self.authors = defaultdict(list) super(ArticlesGenerator, self).__init__(*args, **kwargs) self.drafts = [] signals.article_generator_init.send(self) @@ -106,49 +120,52 @@ def __init__(self, *args, **kwargs): def generate_feeds(self, writer): """Generate the feeds from the current context, and output files.""" - writer.write_feed(self.articles, self.context, self.settings['FEED']) + if self.settings.get('FEED'): + writer.write_feed(self.articles, self.context, + self.settings['FEED']) - if 'FEED_RSS' in self.settings: + if self.settings.get('FEED_RSS'): writer.write_feed(self.articles, self.context, - self.settings['FEED_RSS'], feed_type='rss') + self.settings['FEED_RSS'], feed_type='rss') for cat, arts in self.categories: arts.sort(key=attrgetter('date'), reverse=True) - writer.write_feed(arts, self.context, - 
self.settings['CATEGORY_FEED'] % cat) + if self.settings.get('CATEGORY_FEED'): + writer.write_feed(arts, self.context, + self.settings['CATEGORY_FEED'] % cat) - if 'CATEGORY_FEED_RSS' in self.settings: + if self.settings.get('CATEGORY_FEED_RSS'): writer.write_feed(arts, self.context, - self.settings['CATEGORY_FEED_RSS'] % cat, - feed_type='rss') + self.settings['CATEGORY_FEED_RSS'] % cat, + feed_type='rss') - if 'TAG_FEED' in self.settings: + if self.settings.get('TAG_FEED') or self.settings.get('TAG_FEED_RSS'): for tag, arts in self.tags.items(): arts.sort(key=attrgetter('date'), reverse=True) - writer.write_feed(arts, self.context, - self.settings['TAG_FEED'] % tag) - - if 'TAG_FEED_RSS' in self.settings: + if self.settings.get('TAG_FEED'): writer.write_feed(arts, self.context, - self.settings['TAG_FEED_RSS'] % tag, feed_type='rss') + self.settings['TAG_FEED'] % tag) - translations_feeds = defaultdict(list) - for article in chain(self.articles, self.translations): - translations_feeds[article.lang].append(article) + if self.settings.get('TAG_FEED_RSS'): + writer.write_feed(arts, self.context, + self.settings['TAG_FEED_RSS'] % tag, + feed_type='rss') - for lang, items in translations_feeds.items(): - items.sort(key=attrgetter('date'), reverse=True) - writer.write_feed(items, self.context, - self.settings['TRANSLATION_FEED'] % lang) + if self.settings.get('TRANSLATION_FEED'): + translations_feeds = defaultdict(list) + for article in chain(self.articles, self.translations): + translations_feeds[article.lang].append(article) + for lang, items in translations_feeds.items(): + items.sort(key=attrgetter('date'), reverse=True) + writer.write_feed(items, self.context, + self.settings['TRANSLATION_FEED'] % lang) def generate_pages(self, writer): """Generate the pages on the disk""" - write = partial( - writer.write_file, - relative_urls = self.settings.get('RELATIVE_URLS') - ) + write = partial(writer.write_file, + relative_urls=self.settings.get('RELATIVE_URLS')) # to minimize the number of relative path stuff modification # in writer, articles pass first @@ -163,55 +180,69 @@ def generate_pages(self, writer): paginated = {} if template in PAGINATED_TEMPLATES: paginated = {'articles': self.articles, 'dates': self.dates} - write('%s.html' % template, self.get_template(template), self.context, - blog=True, paginated=paginated, page_name=template) + + write('%s.html' % template, self.get_template(template), + self.context, blog=True, paginated=paginated, + page_name=template) # and subfolders after that tag_template = self.get_template('tag') for tag, articles in self.tags.items(): articles.sort(key=attrgetter('date'), reverse=True) dates = [article for article in self.dates if article in articles] - write('tag/%s.html' % tag, tag_template, self.context, tag=tag, + write(tag.save_as, tag_template, self.context, tag=tag, articles=articles, dates=dates, paginated={'articles': articles, 'dates': dates}, - page_name='tag/%s' % tag) + page_name=u'tag/%s' % tag) category_template = self.get_template('category') for cat, articles in self.categories: dates = [article for article in self.dates if article in articles] - write('category/%s.html' % cat, category_template, self.context, + write(cat.save_as, category_template, self.context, category=cat, articles=articles, dates=dates, paginated={'articles': articles, 'dates': dates}, - page_name='category/%s' % cat) + page_name=u'category/%s' % cat) - for article in self.drafts: - write('drafts/%s.html' % article.slug, article_template, self.context, - 
article=article, category=article.category) + author_template = self.get_template('author') + for aut, articles in self.authors: + dates = [article for article in self.dates if article in articles] + write(aut.save_as, author_template, self.context, + author=aut, articles=articles, dates=dates, + paginated={'articles': articles, 'dates': dates}, + page_name=u'author/%s' % aut) + for article in self.drafts: + write('drafts/%s.html' % article.slug, article_template, + self.context, article=article, category=article.category) def generate_context(self): """change the context""" - # return the list of files to use - files = self.get_files(self.path, exclude=['pages',]) all_articles = [] - for f in files: - content, metadata = read_file(f) + for f in self.get_files( + os.path.join(self.path, self.settings['ARTICLE_DIR']), + exclude=self.settings['ARTICLE_EXCLUDES']): + try: + content, metadata = read_file(f, settings=self.settings) + except Exception, e: + warning(u'Could not process %s\n%s' % (f, str(e))) + continue # if no category is set, use the name of the path as a category - if 'category' not in metadata.keys(): + if 'category' not in metadata: if os.path.dirname(f) == self.path: category = self.settings['DEFAULT_CATEGORY'] else: - category = os.path.basename(os.path.dirname(f)) + category = os.path.basename(os.path.dirname(f))\ + .decode('utf-8') if category != '': - metadata['category'] = unicode(category) + metadata['category'] = Category(category, self.settings) - if 'date' not in metadata.keys()\ - and self.settings['FALLBACK_ON_FS_DATE']: - metadata['date'] = datetime.fromtimestamp(os.stat(f).st_ctime) + if 'date' not in metadata and self.settings['FALLBACK_ON_FS_DATE']: + metadata['date'] = datetime.datetime.fromtimestamp( + os.stat(f).st_ctime) signals.article_generate_context.send(self, metadata=metadata) article = Article(content, metadata, settings=self.settings, @@ -232,7 +263,7 @@ def generate_context(self): for article in self.articles: # only main articles are listed in categories, not translations self.categories[article.category].append(article) - + self.authors[article.author].append(article) # sort the articles by date self.articles.sort(key=attrgetter('date'), reverse=True) @@ -246,21 +277,20 @@ def generate_context(self): for tag in getattr(article, 'tags', []): tag_cloud[tag] += 1 - tag_cloud = sorted(tag_cloud.items(), key = itemgetter(1), reverse = True) + tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True) tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')] tags = map(itemgetter(1), tag_cloud) if tags: - max_count = max(tags) + max_count = max(tags) steps = self.settings.get('TAG_CLOUD_STEPS') # calculate word sizes self.tag_cloud = [ ( tag, - int( - math.floor(steps - (steps - 1) * math.log(count) / (math.log(max_count)or 1)) - ) + int(math.floor(steps - (steps - 1) * math.log(count) + / (math.log(max_count)or 1))) ) for tag, count in tag_cloud ] @@ -271,9 +301,13 @@ def generate_context(self): # order the categories per name self.categories = list(self.categories.items()) - self.categories.sort(reverse=self.settings.get('REVERSE_CATEGORY_ORDER')) - self._update_context(('articles', 'dates', 'tags', 'categories', 'tag_cloud')) + self.categories.sort(reverse=self.settings['REVERSE_CATEGORY_ORDER']) + self.authors = list(self.authors.items()) + self.authors.sort() + + self._update_context(('articles', 'dates', 'tags', 'categories', + 'tag_cloud', 'authors')) def generate_output(self, writer): self.generate_feeds(writer) @@ -289,8 
+323,14 @@ def __init__(self, *args, **kwargs): def generate_context(self): all_pages = [] - for f in self.get_files(os.sep.join((self.path, 'pages'))): - content, metadata = read_file(f) + for f in self.get_files( + os.path.join(self.path, self.settings['PAGE_DIR']), + exclude=self.settings['PAGE_EXCLUDES']): + try: + content, metadata = read_file(f) + except Exception, e: + error(u'Could not process %s\n%s' % (f, str(e))) + continue page = Page(content, metadata, settings=self.settings, filename=f) if not is_valid_content(page, f): @@ -304,9 +344,9 @@ def generate_context(self): def generate_output(self, writer): for page in chain(self.translations, self.pages): - writer.write_file('pages/%s' % page.save_as, self.get_template('page'), + writer.write_file(page.save_as, self.get_template('page'), self.context, page=page, - relative_urls = self.settings.get('RELATIVE_URLS')) + relative_urls=self.settings.get('RELATIVE_URLS')) class StaticGenerator(Generator): @@ -317,8 +357,8 @@ def _copy_paths(self, paths, source, destination, output_path, final_path=None): """Copy all the paths from source to destination""" for path in paths: - copy(path, source, os.path.join(output_path, destination), final_path, - overwrite=True) + copy(path, source, os.path.join(output_path, destination), + final_path, overwrite=True) def generate_output(self, writer): self._copy_paths(self.settings['STATIC_PATHS'], self.path, @@ -328,7 +368,8 @@ def generate_output(self, writer): # copy all the files needed for source, destination in self.settings['FILES_TO_COPY']: - copy(source, self.path, self.output_path, destination, overwrite=True) + copy(source, self.path, self.output_path, destination, + overwrite=True) class PdfGenerator(Generator): @@ -337,7 +378,8 @@ class PdfGenerator(Generator): def __init__(self, *args, **kwargs): try: from rst2pdf.createpdf import RstToPdf - self.pdfcreator = RstToPdf(breakside=0, stylesheets=['twelvepoint']) + self.pdfcreator = RstToPdf(breakside=0, + stylesheets=['twelvepoint']) except ImportError: raise Exception("unable to find rst2pdf") super(PdfGenerator, self).__init__(*args, **kwargs) @@ -345,9 +387,10 @@ def __init__(self, *args, **kwargs): def _create_pdf(self, obj, output_path): if obj.filename.endswith(".rst"): filename = obj.slug + ".pdf" - output_pdf=os.path.join(output_path, filename) + output_pdf = os.path.join(output_path, filename) # print "Generating pdf for", obj.filename, " in ", output_pdf - self.pdfcreator.createPdf(text=open(obj.filename), output=output_pdf) + with open(obj.filename) as f: + self.pdfcreator.createPdf(text=f, output=output_pdf) info(u' [ok] writing %s' % output_pdf) def generate_context(self): @@ -358,11 +401,12 @@ def generate_output(self, writer=None): # since we write our own files info(u' Generating PDF files...') pdf_path = os.path.join(self.output_path, 'pdf') - try: - os.mkdir(pdf_path) - except OSError: - error("Couldn't create the pdf output folder in " + pdf_path) - pass + if not os.path.exists(pdf_path): + try: + os.mkdir(pdf_path) + except OSError: + error("Couldn't create the pdf output folder in " + pdf_path) + pass for article in self.context['articles']: self._create_pdf(article, pdf_path) diff --git a/pelican/log.py b/pelican/log.py --- a/pelican/log.py +++ b/pelican/log.py @@ -1,30 +1,33 @@ -from logging import CRITICAL, ERROR, WARN, INFO, DEBUG +import os +import sys +from logging import CRITICAL, ERROR, WARN, INFO, DEBUG from logging import critical, error, info, warning, warn, debug from logging import Formatter, getLogger, 
StreamHandler -import sys -import os -global ANSI -ANSI = { - 'gray' : lambda(text) : u'\033[1;30m' + unicode(text) + u'\033[1;m', - 'red' : lambda(text) : u'\033[1;31m' + unicode(text) + u'\033[1;m', - 'green' : lambda(text) : u'\033[1;32m' + unicode(text) + u'\033[1;m', - 'yellow' : lambda(text) : u'\033[1;33m' + unicode(text) + u'\033[1;m', - 'blue' : lambda(text) : u'\033[1;34m' + unicode(text) + u'\033[1;m', - 'magenta' : lambda(text) : u'\033[1;35m' + unicode(text) + u'\033[1;m', - 'cyan' : lambda(text) : u'\033[1;36m' + unicode(text) + u'\033[1;m', - 'white' : lambda(text) : u'\033[1;37m' + unicode(text) + u'\033[1;m', - 'bgred' : lambda(text) : u'\033[1;41m' + unicode(text) + u'\033[1;m', - 'bggreen' : lambda(text) : u'\033[1;42m' + unicode(text) + u'\033[1;m', - 'bgbrown' : lambda(text) : u'\033[1;43m' + unicode(text) + u'\033[1;m', - 'bgblue' : lambda(text) : u'\033[1;44m' + unicode(text) + u'\033[1;m', - 'bgmagenta' : lambda(text) : u'\033[1;45m' + unicode(text) + u'\033[1;m', - 'bgcyan' : lambda(text) : u'\033[1;46m' + unicode(text) + u'\033[1;m', - 'bggray' : lambda(text) : u'\033[1;47m' + unicode(text) + u'\033[1;m', - 'bgyellow' : lambda(text) : u'\033[1;43m' + unicode(text) + u'\033[1;m', - 'bggrey' : lambda(text) : u'\033[1;100m' + unicode(text) + u'\033[1;m' + +RESET_TERM = u'\033[0;m' + + +def start_color(index): + return u'\033[1;{0}m'.format(index) + + +def term_color(color): + code = COLOR_CODES[color] + return lambda text: start_color(code) + unicode(text) + RESET_TERM + + +COLOR_CODES = { + 'red': 31, + 'yellow': 33, + 'cyan': 36, + 'white': 37, + 'bgred': 41, + 'bggrey': 100, } +ANSI = dict((col, term_color(col)) for col in COLOR_CODES) + class ANSIFormatter(Formatter): """ @@ -62,17 +65,16 @@ def format(self, record): class DummyFormatter(object): """ A dummy class. 
- Return an instance of the appropriate formatter (ANSIFormatter if sys.stdout.isatty() is True, else TextFormatter) + Return an instance of the appropriate formatter (ANSIFormatter if + sys.stdout.isatty() is True, else TextFormatter) """ def __new__(cls, *args, **kwargs): - if os.isatty(sys.stdout.fileno()): # thanks to http://stackoverflow.com/questions/2086961/how-can-i-determine-if-a-python-script-is-executed-from-crontab/2087031#2087031 + if os.isatty(sys.stdout.fileno())\ + and not sys.platform.startswith('win'): return ANSIFormatter(*args, **kwargs) else: - return TextFormatter( *args, **kwargs) - - - + return TextFormatter(*args, **kwargs) def init(level=None, logger=getLogger(), handler=StreamHandler()): @@ -93,15 +95,15 @@ def init(level=None, logger=getLogger(), handler=StreamHandler()): __all__ = [ - "debug", - "info", - "warn", + "debug", + "info", + "warn", "warning", - "error", - "critical", - "DEBUG", - "INFO", - "WARN", - "ERROR", + "error", + "critical", + "DEBUG", + "INFO", + "WARN", + "ERROR", "CRITICAL" -] +] diff --git a/pelican/paginator.py b/pelican/paginator.py --- a/pelican/paginator.py +++ b/pelican/paginator.py @@ -1,6 +1,7 @@ # From django.core.paginator from math import ceil + class Paginator(object): def __init__(self, object_list, per_page, orphans=0): self.object_list = object_list @@ -39,6 +40,7 @@ def _get_page_range(self): return range(1, self.num_pages + 1) page_range = property(_get_page_range) + class Page(object): def __init__(self, object_list, number, paginator): self.object_list = object_list @@ -82,4 +84,3 @@ def end_index(self): if self.number == self.paginator.num_pages: return self.paginator.count return self.number * self.paginator.per_page - diff --git a/pelican/plugins/gravatar.py b/pelican/plugins/gravatar.py --- a/pelican/plugins/gravatar.py +++ b/pelican/plugins/gravatar.py @@ -2,39 +2,39 @@ from pelican import signals """ -Gravata plugin for Pelican -========================== +Gravatar plugin for Pelican +=========================== -Simply add author_gravatar variable in article's context, which contain +Simply add author_gravatar variable in article's context, which contains the gravatar url. Settings: --------- -Add AUTHOR_EMAIL to your settings file to define default author email +Add AUTHOR_EMAIL to your settings file to define default author email. Article metadata: ------------------ :email: article's author email -If one of them are defined the author_gravatar variable is added to +If one of them are defined, the author_gravatar variable is added to article's context. """ def add_gravatar(generator, metadata): - + #first check email if 'email' not in metadata.keys()\ and 'AUTHOR_EMAIL' in generator.settings.keys(): metadata['email'] = generator.settings['AUTHOR_EMAIL'] - + #then add gravatar url if 'email' in metadata.keys(): gravatar_url = "http://www.gravatar.com/avatar/" + \ hashlib.md5(metadata['email'].lower()).hexdigest() metadata["author_gravatar"] = gravatar_url - -def register(): + +def register(): signals.article_generate_context.connect(add_gravatar) diff --git a/pelican/plugins/html_rst_directive.py b/pelican/plugins/html_rst_directive.py --- a/pelican/plugins/html_rst_directive.py +++ b/pelican/plugins/html_rst_directive.py @@ -10,7 +10,7 @@ ---------- .. html:: - + (HTML code) @@ -25,12 +25,12 @@ <input type="hidden" name="lang" value="en" /> <input type="submit" value="Seeks !" id="search_button" /> </form> - + A contact form: .. 
html:: - + <form method="GET" action="mailto:some email"> <p> <input type="text" placeholder="Subject" name="subject"> diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -6,32 +6,39 @@ from docutils.writers.html4css1 import HTMLTranslator # import the directives to have pygments support - from pelican import rstdirectives + from pelican import rstdirectives # NOQA except ImportError: core = False try: from markdown import Markdown except ImportError: - Markdown = False + Markdown = False # NOQA import re +from pelican.contents import Category, Tag, Author from pelican.utils import get_date, open _METADATA_PROCESSORS = { - 'tags': lambda x: map(unicode.strip, x.split(',')), - 'date': lambda x: get_date(x), - 'status': unicode.strip, + 'tags': lambda x, y: [Tag(tag, y) for tag in unicode(x).split(',')], + 'date': lambda x, y: get_date(x), + 'status': lambda x, y: unicode.strip(x), + 'category': Category, + 'author': Author, } -def _process_metadata(name, value): - if name.lower() in _METADATA_PROCESSORS: - return _METADATA_PROCESSORS[name.lower()](value) - return value - class Reader(object): enabled = True + extensions = None + + def __init__(self, settings): + self.settings = settings + + def process_metadata(self, name, value): + if name in _METADATA_PROCESSORS: + return _METADATA_PROCESSORS[name](value, self.settings) + return value class _FieldBodyTranslator(HTMLTranslator): @@ -51,33 +58,35 @@ def render_node_to_html(document, node): node.walkabout(visitor) return visitor.astext() -def get_metadata(document): - """Return the dict containing document metadata""" - output = {} - for docinfo in document.traverse(docutils.nodes.docinfo): - for element in docinfo.children: - if element.tagname == 'field': # custom fields (e.g. summary) - name_elem, body_elem = element.children - name = name_elem.astext() - value = render_node_to_html(document, body_elem) - else: # standard fields (e.g. address) - name = element.tagname - value = element.astext() - - output[name] = _process_metadata(name, value) - return output - class RstReader(Reader): enabled = bool(docutils) extension = "rst" def _parse_metadata(self, document): - return get_metadata(document) + """Return the dict containing document metadata""" + output = {} + for docinfo in document.traverse(docutils.nodes.docinfo): + for element in docinfo.children: + if element.tagname == 'field': # custom fields (e.g. summary) + name_elem, body_elem = element.children + name = name_elem.astext() + if name == 'summary': + value = render_node_to_html(document, body_elem) + else: + value = body_elem.astext() + else: # standard fields (e.g. 
address) + name = element.tagname + value = element.astext() + name = name.lower() + + output[name] = self.process_metadata(name, value) + return output def _get_publisher(self, filename): extra_params = {'initial_header_level': '2'} - pub = docutils.core.Publisher(destination_class=docutils.io.StringOutput) + pub = docutils.core.Publisher( + destination_class=docutils.io.StringOutput) pub.set_components('standalone', 'restructuredtext', 'html') pub.process_programmatic_settings(None, extra_params, None) pub.set_source(source_path=filename) @@ -99,17 +108,18 @@ def read(self, filename): class MarkdownReader(Reader): enabled = bool(Markdown) extension = "md" + extensions = ['codehilite', 'extra'] def read(self, filename): """Parse content and metadata of markdown files""" text = open(filename) - md = Markdown(extensions = ['meta', 'codehilite']) + md = Markdown(extensions=set(self.extensions + ['meta'])) content = md.convert(text) metadata = {} for name, value in md.Meta.items(): name = name.lower() - metadata[name] = _process_metadata(name, value[0]) + metadata[name] = self.process_metadata(name, value[0]) return content, metadata @@ -119,27 +129,42 @@ class HtmlReader(Reader): def read(self, filename): """Parse content and metadata of (x)HTML files""" - content = open(filename) - metadata = {'title':'unnamed'} - for i in self._re.findall(content): - key = i.split(':')[0][5:].strip() - value = i.split(':')[-1][:-3].strip() - name = key.lower() - metadata[name] = _process_metadata(name, value) - - return content, metadata + with open(filename) as content: + metadata = {'title': 'unnamed'} + for i in self._re.findall(content): + key = i.split(':')[0][5:].strip() + value = i.split(':')[-1][:-3].strip() + name = key.lower() + metadata[name] = self.process_metadata(name, value) + return content, metadata _EXTENSIONS = dict((cls.extension, cls) for cls in Reader.__subclasses__()) -def read_file(filename, fmt=None): + +def read_file(filename, fmt=None, settings=None): """Return a reader object using the given format.""" if not fmt: fmt = filename.split('.')[-1] - if fmt not in _EXTENSIONS.keys(): + + if fmt not in _EXTENSIONS: raise TypeError('Pelican does not know how to parse %s' % filename) - reader = _EXTENSIONS[fmt]() + + reader = _EXTENSIONS[fmt](settings) + settings_key = '%s_EXTENSIONS' % fmt.upper() + + if settings and settings_key in settings: + reader.extensions = settings[settings_key] + if not reader.enabled: raise ValueError("Missing dependencies for %s" % fmt) - return reader.read(filename) + + content, metadata = reader.read(filename) + + # eventually filter the content with typogrify if asked so + if settings and settings['TYPOGRIFY']: + from typogrify import Typogrify + content = Typogrify.typogrify(content) + + return content, metadata diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py --- a/pelican/rstdirectives.py +++ b/pelican/rstdirectives.py @@ -35,3 +35,4 @@ def run(self): return [nodes.raw('', parsed, format='html')] directives.register_directive('code-block', Pygments) +directives.register_directive('sourcecode', Pygments) diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- import os +from os.path import isabs import locale from pelican import log @@ -7,24 +8,43 @@ DEFAULT_THEME = os.sep.join([os.path.dirname(os.path.abspath(__file__)), "themes/notmyidea"]) _DEFAULT_CONFIG = {'PATH': None, + 'ARTICLE_DIR': '', + 'ARTICLE_EXCLUDES': ('pages',), + 'PAGE_DIR': 
'pages', + 'PAGE_EXCLUDES': (), 'THEME': DEFAULT_THEME, 'OUTPUT_PATH': 'output/', 'MARKUP': ('rst', 'md'), - 'STATIC_PATHS': ['images',], - 'THEME_STATIC_PATHS': ['static',], + 'STATIC_PATHS': ['images', ], + 'THEME_STATIC_PATHS': ['static', ], 'FEED': 'feeds/all.atom.xml', 'CATEGORY_FEED': 'feeds/%s.atom.xml', 'TRANSLATION_FEED': 'feeds/all-%s.atom.xml', + 'FEED_MAX_ITEMS': '', 'SITENAME': 'A Pelican Blog', 'DISPLAY_PAGES_ON_MENU': True, 'PDF_GENERATOR': False, 'DEFAULT_CATEGORY': 'misc', 'FALLBACK_ON_FS_DATE': True, + 'WITH_FUTURE_DATES': True, 'CSS_FILE': 'main.css', 'REVERSE_ARCHIVE_ORDER': False, 'REVERSE_CATEGORY_ORDER': False, 'DELETE_OUTPUT_DIRECTORY': False, - 'CLEAN_URLS': False, # use /blah/ instead /blah.html in urls + 'ARTICLE_URL': '{slug}.html', + 'ARTICLE_SAVE_AS': '{slug}.html', + 'ARTICLE_LANG_URL': '{slug}-{lang}.html', + 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html', + 'PAGE_URL': 'pages/{slug}.html', + 'PAGE_SAVE_AS': 'pages/{slug}.html', + 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html', + 'PAGE_LANG_SAVE_AS': 'pages/{slug}-{lang}.html', + 'CATEGORY_URL': 'category/{name}.html', + 'CATEGORY_SAVE_AS': 'category/{name}.html', + 'TAG_URL': 'tag/{slug}.html', + 'TAG_SAVE_AS': 'tag/{slug}.html', + 'AUTHOR_URL': u'author/{slug}.html', + 'AUTHOR_SAVE_AS': u'author/{slug}.html', 'RELATIVE_URLS': True, 'DEFAULT_LANG': 'en', 'TAG_CLOUD_STEPS': 4, @@ -35,17 +55,19 @@ 'DEFAULT_DATE_FORMAT': '%a %d %B %Y', 'DATE_FORMATS': {}, 'JINJA_EXTENSIONS': [], - 'LOCALE': '', # default to user locale - 'WITH_PAGINATION': False, - 'DEFAULT_PAGINATION': 5, + 'LOCALE': '', # default to user locale + 'DEFAULT_PAGINATION': False, 'DEFAULT_ORPHANS': 0, 'DEFAULT_METADATA': (), 'FILES_TO_COPY': (), 'DEFAULT_STATUS': 'published', + 'ARTICLE_PERMALINK_STRUCTURE': '', + 'TYPOGRIFY': False, 'PLUGINS': [], - } + } -def read_settings(filename): + +def read_settings(filename=None): """Load a Python file into a dictionary. """ context = _DEFAULT_CONFIG.copy() @@ -56,6 +78,14 @@ def read_settings(filename): if key.isupper(): context[key] = tempdict[key] + # Make the paths relative to the settings file + for path in ['PATH', 'OUTPUT_PATH']: + if path in context: + if context[path] is not None and not isabs(context[path]): + context[path] = os.path.abspath(os.path.normpath( + os.path.join(os.path.dirname(filename), context[path])) + ) + # if locales is not a list, make it one locales = context['LOCALE'] @@ -69,17 +99,17 @@ def read_settings(filename): for locale_ in locales: try: locale.setlocale(locale.LC_ALL, locale_) - break # break if it is successfull + break # break if it is successfull except locale.Error: pass else: log.warn("LOCALE option doesn't contain a correct value") - # Make the paths relative to the settings file - for path in ['PATH', 'OUTPUT_PATH']: - if path in context: - if context[path] is not None and not os.path.isabs(context[path]): - context[path] = os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(filename), context[path]))) + if not 'TIMEZONE' in context: + log.warn("No timezone information specified in the settings. Assuming" + " your timezone is UTC for feed generation. 
Check " + "http://docs.notmyidea.org/alexis/pelican/settings.html#timezone " + "for more information") # set the locale return context diff --git a/pelican/tools/__init__.py b/pelican/tools/__init__.py new file mode 100644 diff --git a/pelican/tools/pelican_import.py b/pelican/tools/pelican_import.py new file mode 100755 --- /dev/null +++ b/pelican/tools/pelican_import.py @@ -0,0 +1,295 @@ +#!/usr/bin/env python + +import argparse +import os +import subprocess +import sys +import time + +from codecs import open + +from pelican.utils import slugify + + +def wp2fields(xml): + """Opens a wordpress XML file, and yield pelican fields""" + from BeautifulSoup import BeautifulStoneSoup + + xmlfile = open(xml, encoding='utf-8').read() + soup = BeautifulStoneSoup(xmlfile) + items = soup.rss.channel.findAll('item') + + for item in items: + if item.fetch('wp:status')[0].contents[0] == "publish": + title = item.title.contents[0] + content = item.fetch('content:encoded')[0].contents[0] + filename = item.fetch('wp:post_name')[0].contents[0] + + raw_date = item.fetch('wp:post_date')[0].contents[0] + date_object = time.strptime(raw_date, "%Y-%m-%d %H:%M:%S") + date = time.strftime("%Y-%m-%d %H:%M", date_object) + + author = item.fetch('dc:creator')[0].contents[0].title() + + categories = [cat.contents[0] for cat in item.fetch(domain='category')] + # caturl = [cat['nicename'] for cat in item.fetch(domain='category')] + + tags = [tag.contents[0].title() for tag in item.fetch(domain='tag', nicename=None)] + + yield (title, content, filename, date, author, categories, tags, "html") + +def dc2fields(file): + """Opens a Dotclear export file, and yield pelican fields""" + from BeautifulSoup import BeautifulStoneSoup + + in_cat = False + in_post = False + category_list = {} + posts = [] + + with open(file, 'r', encoding='utf-8') as f: + + for line in f: + # remove final \n + line = line[:-1] + + if line.startswith('[category'): + in_cat = True + elif line.startswith('[post'): + in_post = True + elif in_cat: + fields = line.split('","') + if not line: + in_cat = False + else: + # remove 1st and last "" + fields[0] = fields[0][1:] + # fields[-1] = fields[-1][:-1] + category_list[fields[0]]=fields[2] + elif in_post: + if not line: + in_post = False + break + else: + posts.append(line) + + print("%i posts read." 
% len(posts)) + + for post in posts: + fields = post.split('","') + + # post_id = fields[0][1:] + # blog_id = fields[1] + # user_id = fields[2] + cat_id = fields[3] + # post_dt = fields[4] + # post_tz = fields[5] + post_creadt = fields[6] + # post_upddt = fields[7] + # post_password = fields[8] + # post_type = fields[9] + post_format = fields[10] + # post_url = fields[11] + # post_lang = fields[12] + post_title = fields[13] + post_excerpt = fields[14] + post_excerpt_xhtml = fields[15] + post_content = fields[16] + post_content_xhtml = fields[17] + # post_notes = fields[18] + # post_words = fields[19] + # post_status = fields[20] + # post_selected = fields[21] + # post_position = fields[22] + # post_open_comment = fields[23] + # post_open_tb = fields[24] + # nb_comment = fields[25] + # nb_trackback = fields[26] + post_meta = fields[27] + # redirect_url = fields[28][:-1] + + # remove seconds + post_creadt = ':'.join(post_creadt.split(':')[0:2]) + + author = "" + categories = [] + tags = [] + + if cat_id: + categories = [category_list[id].strip() for id in cat_id.split(',')] + + # Get tags related to a post + tag = post_meta.replace('{', '').replace('}', '').replace('a:1:s:3:\\"tag\\";a:', '').replace('a:0:', '') + if len(tag) > 1: + if int(tag[:1]) == 1: + newtag = tag.split('"')[1] + tags.append(unicode(BeautifulStoneSoup(newtag,convertEntities=BeautifulStoneSoup.HTML_ENTITIES ))) + else: + i=1 + j=1 + while(i <= int(tag[:1])): + newtag = tag.split('"')[j].replace('\\','') + tags.append(unicode(BeautifulStoneSoup(newtag,convertEntities=BeautifulStoneSoup.HTML_ENTITIES ))) + i=i+1 + if j < int(tag[:1])*2: + j=j+2 + + """ + dotclear2 does not use markdown by default unless you use the markdown plugin + Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown + """ + if post_format == "markdown": + content = post_excerpt + post_content + else: + content = post_excerpt_xhtml + post_content_xhtml + content = content.replace('\\n', '') + post_format = "html" + + yield (post_title, content, slugify(post_title), post_creadt, author, categories, tags, post_format) + + +def feed2fields(file): + """Read a feed and yield pelican fields""" + import feedparser + d = feedparser.parse(file) + for entry in d.entries: + date = (time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed) + if hasattr(entry, "updated_parsed") else None) + author = entry.author if hasattr(entry, "author") else None + tags = [e['term'] for e in entry.tags] if hasattr(entry, "tags") else None + + slug = slugify(entry.title) + yield (entry.title, entry.description, slug, date, author, [], tags, "html") + + +def build_header(title, date, author, categories, tags): + """Build a header from a list of fields""" + header = '%s\n%s\n' % (title, '#' * len(title)) + if date: + header += ':date: %s\n' % date + if categories: + header += ':category: %s\n' % ', '.join(categories) + if tags: + header += ':tags: %s\n' % ', '.join(tags) + header += '\n' + return header + +def build_markdown_header(title, date, author, categories, tags): + """Build a header from a list of fields""" + header = 'Title: %s\n' % title + if date: + header += 'Date: %s\n' % date + if categories: + header += 'Category: %s\n' % ', '.join(categories) + if tags: + header += 'Tags: %s\n' % ', '.join(tags) + header += '\n' + return header + +def fields2pelican(fields, out_markup, output_path, dircat=False): + for title, content, filename, date, author, categories, tags, in_markup in fields: + if (in_markup == "markdown") or (out_markup == "markdown") : + ext = '.md' + 
header = build_markdown_header(title, date, author, categories, tags) + else: + out_markup = "rst" + ext = '.rst' + header = build_header(title, date, author, categories, tags) + + filename = os.path.basename(filename) + + # option to put files in directories with categories names + if dircat and (len(categories) == 1): + catname = slugify(categories[0]) + out_filename = os.path.join(output_path, catname, filename+ext) + if not os.path.isdir(os.path.join(output_path, catname)): + os.mkdir(os.path.join(output_path, catname)) + else: + out_filename = os.path.join(output_path, filename+ext) + + print(out_filename) + + if in_markup == "html": + html_filename = os.path.join(output_path, filename+'.html') + + with open(html_filename, 'w', encoding='utf-8') as fp: + # Replace simple newlines with <br />+newline so that the HTML file + # represents the original post more accurately + content = content.replace("\n", "<br />\n") + fp.write(content) + + cmd = 'pandoc --normalize --reference-links --from=html --to={0} -o "{1}" "{2}"'.format( + out_markup, out_filename, html_filename) + + try: + rc = subprocess.call(cmd, shell=True) + if rc < 0: + print("Child was terminated by signal %d" % -rc) + exit() + elif rc > 0: + print("Please, check your Pandoc installation.") + exit() + except OSError, e: + print("Pandoc execution failed: %s" % e) + exit() + + os.remove(html_filename) + + with open(out_filename, 'r', encoding='utf-8') as fs: + content = fs.read() + if out_markup == "markdown": + # In markdown, to insert a <br />, end a line with two or more spaces & then a end-of-line + content = content.replace("\\\n ", " \n") + content = content.replace("\\\n", " \n") + + with open(out_filename, 'w', encoding='utf-8') as fs: + fs.write(header + content) + + +def main(): + parser = argparse.ArgumentParser( + description="Transform feed, Wordpress or Dotclear files to rst files." 
+ "Be sure to have pandoc installed", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument(dest='input', help='The input file to read') + parser.add_argument('--wpfile', action='store_true', dest='wpfile', + help='Wordpress XML export') + parser.add_argument('--dotclear', action='store_true', dest='dotclear', + help='Dotclear export') + parser.add_argument('--feed', action='store_true', dest='feed', + help='Feed to parse') + parser.add_argument('-o', '--output', dest='output', default='output', + help='Output path') + parser.add_argument('-m', '--markup', dest='markup', default='rst', + help='Output markup format (supports rst & markdown)') + parser.add_argument('--dir-cat', action='store_true', dest='dircat', + help='Put files in directories with categories name') + args = parser.parse_args() + + input_type = None + if args.wpfile: + input_type = 'wordpress' + elif args.dotclear: + input_type = 'dotclear' + elif args.feed: + input_type = 'feed' + else: + print("You must provide either --wpfile, --dotclear or --feed options") + exit() + + if not os.path.exists(args.output): + try: + os.mkdir(args.output) + except OSError: + print("Unable to create the output folder: " + args.output) + exit() + + if input_type == 'wordpress': + fields = wp2fields(args.input) + elif input_type == 'dotclear': + fields = dc2fields(args.input) + elif input_type == 'feed': + fields = feed2fields(args.input) + + fields2pelican(fields, args.markup, args.output, dircat=args.dircat or False) diff --git a/pelican/tools/pelican_quickstart.py b/pelican/tools/pelican_quickstart.py new file mode 100755 --- /dev/null +++ b/pelican/tools/pelican_quickstart.py @@ -0,0 +1,264 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- # + +import os +import string +import argparse + +from pelican import __version__ + +TEMPLATES = { + 'Makefile' : ''' +PELICAN=$pelican +PELICANOPTS=$pelicanopts + +BASEDIR=$$(PWD) +INPUTDIR=$$(BASEDIR)/src +OUTPUTDIR=$$(BASEDIR)/output +CONFFILE=$$(BASEDIR)/pelican.conf.py + +FTP_HOST=$ftp_host +FTP_USER=$ftp_user +FTP_TARGET_DIR=$ftp_target_dir + +SSH_HOST=$ssh_host +SSH_USER=$ssh_user +SSH_TARGET_DIR=$ssh_target_dir + +DROPBOX_DIR=$dropbox_dir + +help: +\t@echo 'Makefile for a pelican Web site ' +\t@echo ' ' +\t@echo 'Usage: ' +\t@echo ' make html (re)generate the web site ' +\t@echo ' make clean remove the generated files ' +\t@echo ' ftp_upload upload the web site using FTP ' +\t@echo ' ssh_upload upload the web site using SSH ' +\t@echo ' dropbox_upload upload the web site using Dropbox ' +\t@echo ' ' + + +html: clean $$(OUTPUTDIR)/index.html +\t@echo 'Done' + +$$(OUTPUTDIR)/%.html: +\t$$(PELICAN) $$(INPUTDIR) -o $$(OUTPUTDIR) -s $$(CONFFILE) $$(PELICANOPTS) + +clean: +\trm -fr $$(OUTPUTDIR) +\tmkdir $$(OUTPUTDIR) + +dropbox_upload: $$(OUTPUTDIR)/index.html +\tcp -r $$(OUTPUTDIR)/* $$(DROPBOX_DIR) + +ssh_upload: $$(OUTPUTDIR)/index.html +\tscp -r $$(OUTPUTDIR)/* $$(SSH_USER)@$$(SSH_HOST):$$(SSH_TARGET_DIR) + +ftp_upload: $$(OUTPUTDIR)/index.html +\tlftp ftp://$$(FTP_USER)@$$(FTP_HOST) -e "mirror -R $$(OUTPUT_DIR)/* $$(FTP_TARGET_DIR) ; quit" + +github: $$(OUTPUTDIR)/index.html +\tghp-import $$(OUTPUTDIR) +\tgit push origin gh-pages + +.PHONY: html help clean ftp_upload ssh_upload dropbox_upload github +''', + + 'pelican.conf.py': '''#!/usr/bin/env python +# -*- coding: utf-8 -*- # + +AUTHOR = u"$author" +SITENAME = u"$sitename" +SITEURL = '/' + +TIMEZONE = 'Europe/Paris' + +DEFAULT_LANG='$lang' + +# Blogroll +LINKS = ( + ('Pelican', 
'http://docs.notmyidea.org/alexis/pelican/'), + ('Python.org', 'http://python.org'), + ('Jinja2', 'http://jinja.pocoo.org'), + ('You can modify those links in your config file', '#') + ) + +# Social widget +SOCIAL = ( + ('You can add links in your config file', '#'), + ) + +DEFAULT_PAGINATION = $default_pagination +''' +} + +CONF = { + 'pelican' : 'pelican', + 'pelicanopts' : '', + 'basedir': '.', + 'ftp_host': 'localhost', + 'ftp_user': 'anonymous', + 'ftp_target_dir': '/', + 'ssh_host': 'locahost', + 'ssh_user': 'root', + 'ssh_target_dir': '/var/www', + 'dropbox_dir' : '~/Dropbox/Public/', + 'default_pagination' : 10, + 'lang': 'en' +} + + +def ask(question, answer=str, default=None, l=None): + if answer == str: + r = '' + while True: + if default: + r = raw_input('> {0} [{1}] '.format(question, default)) + else: + r = raw_input('> {0} '.format(question, default)) + + r = r.strip() + + if len(r) <= 0: + if default: + r = default + break + else: + print('You must enter something') + else: + if l and len(r) != l: + print('You must enter a {0} letters long string'.format(l)) + else: + break + + return r + + elif answer == bool: + r = None + while True: + if default is True: + r = raw_input('> {0} (Y/n) '.format(question)) + elif default is False: + r = raw_input('> {0} (y/N) '.format(question)) + else: + r = raw_input('> {0} (y/n) '.format(question)) + + r = r.strip().lower() + + if r in ('y', 'yes'): + r = True + break + elif r in ('n', 'no'): + r = False + break + elif not r: + r = default + break + else: + print("You must answer `yes' or `no'") + return r + elif answer == int: + r = None + while True: + if default: + r = raw_input('> {0} [{1}] '.format(question, default)) + else: + r = raw_input('> {0} '.format(question)) + + r = r.strip() + + if not r: + r = default + break + + try: + r = int(r) + break + except: + print('You must enter an integer') + return r + else: + raise NotImplemented('Arguent `answer` must be str, bool or integer') + + +def main(): + parser = argparse.ArgumentParser( + description="A kickstarter for pelican", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-p', '--path', default=".", + help="The path to generate the blog into") + parser.add_argument('-t', '--title', metavar="title", + help='Set the title of the website') + parser.add_argument('-a', '--author', metavar="author", + help='Set the author name of the website') + parser.add_argument('-l', '--lang', metavar="lang", + help='Set the default lang of the website') + + args = parser.parse_args() + + print('''Welcome to pelican-quickstart v{v}. + +This script will help you creating a new Pelican based website. + +Please answer the following questions so this script can generate the files needed by Pelican. 
+ + '''.format(v=__version__)) + + CONF['basedir'] = os.path.abspath(ask('Where do you want to create your new Web site ?', answer=str, default=args.path)) + CONF['sitename'] = ask('How will you call your Web site ?', answer=str, default=args.title) + CONF['author'] = ask('Who will be the author of this Web site ?', answer=str, default=args.author) + CONF['lang'] = ask('What will be the default language of this Web site ?', str, args.lang or CONF['lang'], 2) + + CONF['with_pagination'] = ask('Do you want to enable article pagination ?', bool, bool(CONF['default_pagination'])) + + if CONF['with_pagination']: + CONF['default_pagination'] = ask('So how many articles per page do you want ?', int, CONF['default_pagination']) + else: + CONF['default_pagination'] = False + + mkfile = ask('Do you want to generate a Makefile to easily manage your website ?', bool, True) + + if mkfile: + if ask('Do you want to upload your website using FTP ?', answer=bool, default=False): + CONF['ftp_host'] = ask('What is the hostname of your FTP server ?', str, CONF['ftp_host']) + CONF['ftp_user'] = ask('What is your username on this server ?', str, CONF['ftp_user']) + CONF['ftp_traget_dir'] = ask('Where do you want to put your website on this server ?', str, CONF['ftp_target_dir']) + + if ask('Do you want to upload your website using SSH ?', answer=bool, default=False): + CONF['ssh_host'] = ask('What is the hostname of your SSH server ?', str, CONF['ssh_host']) + CONF['ssh_user'] = ask('What is your username on this server ?', str, CONF['ssh_user']) + CONF['ssh_traget_dir'] = ask('Where do you want to put your website on this server ?', str, CONF['ssh_target_dir']) + + if ask('Do you want to upload your website using Dropbox ?', answer=bool, default=False): + CONF['dropbox_dir'] = ask('Where is your Dropbox directory ?', str, CONF['dropbox_dir']) + + try: + os.makedirs(os.path.join(CONF['basedir'], 'src')) + except OSError, e: + print('Error: {0}'.format(e)) + + try: + os.makedirs(os.path.join(CONF['basedir'], 'output')) + except OSError, e: + print('Error: {0}'.format(e)) + + conf = string.Template(TEMPLATES['pelican.conf.py']) + try: + with open(os.path.join(CONF['basedir'], 'pelican.conf.py'), 'w') as fd: + fd.write(conf.safe_substitute(CONF)) + fd.close() + except OSError, e: + print('Error: {0}'.format(e)) + + if mkfile: + Makefile = string.Template(TEMPLATES['Makefile']) + + try: + with open(os.path.join(CONF['basedir'], 'Makefile'), 'w') as fd: + fd.write(Makefile.safe_substitute(CONF)) + fd.close() + except OSError, e: + print('Error: {0}'.format(e)) + + print('Done. 
Your new project is available at %s' % CONF['basedir']) diff --git a/tools/pelican-themes b/pelican/tools/pelican_themes.py similarity index 93% rename from tools/pelican-themes rename to pelican/tools/pelican_themes.py --- a/tools/pelican-themes +++ b/pelican/tools/pelican_themes.py @@ -1,8 +1,10 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -import os, sys, shutil import argparse +import os +import shutil +import sys try: import pelican @@ -38,11 +40,11 @@ def main(): excl= parser.add_mutually_exclusive_group() excl.add_argument('-l', '--list', dest='action', action="store_const", const='list', - help="Show the themes already installed and exit") + help="Show the themes already installed and exit") excl.add_argument('-p', '--path', dest='action', action="store_const", const='path', - help="Show the themes path and exit") + help="Show the themes path and exit") excl.add_argument('-V', '--version', action='version', version='pelican-themes v{0}'.format(__version__), - help='Print the version of this script') + help='Print the version of this script') parser.add_argument('-i', '--install', dest='to_install', nargs='+', metavar="theme path", @@ -52,16 +54,16 @@ def main(): parser.add_argument('-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path", help="Same as `--install', but create a symbolic link instead of copying the theme. Useful for theme development") parser.add_argument('-c', '--clean', dest='clean', action="store_true", - help="Remove the broken symbolic links of the theme path") + help="Remove the broken symbolic links of the theme path") parser.add_argument('-v', '--verbose', dest='verbose', action="store_true", - help="Verbose output") + help="Verbose output") args = parser.parse_args() - + if args.action: if args.action is 'list': list_themes(args.verbose) @@ -93,7 +95,7 @@ def main(): if args.clean: if args.verbose: print('Cleaning the themes directory...') - + clean(v=args.verbose) else: print('No argument given... exiting.') @@ -142,7 +144,7 @@ def remove(theme_name, v=False): print('Removing directory `' + target + "'") shutil.rmtree(target) elif os.path.exists(target): - err(target + ' : not a valid theme') + err(target + ' : not a valid theme') else: err(target + ' : no such file or directory') @@ -210,6 +212,3 @@ def clean(v=False): c+=1 print("\nRemoved {0} broken links".format(c)) - -if __name__ == '__main__': - main() diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -1,9 +1,11 @@ # -*- coding: utf-8 -*- -import re import os +import pytz +import re import shutil -from datetime import datetime + from codecs import open as _open +from datetime import datetime from itertools import groupby from operator import attrgetter from pelican.log import warning, info @@ -12,10 +14,14 @@ def get_date(string): """Return a datetime object from a string. - If no format matches the given date, raise a ValuEerror + If no format matches the given date, raise a ValueError. 
""" - formats = ['%Y-%m-%d %H:%M', '%Y/%m/%d %H:%M', '%Y-%m-%d', '%Y/%m/%d', - '%d/%m/%Y', '%d.%m.%Y', '%d.%m.%Y %H:%M'] + string = re.sub(' +', ' ', string) + formats = ['%Y-%m-%d %H:%M', '%Y/%m/%d %H:%M', + '%Y-%m-%d', '%Y/%m/%d', + '%d-%m-%Y', '%Y-%d-%m', # Weird ones + '%d/%m/%Y', '%d.%m.%Y', + '%d.%m.%Y %H:%M', '%Y-%m-%d %H:%M:%S'] for date_format in formats: try: return datetime.strptime(string, date_format) @@ -42,6 +48,7 @@ def slugify(value): value = unicode(re.sub('[^\w\s-]', '', value).strip().lower()) return re.sub('[-\s]+', '-', value) + def copy(path, source, destination, destination_path=None, overwrite=False): """Copy path from origin to destination. @@ -51,15 +58,15 @@ def copy(path, source, destination, destination_path=None, overwrite=False): :param source: the source dir :param destination: the destination dir :param destination_path: the destination path (optional) - :param overwrite: wether to overwrite the destination if already exists or not - + :param overwrite: whether to overwrite the destination if already exists + or not """ if not destination_path: destination_path = path source_ = os.path.abspath(os.path.expanduser(os.path.join(source, path))) destination_ = os.path.abspath( - os.path.expanduser(os.path.join(destination, destination_path))) + os.path.expanduser(os.path.join(destination, destination_path))) if os.path.isdir(source_): try: @@ -75,6 +82,7 @@ def copy(path, source, destination, destination_path=None, overwrite=False): shutil.copy(source_, destination_) info('copying %s to %s' % (source_, destination_)) + def clean_output_dir(path): """Remove all the files from the output directory""" @@ -102,7 +110,8 @@ def truncate_html_words(s, num, end_text='...'): length = int(num) if length <= 0: return u'' - html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input') + html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', + 'hr', 'input') # Set up regular expressions re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U) @@ -140,8 +149,9 @@ def truncate_html_words(s, num, end_text='...'): except ValueError: pass else: - # SGML: An end tag closes, back to the matching start tag, all unclosed intervening start tags with omitted end tags - open_tags = open_tags[i+1:] + # SGML: An end tag closes, back to the matching start tag, + # all unclosed intervening start tags with omitted end tags + open_tags = open_tags[i + 1:] else: # Add it to the start of the open tags list open_tags.insert(0, tagname) @@ -159,13 +169,11 @@ def truncate_html_words(s, num, end_text='...'): def process_translations(content_list): - """ Finds all translation and returns - tuple with two lists (index, translations). - Index list includes items in default language - or items which have no variant in default language. + """ Finds all translation and returns tuple with two lists (index, + translations). Index list includes items in default language or items + which have no variant in default language. 
- Also, for each content_list item, it - sets attribute 'translations' + Also, for each content_list item, it sets attribute 'translations' """ content_list.sort(key=attrgetter('slug')) grouped_by_slugs = groupby(content_list, attrgetter('slug')) @@ -175,10 +183,7 @@ def process_translations(content_list): for slug, items in grouped_by_slugs: items = list(items) # find items with default language - default_lang_items = filter( - attrgetter('in_default_lang'), - items - ) + default_lang_items = filter(attrgetter('in_default_lang'), items) len_ = len(default_lang_items) if len_ > 1: warning(u'there are %s variants of "%s"' % (len_, slug)) @@ -188,7 +193,7 @@ def process_translations(content_list): default_lang_items = items[:1] if not slug: - warning('empty slug for %r' %( default_lang_items[0].filename,)) + warning('empty slug for %r' % (default_lang_items[0].filename,)) index.extend(default_lang_items) translations.extend(filter( lambda x: x not in default_lang_items, @@ -205,9 +210,6 @@ def process_translations(content_list): def files_changed(path, extensions): """Return True if the files have changed since the last check""" - def with_extension(f): - return any(f.endswith(ext) for ext in extensions) - def file_times(path): """Return the last time files have been modified""" for root, dirs, files in os.walk(path): @@ -222,3 +224,15 @@ def file_times(path): LAST_MTIME = mtime return True return False + + +def set_date_tzinfo(d, tz_name=None): + """ Date without tzinfo shoudbe utc. + This function set the right tz to date that aren't utc and don't have + tzinfo. + """ + if tz_name is not None: + tz = pytz.timezone(tz_name) + return tz.localize(d) + else: + return d diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -1,15 +1,15 @@ # -*- coding: utf-8 -*- from __future__ import with_statement import os -import re from codecs import open from functools import partial import locale +import re from feedgenerator import Atom1Feed, Rss201rev2Feed -from pelican.utils import get_relative_path from pelican.paginator import Paginator -from pelican.log import * +from pelican.log import info +from pelican.utils import get_relative_path, set_date_tzinfo class Writer(object): @@ -28,22 +28,23 @@ def _create_new_feed(self, feed_type, context): description=context.get('SITESUBTITLE', '')) return feed - def _add_item_to_the_feed(self, feed, item): feed.add_item( title=item.title, link='%s/%s' % (self.site_url, item.url), + unique_id='%s/%s' % (self.site_url, item.url), description=item.content, categories=item.tags if hasattr(item, 'tags') else None, author_name=getattr(item, 'author', 'John Doe'), - pubdate=item.date) + pubdate=set_date_tzinfo(item.date, + self.settings.get('TIMEZONE', None))) def write_feed(self, elements, context, filename=None, feed_type='atom'): """Generate a feed with the list of articles provided - Return the feed. If no output_path or filename is specified, just return - the feed object. + Return the feed. If no output_path or filename is specified, just + return the feed object. :param elements: the articles to put on the feed. :param context: the context to get the feed metadata. 
@@ -54,12 +55,15 @@ def write_feed(self, elements, context, filename=None, feed_type='atom'): locale.setlocale(locale.LC_ALL, 'C') try: self.site_url = context.get('SITEURL', get_relative_path(filename)) - self.feed_url= '%s/%s' % (self.site_url, filename) + self.feed_url = '%s/%s' % (self.site_url, filename) feed = self._create_new_feed(feed_type, context) - for item in elements: - self._add_item_to_the_feed(feed, item) + max_items = len(elements) + if self.settings['FEED_MAX_ITEMS']: + max_items = min(self.settings['FEED_MAX_ITEMS'], max_items) + for i in xrange(max_items): + self._add_item_to_the_feed(feed, elements[i]) if filename: complete_path = os.path.join(self.output_path, filename) @@ -85,7 +89,7 @@ def write_file(self, name, template, context, relative_urls=True, :param context: dict to pass to the templates. :param relative_urls: use relative urls or absolutes ones :param paginated: dict of article list to paginate - must have the - same length (same list in different orders) + same length (same list in different orders) :param **kwargs: additional variables to pass to the templates """ @@ -111,7 +115,8 @@ def _write_file(template, localcontext, output_path, name): localcontext['SITEURL'] = get_relative_path(name) localcontext.update(kwargs) - self.update_context_contents(name, localcontext) + if relative_urls: + self.update_context_contents(name, localcontext) # check paginated paginated = paginated or {} @@ -121,12 +126,12 @@ def _write_file(template, localcontext, output_path, name): for key in paginated.iterkeys(): object_list = paginated[key] - if self.settings.get('WITH_PAGINATION'): + if self.settings.get('DEFAULT_PAGINATION'): paginators[key] = Paginator(object_list, self.settings.get('DEFAULT_PAGINATION'), self.settings.get('DEFAULT_ORPHANS')) else: - paginators[key] = Paginator(object_list, len(object_list), 0) + paginators[key] = Paginator(object_list, len(object_list)) # generated pages, and write for page_num in range(paginators.values()[0].num_pages): @@ -134,16 +139,17 @@ def _write_file(template, localcontext, output_path, name): paginated_name = name for key in paginators.iterkeys(): paginator = paginators[key] - page = paginator.page(page_num+1) - paginated_localcontext.update({'%s_paginator' % key: paginator, - '%s_page' % key: page}) + page = paginator.page(page_num + 1) + paginated_localcontext.update( + {'%s_paginator' % key: paginator, + '%s_page' % key: page}) if page_num > 0: ext = '.' + paginated_name.rsplit('.')[-1] paginated_name = paginated_name.replace(ext, - '%s%s' % (page_num + 1, ext)) + '%s%s' % (page_num + 1, ext)) _write_file(template, paginated_localcontext, self.output_path, - paginated_name) + paginated_name) else: # no pagination _write_file(template, localcontext, self.output_path, name) @@ -154,8 +160,8 @@ def update_context_contents(self, name, context): relative paths. :param name: name of the file to output. - :param context: dict that will be passed to the templates, which need to - be updated. + :param context: dict that will be passed to the templates, which need + to be updated. 
""" def _update_content(name, input): """Change all the relatives paths of the input content to relatives @@ -166,25 +172,27 @@ def _update_content(name, input): """ content = input._content - hrefs = re.compile(r'<\s*[^\>]*href\s*=(^!#)\s*(["\'])(.*?)\1') - srcs = re.compile(r'<\s*[^\>]*src\s*=\s*(["\'])(.*?)\1') - - matches = hrefs.findall(content) - matches.extend(srcs.findall(content)) - relative_paths = [] - for found in matches: - found = found[1] - if found not in relative_paths: - relative_paths.append(found) - - for relative_path in relative_paths: - if not ":" in relative_path: # we don't want to rewrite protocols - dest_path = os.sep.join((get_relative_path(name), "static", - relative_path)) - content = content.replace(relative_path, dest_path) - - return content - + hrefs = re.compile(r""" + (?P<markup><\s*[^\>]* # match tag with src and href attr + (?:href|src)\s*=\s* + ) + (?P<quote>["\']) # require value to be quoted + (?![#?]) # don't match fragment or query URLs + (?![a-z]+:) # don't match protocol URLS + (?P<path>.*?) # the url value + \2""", re.X) + + def replacer(m): + relative_path = m.group('path') + dest_path = os.path.normpath( + os.sep.join((get_relative_path(name), "static", + relative_path))) + + return m.group('markup') + m.group('quote') + dest_path \ + + m.group('quote') + + return hrefs.sub(replacer, content) + if context is None: return if hasattr(context, 'values'): @@ -203,4 +211,4 @@ def _update_content(name, input): if relative_path not in paths: paths.append(relative_path) setattr(item, "_get_content", - partial(_update_content, name, item)) + partial(_update_content, name, item)) diff --git a/samples/pelican.conf.py b/samples/pelican.conf.py --- a/samples/pelican.conf.py +++ b/samples/pelican.conf.py @@ -2,13 +2,14 @@ AUTHOR = u'Alexis Métaireau' SITENAME = u"Alexis' log" SITEURL = 'http://blog.notmyidea.org' +TIMEZONE = "Europe/Paris" GITHUB_URL = 'http://github.com/ametaireau/' DISQUS_SITENAME = "blog-notmyidea" PDF_GENERATOR = False REVERSE_CATEGORY_ORDER = True LOCALE = "" -DEFAULT_PAGINATION = 2 +DEFAULT_PAGINATION = 4 FEED_RSS = 'feeds/all.rss.xml' CATEGORY_FEED_RSS = 'feeds/%s.rss.xml' @@ -36,3 +37,4 @@ # foobar will not be used, because it's not in caps. All configuration keys # have to be in caps foobar = "barbaz" + diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,25 +1,34 @@ #!/usr/bin/env python from setuptools import setup -import sys -VERSION = "2.7.2" # find a better way to do so. 
+requires = ['feedgenerator', 'jinja2', 'pygments', 'docutils', 'pytz', 'blinker'] -requires = ['feedgenerator', 'jinja2', 'pygments', 'docutils', 'blinker'] -if sys.version_info < (2,7): +try: + import argparse +except ImportError: requires.append('argparse') +entry_points = { + 'console_scripts': [ + 'pelican = pelican:main', + 'pelican-import = pelican.tools.pelican_import:main', + 'pelican-quickstart = pelican.tools.pelican_quickstart:main', + 'pelican-themes = pelican.tools.pelican_themes:main' + ] +} + setup( name = "pelican", - version = VERSION, - url = 'http://alexis.notmyidea.org/pelican/', + version = "3.0", + url = 'http://pelican.notmyidea.org/', author = 'Alexis Metaireau', author_email = '[email protected]', - description = "A tool to generate a static blog, with restructured text (or markdown) input files.", + description = "A tool to generate a static blog from reStructuredText or Markdown input files.", long_description=open('README.rst').read(), - packages = ['pelican', 'pelican.plugins'], + packages = ['pelican', 'pelican.tools', 'pelican.plugins'], include_package_data = True, install_requires = requires, - scripts = ['bin/pelican', 'tools/pelican-themes'], + entry_points = entry_points, classifiers = ['Development Status :: 5 - Production/Stable', 'Environment :: Console', 'License :: OSI Approved :: GNU Affero General Public License v3', diff --git a/tools/importer.py b/tools/importer.py deleted file mode 100755 --- a/tools/importer.py +++ /dev/null @@ -1,121 +0,0 @@ -#! /usr/bin/env python - -from pelican.utils import slugify - -from codecs import open -import os -import argparse -import time - - -def wp2fields(xml): - """Opens a wordpress XML file, and yield pelican fields""" - from BeautifulSoup import BeautifulStoneSoup - - xmlfile = open(xml, encoding='utf-8').read() - soup = BeautifulStoneSoup(xmlfile) - items = soup.rss.channel.findAll('item') - - for item in items: - if item.fetch('wp:status')[0].contents[0] == "publish": - title = item.title.contents[0] - content = item.fetch('content:encoded')[0].contents[0] - filename = item.fetch('wp:post_name')[0].contents[0] - - raw_date = item.fetch('wp:post_date')[0].contents[0] - date_object = time.strptime(raw_date, "%Y-%m-%d %H:%M:%S") - date = time.strftime("%Y-%m-%d %H:%M", date_object) - - author = item.fetch('dc:creator')[0].contents[0].title() - categories = [(cat['nicename'],cat.contents[0]) for cat in item.fetch(domain='category')] - - tags = [tag.contents[0].title() for tag in item.fetch(domain='tag', nicename=None)] - - yield (title, content, filename, date, author, categories, tags) - -def feed2fields(file): - """Read a feed and yield pelican fields""" - import feedparser - d = feedparser.parse(file) - for entry in d.entries: - date = (time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed) - if hasattr(entry, "updated_parsed") else None) - author = entry.author if hasattr(entry, "author") else None - tags = [e['term'] for e in entry.tags] if hasattr(entry, "tags") else None - - slug = slugify(entry.title) - yield (entry.title, entry.description, slug, date, author, [], tags) - - -def build_header(title, date, author, categories, tags): - """Build a header from a list of fields""" - header = '%s\n%s\n' % (title, '#' * len(title)) - if date: - header += ':date: %s\n' % date - if categories: - header += ':category: %s\n' % ', '.join(categories) - if tags: - header += ':tags: %s\n' % ', '.join(tags) - header += '\n' - return header - - -def fields2pelican(fields, output_path): - for title, content, filename, 
date, author, categories, tags in fields: - html_filename = os.path.join(output_path, filename+'.html') - - if(len(categories) == 1): - rst_filename = os.path.join(output_path, categories[0][0], filename+'.rst') - if not os.path.isdir(os.path.join(output_path, categories[0][0])): - os.mkdir(os.path.join(output_path, categories[0][0])) - else: - rst_filename = os.path.join(output_path, filename+'.rst') - - with open(html_filename, 'w', encoding='utf-8') as fp: - fp.write(content) - - os.system('pandoc --from=html --to=rst -o %s %s' % (rst_filename, - html_filename)) - - os.remove(html_filename) - - with open(rst_filename, 'r', encoding='utf-8') as fs: - content = fs.read() - with open(rst_filename, 'w', encoding='utf-8') as fs: - categories = [x[1] for x in categories] - header = build_header(title, date, author, categories, tags) - fs.write(header + content) - - -def main(input_type, input, output_path): - if input_type == 'wordpress': - fields = wp2fields(input) - elif input_type == 'feed': - fields = feed2fields(input) - - fields2pelican(fields, output_path) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="Transform even feed or XML files to rst files." - "Be sure to have pandoc installed") - - parser.add_argument(dest='input', help='The input file to read') - parser.add_argument('--wpfile', action='store_true', dest='wpfile', - help='Wordpress XML export') - parser.add_argument('--feed', action='store_true', dest='feed', - help='feed to parse') - parser.add_argument('-o', '--output', dest='output', default='output', - help='Output path') - args = parser.parse_args() - - input_type = None - if args.wpfile: - input_type = 'wordpress' - elif args.feed: - input_type = 'feed' - else: - print "you must provide either --wpfile or --feed options" - exit() - main(input_type, args.input, args.output)
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py deleted file mode 100644 --- a/pelican/tests/test_contents.py +++ /dev/null @@ -1,52 +0,0 @@ -from unittest2 import TestCase - -from pelican.contents import Page -from pelican.settings import _DEFAULT_CONFIG - -class TestPage(TestCase): - - def test_use_args(self): - # creating a page with arguments passed to the connstructor should use - # them to initialise object's attributes - metadata = {'foo': 'bar', 'foobar': 'baz'} - page = Page('content', metadata=metadata) - for key, value in metadata.items(): - self.assertTrue(hasattr(page, key)) - self.assertEqual(value, getattr(page, key)) - self.assertEqual(page.content, "content") - - def test_mandatory_properties(self): - # if the title is not set, must throw an exception - page = Page('content') - with self.assertRaises(NameError) as cm: - page.check_properties() - - page = Page('content', metadata={'title': 'foobar'}) - page.check_properties() - - def test_slug(self): - # if a title is given, it should be used to generate the slug - page = Page('content', {'title': 'foobar is foo'}) - self.assertEqual(page.slug, 'foobar-is-foo') - - def test_defaultlang(self): - # if no lang is given, default to the default one - page = Page('content') - self.assertEqual(page.lang, _DEFAULT_CONFIG['DEFAULT_LANG']) - - # it is possible to specify the lang in the metadata infos - page = Page('content', {'lang': 'fr'}) - self.assertEqual(page.lang, 'fr') - - def test_save_as(self): - # if a lang is not the default lang, save_as should be set accordingly - page = Page('content', {'title': 'foobar', 'lang': 'fr'}) #default lang is en - self.assertEqual(page.save_as, "foobar-fr.html") - - # otherwise, if a title is defined, save_as should be set - page = Page('content', {'title': 'foobar'}) - page.save_as = 'foobar.html' - - # if no title is given, there is no save_as - page = Page('content') - self.assertFalse(hasattr(page, 'save_as')) diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py deleted file mode 100644 --- a/pelican/tests/test_readers.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding: utf-8 -import unittest2 -import os -import datetime -from pelican import readers - -CUR_DIR = os.path.dirname(__file__) -CONTENT_PATH = os.path.join(CUR_DIR, '..', '..', 'samples', 'content') - -def _filename(*args): - return os.path.join(CONTENT_PATH, *args) - - -class RstReaderTest(unittest2.TestCase): - - def test_metadata(self): - reader = readers.RstReader() - content, metadata = reader.read(_filename('super_article.rst')) - expected = { - 'category': 'yeah', - 'author': u'Alexis Métaireau', - 'title': 'This is a super article !', - 'summary': 'Multi-line metadata should be supported\nas well as <strong>inline markup</strong>.', - 'date': datetime.datetime(2010, 12, 2, 10, 14), - 'tags': ['foo', 'bar', 'foobar'], - } - self.assertDictEqual(metadata, expected) diff --git a/pelican/tests/test_settings.py b/pelican/tests/test_settings.py deleted file mode 100644 --- a/pelican/tests/test_settings.py +++ /dev/null @@ -1,34 +0,0 @@ -from unittest2 import TestCase -import os - -from pelican.settings import read_settings, _DEFAULT_CONFIG - -SETTINGS = os.sep.join([os.path.dirname(os.path.abspath(__file__)), - "../../samples/pelican.conf.py"]) - -class SettingsTest(TestCase): - - - def test_read_settings(self): - # providing a file, it should read it, replace the default values and append - # new values to the settings, if any - settings = read_settings(SETTINGS) - - # overwrite 
existing settings - self.assertEqual(settings.get('SITENAME'), u"Alexis' log") - - # add new settings - self.assertEqual(settings.get('SITEURL'), 'http://blog.notmyidea.org') - - # keep default settings if not defined - self.assertEqual(settings.get('DEFAULT_CATEGORY'), - _DEFAULT_CONFIG['DEFAULT_CATEGORY']) - - # do not copy keys not in caps - self.assertNotIn('foobar', settings) - - - def test_empty_read_settings(self): - # providing no file should return the default values - settings = read_settings(None) - self.assertDictEqual(settings, _DEFAULT_CONFIG) diff --git a/pelican/tests/__init__.py b/pelican/themes/notmyidea/templates/authors.html similarity index 100% rename from pelican/tests/__init__.py rename to pelican/themes/notmyidea/templates/authors.html diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 diff --git a/tests/content/article.rst b/tests/content/article.rst new file mode 100644 --- /dev/null +++ b/tests/content/article.rst @@ -0,0 +1,4 @@ +Article title +############# + +This is some content. With some stuff to "typogrify". diff --git a/tests/content/article_with_metadata.rst b/tests/content/article_with_metadata.rst new file mode 100644 --- /dev/null +++ b/tests/content/article_with_metadata.rst @@ -0,0 +1,12 @@ + +This is a super article ! +######################### + +:tags: foo, bar, foobar +:date: 2010-12-02 10:14 +:category: yeah +:author: Alexis Métaireau +:summary: + Multi-line metadata should be supported + as well as **inline markup**. +:custom_field: http://notmyidea.org diff --git a/tests/content/article_with_uppercase_metadata.rst b/tests/content/article_with_uppercase_metadata.rst new file mode 100644 --- /dev/null +++ b/tests/content/article_with_uppercase_metadata.rst @@ -0,0 +1,6 @@ + +This is a super article ! +######################### + +:Category: Yeah + diff --git a/tests/default_conf.py b/tests/default_conf.py new file mode 100644 --- /dev/null +++ b/tests/default_conf.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +AUTHOR = u'Alexis Métaireau' +SITENAME = u"Alexis' log" +SITEURL = 'http://blog.notmyidea.org' +TIMEZONE = 'UTC' + +GITHUB_URL = 'http://github.com/ametaireau/' +DISQUS_SITENAME = "blog-notmyidea" +PDF_GENERATOR = False +REVERSE_CATEGORY_ORDER = True +LOCALE = "" +DEFAULT_PAGINATION = 2 + +FEED_RSS = 'feeds/all.rss.xml' +CATEGORY_FEED_RSS = 'feeds/%s.rss.xml' + +LINKS = (('Biologeek', 'http://biologeek.org'), + ('Filyb', "http://filyb.info/"), + ('Libert-fr', "http://www.libert-fr.com"), + ('N1k0', "http://prendreuncafe.com/blog/"), + (u'Tarek Ziadé', "http://ziade.org/blog"), + ('Zubin Mithra', "http://zubin71.wordpress.com/"),) + +SOCIAL = (('twitter', 'http://twitter.com/ametaireau'), + ('lastfm', 'http://lastfm.com/user/akounet'), + ('github', 'http://github.com/ametaireau'),) + +# global metadata to all the contents +DEFAULT_METADATA = (('yeah', 'it is'),) + +# static paths will be copied under the same name +STATIC_PATHS = ["pictures",] + +# A list of files to copy from the source to the destination +FILES_TO_COPY = (('extra/robots.txt', 'robots.txt'),) + +# foobar will not be used, because it's not in caps. 
All configuration keys +# have to be in caps +foobar = "barbaz" diff --git a/tests/support.py b/tests/support.py new file mode 100644 --- /dev/null +++ b/tests/support.py @@ -0,0 +1,26 @@ +from contextlib import contextmanager +from tempfile import mkdtemp +from shutil import rmtree + +from pelican.contents import Article + + +@contextmanager +def temporary_folder(): + """creates a temporary folder, return it and delete it afterwards. + + This allows to do something like this in tests: + + >>> with temporary_folder() as d: + # do whatever you want + """ + tempdir = mkdtemp() + yield tempdir + rmtree(tempdir) + + +def get_article(title, slug, content, lang, extra_metadata=None): + metadata = {'slug': slug, 'title': title, 'lang': lang} + if extra_metadata is not None: + metadata.update(extra_metadata) + return Article(content, metadata=metadata) diff --git a/tests/test_contents.py b/tests/test_contents.py new file mode 100644 --- /dev/null +++ b/tests/test_contents.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +from __future__ import with_statement +try: + from unittest2 import TestCase, skip +except ImportError, e: + from unittest import TestCase, skip # NOQA + +from pelican.contents import Page +from pelican.settings import _DEFAULT_CONFIG + +from jinja2.utils import generate_lorem_ipsum + +# generate one paragraph, enclosed with <p> +TEST_CONTENT = str(generate_lorem_ipsum(n=1)) +TEST_SUMMARY = generate_lorem_ipsum(n=1, html=False) + +class TestPage(TestCase): + + def setUp(self): + super(TestPage, self).setUp() + self.page_kwargs = { + 'content': TEST_CONTENT, + 'metadata': { + 'summary': TEST_SUMMARY, + 'title': 'foo bar', + 'author': 'Blogger', + }, + } + + def test_use_args(self): + """Creating a page with arguments passed to the constructor should use + them to initialise object's attributes. + + """ + metadata = {'foo': 'bar', 'foobar': 'baz', 'title': 'foobar', } + page = Page(TEST_CONTENT, metadata=metadata) + for key, value in metadata.items(): + self.assertTrue(hasattr(page, key)) + self.assertEqual(value, getattr(page, key)) + self.assertEqual(page.content, TEST_CONTENT) + + def test_mandatory_properties(self): + """If the title is not set, must throw an exception.""" + self.assertRaises(AttributeError, Page, 'content') + page = Page(**self.page_kwargs) + page.check_properties() + + def test_summary_from_metadata(self): + """If a :summary: metadata is given, it should be used.""" + page = Page(**self.page_kwargs) + self.assertEqual(page.summary, TEST_SUMMARY) + + def test_slug(self): + """If a title is given, it should be used to generate the slug.""" + page = Page(**self.page_kwargs) + self.assertEqual(page.slug, 'foo-bar') + + def test_defaultlang(self): + """If no lang is given, default to the default one.""" + page = Page(**self.page_kwargs) + self.assertEqual(page.lang, _DEFAULT_CONFIG['DEFAULT_LANG']) + + # it is possible to specify the lang in the metadata infos + self.page_kwargs['metadata'].update({'lang': 'fr', }) + page = Page(**self.page_kwargs) + self.assertEqual(page.lang, 'fr') + + def test_save_as(self): + """If a lang is not the default lang, save_as should be set + accordingly. 
+ + """ + # if a title is defined, save_as should be set + page = Page(**self.page_kwargs) + self.assertEqual(page.save_as, "pages/foo-bar.html") + + # if a language is defined, save_as should include it accordingly + self.page_kwargs['metadata'].update({'lang': 'fr', }) + page = Page(**self.page_kwargs) + self.assertEqual(page.save_as, "pages/foo-bar-fr.html") + + def test_datetime(self): + """If DATETIME is set to a tuple, it should be used to override LOCALE + """ + from datetime import datetime + from sys import platform + dt = datetime(2015, 9, 13) + # make a deep copy of page_kawgs + page_kwargs = dict([(key, self.page_kwargs[key]) for key in + self.page_kwargs]) + for key in page_kwargs: + if not isinstance(page_kwargs[key], dict): + break + page_kwargs[key] = dict([(subkey, page_kwargs[key][subkey]) + for subkey in page_kwargs[key]]) + # set its date to dt + page_kwargs['metadata']['date'] = dt + page = Page(**page_kwargs) + + self.assertEqual(page.locale_date, + unicode(dt.strftime(_DEFAULT_CONFIG['DEFAULT_DATE_FORMAT']), + 'utf-8')) + + page_kwargs['settings'] = dict([(x, _DEFAULT_CONFIG[x]) for x in + _DEFAULT_CONFIG]) + + # I doubt this can work on all platforms ... + if platform == "win32": + locale = 'jpn' + else: + locale = 'ja_JP.utf8' + page_kwargs['settings']['DATE_FORMATS'] = {'jp': (locale, + '%Y-%m-%d(%a)')} + page_kwargs['metadata']['lang'] = 'jp' + + import locale as locale_module + try: + page = Page(**page_kwargs) + self.assertEqual(page.locale_date, u'2015-09-13(\u65e5)') + # above is unicode in Japanese: 2015-09-13(��) + except locale_module.Error: + # The constructor of ``Page`` will try to set the locale to + # ``ja_JP.utf8``. But this attempt will failed when there is no + # such locale in the system. You can see which locales there are + # in your system with ``locale -a`` command. + # + # Until we find some other method to test this functionality, we + # will simply skip this test. + skip("There is no locale %s in this system." % locale) diff --git a/tests/test_generators.py b/tests/test_generators.py new file mode 100644 --- /dev/null +++ b/tests/test_generators.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +try: + import unittest2 as unittest +except ImportError, e: + import unittest # NOQA + +from pelican.generators import ArticlesGenerator +from pelican.settings import _DEFAULT_CONFIG + +from mock import MagicMock + + +class TestArticlesGenerator(unittest.TestCase): + + def test_generate_feeds(self): + + generator = ArticlesGenerator(None, {'FEED': _DEFAULT_CONFIG['FEED']}, + None, _DEFAULT_CONFIG['THEME'], None, + None) + writer = MagicMock() + generator.generate_feeds(writer) + writer.write_feed.assert_called_with([], None, 'feeds/all.atom.xml') + + generator = ArticlesGenerator(None, {'FEED': None}, None, + _DEFAULT_CONFIG['THEME'], None, None) + writer = MagicMock() + generator.generate_feeds(writer) + self.assertFalse(writer.write_feed.called) diff --git a/tests/test_pelican.py b/tests/test_pelican.py new file mode 100644 --- /dev/null +++ b/tests/test_pelican.py @@ -0,0 +1,31 @@ +import unittest +import os + +from support import temporary_folder + +from pelican import Pelican +from pelican.settings import read_settings + +SAMPLES_PATH = os.path.abspath(os.sep.join( + (os.path.dirname(os.path.abspath(__file__)), "..", "samples"))) + +INPUT_PATH = os.path.join(SAMPLES_PATH, "content") +SAMPLE_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf.py") + + +class TestPelican(unittest.TestCase): + # general functional testing for pelican. 
Basically, this test case tries + # to run pelican in different situations and see how it behaves + + def test_basic_generation_works(self): + # when running pelican without settings, it should pick up the default + # ones and generate the output without raising any exception / issuing + # any warning. + with temporary_folder() as temp_path: + pelican = Pelican(path=INPUT_PATH, output_path=temp_path) + pelican.run() + + # the same thing with a specified set of settins should work + with temporary_folder() as temp_path: + pelican = Pelican(path=INPUT_PATH, output_path=temp_path, + settings=read_settings(SAMPLE_CONFIG)) diff --git a/tests/test_readers.py b/tests/test_readers.py new file mode 100644 --- /dev/null +++ b/tests/test_readers.py @@ -0,0 +1,65 @@ +# coding: utf-8 +try: + import unittest2 as unittest +except ImportError, e: + import unittest + +import datetime +import os + +from pelican import readers + +CUR_DIR = os.path.dirname(__file__) +CONTENT_PATH = os.path.join(CUR_DIR, 'content') + + +def _filename(*args): + return os.path.join(CONTENT_PATH, *args) + + +class RstReaderTest(unittest.TestCase): + + def test_article_with_metadata(self): + reader = readers.RstReader({}) + content, metadata = reader.read(_filename('article_with_metadata.rst')) + expected = { + 'category': 'yeah', + 'author': u'Alexis Métaireau', + 'title': 'This is a super article !', + 'summary': 'Multi-line metadata should be supported\nas well as'\ + ' <strong>inline markup</strong>.', + 'date': datetime.datetime(2010, 12, 2, 10, 14), + 'tags': ['foo', 'bar', 'foobar'], + 'custom_field': 'http://notmyidea.org', + } + + for key, value in expected.items(): + self.assertEquals(value, metadata[key], key) + + def test_article_metadata_key_lowercase(self): + """Keys of metadata should be lowercase.""" + reader = readers.RstReader({}) + content, metadata = reader.read(_filename('article_with_uppercase_metadata.rst')) + + self.assertIn('category', metadata, "Key should be lowercase.") + self.assertEquals('Yeah', metadata.get('category'), "Value keeps cases.") + + def test_typogrify(self): + # if nothing is specified in the settings, the content should be + # unmodified + content, _ = readers.read_file(_filename('article.rst')) + expected = "<p>This is some content. With some stuff to "\ + "&quot;typogrify&quot;.</p>\n" + + self.assertEqual(content, expected) + + try: + # otherwise, typogrify should be applied + content, _ = readers.read_file(_filename('article.rst'), + settings={'TYPOGRIFY': True}) + expected = "<p>This is some content. 
With some stuff to&nbsp;"\ + "&#8220;typogrify&#8221;.</p>\n" + + self.assertEqual(content, expected) + except ImportError: + return unittest.skip('need the typogrify distribution') diff --git a/tests/test_settings.py b/tests/test_settings.py new file mode 100644 --- /dev/null +++ b/tests/test_settings.py @@ -0,0 +1,37 @@ +try: + import unittest2 +except ImportError, e: + import unittest as unittest2 + +from os.path import dirname, abspath, join + +from pelican.settings import read_settings, _DEFAULT_CONFIG + + +class TestSettingsFromFile(unittest2.TestCase): + """Providing a file, it should read it, replace the default values and + append new values to the settings, if any + """ + def setUp(self): + self.PATH = abspath(dirname(__file__)) + default_conf = join(self.PATH, 'default_conf.py') + self.settings = read_settings(default_conf) + + def test_overwrite_existing_settings(self): + self.assertEqual(self.settings.get('SITENAME'), u"Alexis' log") + self.assertEqual(self.settings.get('SITEURL'), + 'http://blog.notmyidea.org') + + def test_keep_default_settings(self): + """keep default settings if not defined""" + self.assertEqual(self.settings.get('DEFAULT_CATEGORY'), + _DEFAULT_CONFIG['DEFAULT_CATEGORY']) + + def test_dont_copy_small_keys(self): + """do not copy keys not in caps.""" + self.assertNotIn('foobar', self.settings) + + def test_read_empty_settings(self): + """providing no file should return the default values.""" + settings = read_settings(None) + self.assertDictEqual(settings, _DEFAULT_CONFIG) diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +try: + import unittest2 as unittest +except ImportError: + import unittest # NOQA + +import os +import datetime +import time + +from pelican import utils +from support import get_article + + +class TestUtils(unittest.TestCase): + + def test_get_date(self): + # valid ones + date = datetime.datetime(year=2012, month=11, day=22) + date_hour = datetime.datetime(year=2012, month=11, day=22, hour=22, + minute=11) + date_hour_sec = datetime.datetime(year=2012, month=11, day=22, hour=22, + minute=11, second=10) + dates = {'2012-11-22': date, + '2012/11/22': date, + '2012-11-22 22:11': date_hour, + '2012/11/22 22:11': date_hour, + '22-11-2012': date, + '22/11/2012': date, + '22.11.2012': date, + '2012-22-11': date, + '22.11.2012 22:11': date_hour, + '2012-11-22 22:11:10': date_hour_sec} + + for value, expected in dates.items(): + self.assertEquals(utils.get_date(value), expected, value) + + # invalid ones + invalid_dates = ('2010-110-12', 'yay') + for item in invalid_dates: + self.assertRaises(ValueError, utils.get_date, item) + + def test_slugify(self): + + samples = (('this is a test', 'this-is-a-test'), + ('this is a test', 'this-is-a-test'), + (u'this → is ← a ↑ test', 'this-is-a-test'), + ('this--is---a test', 'this-is-a-test')) + + for value, expected in samples: + self.assertEquals(utils.slugify(value), expected) + + def test_get_relative_path(self): + + samples = (('/test/test', '../../.'), + ('/test/test/', '../../../.'), + ('/', '../.')) + + for value, expected in samples: + self.assertEquals(utils.get_relative_path(value), expected) + + def test_process_translations(self): + # create a bunch of articles + fr_article1 = get_article(lang='fr', slug='yay', title='Un titre', + content='en français') + en_article1 = get_article(lang='en', slug='yay', title='A title', + content='in english') + + articles = [fr_article1, en_article1] + + 
index, trans = utils.process_translations(articles) + + self.assertIn(en_article1, index) + self.assertIn(fr_article1, trans) + self.assertNotIn(en_article1, trans) + self.assertNotIn(fr_article1, index) + + def test_files_changed(self): + "Test if file changes are correctly detected" + + path = os.path.join(os.path.dirname(__file__), 'content') + filename = os.path.join(path, 'article_with_metadata.rst') + changed = utils.files_changed(path, 'rst') + self.assertEquals(changed, True) + + changed = utils.files_changed(path, 'rst') + self.assertEquals(changed, False) + + t = time.time() + os.utime(filename, (t, t)) + changed = utils.files_changed(path, 'rst') + self.assertEquals(changed, True) + self.assertAlmostEqual(utils.LAST_MTIME, t, places=2)
Add a quickstart script A quickstart script (`pelican-quickstart`?) could ask questions and generate a settings.py file accordingly, to ease the blog creation process, like the `sphinx-quickstart` script does. The idea was originally submitted by @thibautd Skribit no longer available Skribit shut down its service on July 31st, 2011, and is no longer available. Wouldn't it be better to remove its support from Pelican? Clean up the code for this service... it's just an idea. Well, the code and the templates. Unicode Problems Pelican has trouble with Unicode characters in file names. ``` CRITICAL: 'ascii' codec can't decode byte 0xcc in position 18: ordinal not in range(128) ``` Critical error with latest version of pelican command Running "pelican -s sample.conf.py" with version 174f1b50c8 produces the following error: ``` CRITICAL: __init__() takes at most 3 arguments (5 given) ``` I installed Pelican via two different methods, but both methods yielded the same error above. Installation method 1: ``` mkvirtualenv pelican git clone git://github.com/ametaireau/pelican.git cd pelican python setup.py develop ``` Installation method 2: ``` mkvirtualenv pelican pip install -e git://github.com/ametaireau/pelican#egg=pelican ``` These errors occurred with a stock version of the sample pelican.conf.py settings file and with the default theme. I suspect this problem was introduced in commit 8009324a3b from pull request #230. (I don't see any tests associated with these new changes; perhaps some tests would be helpful in preventing this from recurring in the future.)
I think we could init some tools with this script: - A git/hg repo for the blog with a post-hook automatically installed, so pelican regenerates the blog at each commit. (We could ask the user if he wants to enable this feature) - A Makefile to push the blog online, like the one posted in #128 Nice idea! --- EDIT: Maybe we should look at `sphinx-quickstart`... Hello, I've started writing a script: https://gist.github.com/1025236 It writes a minimal pelican settings file with a `Makefile` which can generate the blog and upload it over FTP or SSH, or with Dropbox. Feel free to hack on it. I think you should use argparse. The script could read config variables from the command line and, if not present as arguments, from the user's answers. You should also import from __future__: ``` from __future__ import unicode_literals, print_function ``` to make your print() function work with Python 2.6+. I'll take a look later; these are just first impressions after reading quickly, I didn't test it. I disagree with the unicode_literals import. For supporting Python3 as well, there is distribute with 2to3. Pelican does not need the Py3 way of handling strings / bytes, so this import is useless. Stick with the 2.x way of handling unicode when using 2.x. See also http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/ @Skami18: please drop the French strings in your code (e.g. "Les différents profils, facultatif"). English is understood by way more people. @derdon: Sorry, it was a part of my config file and I forgot to remove the comments. @thibautd: Good idea; argparse is used by pelican, so I can use it. Apart from that, I'm running Python 2.6 and I have no problem with the `print` function, even without using the `__future__` module. --- A little update: https://gist.github.com/1025236/dfa695e67482477907c79ae709ab827b20b18b04
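A rough sketch of the kind of interactive prompt being discussed; the helper names, questions, output file name and generated settings here are illustrative guesswork, not the actual pelican-quickstart implementation:

```python
# Hypothetical sketch of an interactive quickstart prompt; names and the
# generated settings are illustrative only, not pelican-quickstart itself.
import argparse
import os

try:                      # keep the sketch runnable on Python 2 as well
    input = raw_input
except NameError:
    pass

SETTINGS_TEMPLATE = """# -*- coding: utf-8 -*-
AUTHOR = u'{author}'
SITENAME = u'{sitename}'
SITEURL = '{siteurl}'
"""


def ask(question, default=''):
    """Prompt the user, falling back to the default on an empty answer."""
    answer = input('%s [%s]: ' % (question, default)).strip()
    return answer or default


def main():
    parser = argparse.ArgumentParser(description='Generate a minimal settings file.')
    parser.add_argument('-p', '--path', default=os.curdir,
                        help='where to write the generated settings')
    args = parser.parse_args()

    settings = SETTINGS_TEMPLATE.format(
        author=ask('Author name', 'John Doe'),
        sitename=ask('Site name', 'My blog'),
        siteurl=ask('Site URL (no trailing slash)', 'http://example.com'),
    )
    with open(os.path.join(args.path, 'pelicanconf.py'), 'w') as fp:
        fp.write(settings)


if __name__ == '__main__':
    main()
```

The same question-and-answer loop could also offer to write the Makefile and install the post-commit hook mentioned above.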
2012-03-20T00:35:11Z
[]
[]
getpelican/pelican
280
getpelican__pelican-280
[ "275" ]
0ed6cf77434f8e8a86d11bf632c1253bee1e9b32
diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -128,9 +128,15 @@ def configure_settings(settings, default_settings=None, filename=None): else: logger.warn("LOCALE option doesn't contain a correct value") - # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL - if ('SITEURL' in settings) and (not 'FEED_DOMAIN' in settings): - settings['FEED_DOMAIN'] = settings['SITEURL'] + if ('SITEURL' in settings): + # If SITEURL has a trailing slash, remove it and provide a warning + siteurl = settings['SITEURL'] + if (siteurl[len(siteurl) - 1:] == '/'): + settings['SITEURL'] = siteurl[:-1] + logger.warn("Removed extraneous trailing slash from SITEURL.") + # If SITEURL is defined but FEED_DOMAIN isn't, set FEED_DOMAIN = SITEURL + if not 'FEED_DOMAIN' in settings: + settings['FEED_DOMAIN'] = settings['SITEURL'] # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined if (('FEED' in settings) or ('FEED_RSS' in settings)) and (not 'FEED_DOMAIN' in settings):
diff --git a/tests/test_settings.py b/tests/test_settings.py --- a/tests/test_settings.py +++ b/tests/test_settings.py @@ -36,6 +36,11 @@ def test_read_empty_settings(self): def test_configure_settings(self): """Manipulations to settings should be applied correctly.""" + # SITEURL should not have a trailing slash + settings = {'SITEURL': 'http://blog.notmyidea.org/', 'LOCALE': ''} + configure_settings(settings) + self.assertEqual(settings['SITEURL'], 'http://blog.notmyidea.org') + # FEED_DOMAIN, if undefined, should default to SITEURL settings = {'SITEURL': 'http://blog.notmyidea.org', 'LOCALE': ''} configure_settings(settings)
Feeds generate malformed URLs (with double slash) When setting a site URL that ends with a slash (like http://feldboris.alwaysdata.net/blog/), pelican generates URLs in feeds with a double slash (like http://feldboris.alwaysdata.net/blog//feeds/all.atom.xml) just after the site URL. The W3C feed validator shows a warning about these double slashes. It may also be the cause of an RSS problem (my RSS client shows old feed entries as unread), since post fields now contain a double slash as well.
Well, the obvious solution seems to be to not add a / at the end of the SITEURL. Does it cause any problem to not have it? I noticed the same problem and eventually realized that I just needed to omit the trailing slash. Perhaps we could make this clearer in the docs for the SITEURL setting, explaining that the value should not have a trailing slash. We could even detect this in pelican and throw a warning + remove it?
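The merged patch above amounts to normalising SITEURL once, before FEED_DOMAIN falls back to it. A minimal standalone sketch of that idea (the helper name is made up; this is not the actual configure_settings code):

```python
import logging

logger = logging.getLogger(__name__)


def normalize_siteurl(settings):
    """Strip a trailing slash from SITEURL so generated feed URLs don't get '//'."""
    siteurl = settings.get('SITEURL')
    if siteurl and siteurl.endswith('/'):
        settings['SITEURL'] = siteurl.rstrip('/')
        logger.warning("Removed extraneous trailing slash from SITEURL.")
    # FEED_DOMAIN defaults to the (normalised) SITEURL when it isn't set explicitly
    if 'SITEURL' in settings and 'FEED_DOMAIN' not in settings:
        settings['FEED_DOMAIN'] = settings['SITEURL']
    return settings


print(normalize_siteurl({'SITEURL': 'http://blog.notmyidea.org/'}))
# -> {'SITEURL': 'http://blog.notmyidea.org', 'FEED_DOMAIN': 'http://blog.notmyidea.org'}
```

Normalising before the FEED_DOMAIN fallback is what prevents the double slash from ever reaching the feed writer.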
2012-03-23T14:26:38Z
[]
[]
getpelican/pelican
389
getpelican__pelican-389
[ "380" ]
b2ff07d58cc0949cba20da5b4ceffe8979a6c479
diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -357,10 +357,13 @@ class PagesGenerator(Generator): def __init__(self, *args, **kwargs): self.pages = [] + self.hidden_pages = [] + self.hidden_translations = [] super(PagesGenerator, self).__init__(*args, **kwargs) def generate_context(self): all_pages = [] + hidden_pages = [] for f in self.get_files( os.path.join(self.path, self.settings['PAGE_DIR']), exclude=self.settings['PAGE_EXCLUDES']): @@ -373,15 +376,25 @@ def generate_context(self): filename=f) if not is_valid_content(page, f): continue - all_pages.append(page) + if page.status == "published": + all_pages.append(page) + elif page.status == "hidden": + hidden_pages.append(page) + else: + logger.warning(u"Unknown status %s for file %s, skipping it." % + (repr(unicode.encode(page.status, 'utf-8')), + repr(f))) + self.pages, self.translations = process_translations(all_pages) + self.hidden_pages, self.hidden_translations = process_translations(hidden_pages) self._update_context(('pages', )) self.context['PAGES'] = self.pages def generate_output(self, writer): - for page in chain(self.translations, self.pages): + for page in chain(self.translations, self.pages, + self.hidden_translations, self.hidden_pages): writer.write_file(page.save_as, self.get_template('page'), self.context, page=page, relative_urls=self.settings.get('RELATIVE_URLS'))
diff --git a/tests/TestPages/bad_page.rst b/tests/TestPages/bad_page.rst new file mode 100644 --- /dev/null +++ b/tests/TestPages/bad_page.rst @@ -0,0 +1,8 @@ +This is a test bad page +####################### + +:status: invalid + +The quick brown fox jumped over the lazy dog's back. + +The status here is invalid, the page should not render. diff --git a/tests/TestPages/hidden_page.rst b/tests/TestPages/hidden_page.rst new file mode 100644 --- /dev/null +++ b/tests/TestPages/hidden_page.rst @@ -0,0 +1,8 @@ +This is a test hidden page +########################## + +:status: hidden + +The quick brown fox jumped over the lazy dog's back. + +This page is hidden diff --git a/tests/TestPages/hidden_page_markdown.md b/tests/TestPages/hidden_page_markdown.md new file mode 100644 --- /dev/null +++ b/tests/TestPages/hidden_page_markdown.md @@ -0,0 +1,12 @@ +title: This is a markdown test hidden page +status: hidden + +Test Markdown File Header +========================= + +Used for pelican test +--------------------- + +The quick brown fox jumped over the lazy dog's back. + +This page is hidden diff --git a/tests/TestPages/page.rst b/tests/TestPages/page.rst new file mode 100644 --- /dev/null +++ b/tests/TestPages/page.rst @@ -0,0 +1,4 @@ +This is a test page +################### + +The quick brown fox jumped over the lazy dog's back. diff --git a/tests/TestPages/page_markdown.md b/tests/TestPages/page_markdown.md new file mode 100644 --- /dev/null +++ b/tests/TestPages/page_markdown.md @@ -0,0 +1,9 @@ +title: This is a markdown test page + +Test Markdown File Header +========================= + +Used for pelican test +--------------------- + +The quick brown fox jumped over the lazy dog's back. diff --git a/tests/test_generators.py b/tests/test_generators.py --- a/tests/test_generators.py +++ b/tests/test_generators.py @@ -4,7 +4,7 @@ import os import re -from pelican.generators import ArticlesGenerator, LessCSSGenerator +from pelican.generators import ArticlesGenerator, LessCSSGenerator, PagesGenerator from pelican.settings import _DEFAULT_CONFIG from .support import unittest, temporary_folder, skipIfNoExecutable @@ -94,6 +94,48 @@ def test_direct_templates_save_as_false(self): write.assert_called_count == 0 +class TestPageGenerator(unittest.TestCase): + """ + Every time you want to test for a new field; + Make sure the test pages in "TestPages" have all the fields + Add it to distilled in distill_pages_for_test + Then update the assertItemsEqual in test_generate_context to match expected + """ + + def distill_pages_for_test(self, pages): + distilled = [] + for page in pages: + distilled.append([ + page.title, + page.status + ] + ) + return distilled + + def test_generate_context(self): + settings = _DEFAULT_CONFIG.copy() + + settings['PAGE_DIR'] = 'TestPages' + generator = PagesGenerator(settings.copy(), settings, CUR_DIR, + _DEFAULT_CONFIG['THEME'], None, + _DEFAULT_CONFIG['MARKUP']) + generator.generate_context() + pages = self.distill_pages_for_test(generator.pages) + hidden_pages = self.distill_pages_for_test(generator.hidden_pages) + + pages_expected = [ + [u'This is a test page', 'published'], + [u'This is a markdown test page', 'published'] + ] + hidden_pages_expected = [ + [u'This is a test hidden page', 'hidden'], + [u'This is a markdown test hidden page', 'hidden'] + ] + + self.assertItemsEqual(pages_expected,pages) + self.assertItemsEqual(hidden_pages_expected,hidden_pages) + + class TestLessCSSGenerator(unittest.TestCase): LESS_CONTENT = """
"Hidden" pages I would like to use the same engine to be able to render my error pages (404, 50x, ect) but I don't want them to show on the menu with the rest of them. Maybe there should be a way to mark certain pages for exclusion from being added to the pages variable seen by templates? I will poke around to see if there is a more optimal way of handling this.
You can set the PAGE_EXCLUDES parameter in your settings file, e.g. PAGE_EXCLUDES = ('404.html', '505.html',). I see this is undocumented, so it's not your fault for not seeing it. This feature does not cover what I need: it takes a directory (I did extensive testing to figure this out) and excludes it from being built completely. I want my 404.md and 50x.md files to be built as pages but not added to the menu, even when `DISPLAY_PAGES_ON_MENU` is set. Oh, so you mean something like adding a metadata field like :hidden: true I think something similar is there already, at least with drafts. Exactly! `status: draft` only works for articles though, placing them in the drafts folder and removing them from any lists. If the hidden or draft status worked for pages it would be perfect. I am going to see if I can hack the feature in myself if there are no complaints! I was about to implement that but I'll go to sleep instead, hack away :) Basically, see how :status: draft is used; you'll want to add a similar mechanism to PageGenerator in generators.py. Make sure that status gets a default value if not specified. It should be almost 1:1 with articles. Good luck!
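What was eventually merged (see the patch earlier in this entry) boils down to a status dispatch while collecting pages. A simplified sketch of that classification step, assuming page objects with an optional `status` attribute (the helper name is illustrative):

```python
import logging

logger = logging.getLogger(__name__)


def classify_pages(pages):
    """Split parsed pages into published and hidden ones, skipping unknown statuses."""
    published, hidden = [], []
    for page in pages:
        status = getattr(page, 'status', 'published')
        if status == 'published':
            published.append(page)
        elif status == 'hidden':
            hidden.append(page)
        else:
            logger.warning("Unknown status %r for %r, skipping it.", status, page)
    return published, hidden
```

In the merged change both groups are still written to disk, but only the published ones are exposed as `pages` in the template context, which is what keeps hidden pages off the menu.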
2012-06-27T02:33:29Z
[]
[]
getpelican/pelican
404
getpelican__pelican-404
[ "376" ]
dc21efbe10dc6552f882bdf69e4a210da100977a
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -21,6 +21,7 @@ class Page(object): :param content: the string to parse, containing the original content. """ mandatory_properties = ('title',) + default_template = 'page' def __init__(self, content, metadata=None, settings=None, filename=None): @@ -44,6 +45,9 @@ def __init__(self, content, metadata=None, settings=None, # also keep track of the metadata attributes available self.metadata = local_metadata + #default template if it's not defined in page + self.template = self._get_template() + # default author to the one in settings if not defined if not hasattr(self, 'author'): if 'AUTHOR' in settings: @@ -153,9 +157,16 @@ def _set_summary(self, summary): url = property(functools.partial(get_url_setting, key='url')) save_as = property(functools.partial(get_url_setting, key='save_as')) + def _get_template(self): + if hasattr(self, 'template') and self.template is not None: + return self.template + else: + return self.default_template + class Article(Page): mandatory_properties = ('title', 'date', 'category') + default_template = 'article' class Quote(Page): diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -167,11 +167,9 @@ def generate_feeds(self, writer): def generate_articles(self, write): """Generate the articles.""" - article_template = self.get_template('article') for article in chain(self.translations, self.articles): - write(article.save_as, - article_template, self.context, article=article, - category=article.category) + write(article.save_as, self.get_template(article.template), + self.context, article=article, category=article.category) def generate_direct_templates(self, write): """Generate direct templates pages""" @@ -222,10 +220,10 @@ def generate_authors(self, write): def generate_drafts(self, write): """Generate drafts pages.""" - article_template = self.get_template('article') for article in self.drafts: - write('drafts/%s.html' % article.slug, article_template, - self.context, article=article, category=article.category) + write('drafts/%s.html' % article.slug, + self.get_template(article.template), self.context, + article=article, category=article.category) def generate_pages(self, writer): """Generate the pages on the disk""" @@ -385,7 +383,6 @@ def generate_context(self): (repr(unicode.encode(page.status, 'utf-8')), repr(f))) - self.pages, self.translations = process_translations(all_pages) self.hidden_pages, self.hidden_translations = process_translations(hidden_pages) @@ -395,7 +392,7 @@ def generate_context(self): def generate_output(self, writer): for page in chain(self.translations, self.pages, self.hidden_translations, self.hidden_pages): - writer.write_file(page.save_as, self.get_template('page'), + writer.write_file(page.save_as, self.get_template(page.template), self.context, page=page, relative_urls=self.settings.get('RELATIVE_URLS'))
diff --git a/tests/TestPages/hidden_page_with_template.rst b/tests/TestPages/hidden_page_with_template.rst new file mode 100644 --- /dev/null +++ b/tests/TestPages/hidden_page_with_template.rst @@ -0,0 +1,11 @@ +This is a test hidden page with a custom template +################################################# + +:status: hidden +:template: custom + +The quick brown fox jumped over the lazy dog's back. + +This page is hidden + +This page has a custom template to be called when rendered diff --git a/tests/TestPages/page_with_template.rst b/tests/TestPages/page_with_template.rst new file mode 100644 --- /dev/null +++ b/tests/TestPages/page_with_template.rst @@ -0,0 +1,8 @@ +This is a test page with a preset template +########################################## + +:template: custom + +The quick brown fox jumped over the lazy dog's back. + +This article has a custom template to be called when rendered diff --git a/tests/content/article_with_template.rst b/tests/content/article_with_template.rst new file mode 100644 --- /dev/null +++ b/tests/content/article_with_template.rst @@ -0,0 +1,8 @@ +Article with template +##################### + +:template: custom + +This article has a custom template to be called when rendered + +This is some content. With some stuff to "typogrify". diff --git a/tests/test_contents.py b/tests/test_contents.py --- a/tests/test_contents.py +++ b/tests/test_contents.py @@ -2,7 +2,7 @@ from .support import unittest -from pelican.contents import Page +from pelican.contents import Page, Article from pelican.settings import _DEFAULT_CONFIG from pelican.utils import truncate_html_words @@ -135,6 +135,17 @@ def test_datetime(self): # will simply skip this test. unittest.skip("There is no locale %s in this system." % locale) + def test_template(self): + """ + Pages default to page, metadata overwrites + """ + default_page = Page(**self.page_kwargs) + self.assertEqual('page', default_page.template) + page_kwargs = self._copy_page_kwargs() + page_kwargs['metadata']['template'] = 'custom' + custom_page = Page(**page_kwargs) + self.assertEqual('custom', custom_page.template) + def _copy_page_kwargs(self): # make a deep copy of page_kwargs page_kwargs = dict([(key, self.page_kwargs[key]) for key in @@ -146,3 +157,15 @@ def _copy_page_kwargs(self): for subkey in page_kwargs[key]]) return page_kwargs + +class TestArticle(TestPage): + def test_template(self): + """ + Articles default to article, metadata overwrites + """ + default_article = Article(**self.page_kwargs) + self.assertEqual('article', default_article.template) + article_kwargs = self._copy_page_kwargs() + article_kwargs['metadata']['template'] = 'custom' + custom_article = Article(**article_kwargs) + self.assertEqual('custom', custom_article.template) diff --git a/tests/test_generators.py b/tests/test_generators.py --- a/tests/test_generators.py +++ b/tests/test_generators.py @@ -13,6 +13,37 @@ class TestArticlesGenerator(unittest.TestCase): + def setUp(self): + super(TestArticlesGenerator, self).setUp() + self.generator = None + + def get_populated_generator(self): + """ + We only need to pull all the test articles once, but read from it + for each test. 
+ """ + if self.generator is None: + settings = _DEFAULT_CONFIG.copy() + settings['ARTICLE_DIR'] = 'content' + settings['DEFAULT_CATEGORY'] = 'Default' + self.generator = ArticlesGenerator(settings.copy(), settings, + CUR_DIR, _DEFAULT_CONFIG['THEME'], None, + _DEFAULT_CONFIG['MARKUP']) + self.generator.generate_context() + return self.generator + + def distill_articles(self, articles): + distilled = [] + for page in articles: + distilled.append([ + page.title, + page.status, + page.category.name, + page.template + ] + ) + return distilled + def test_generate_feeds(self): generator = ArticlesGenerator(None, {'FEED': _DEFAULT_CONFIG['FEED']}, @@ -93,6 +124,16 @@ def test_direct_templates_save_as_false(self): generator.generate_direct_templates(write) write.assert_called_count == 0 + def test_per_article_template(self): + """ + Custom template articles get the field but standard/unset are None + """ + generator = self.get_populated_generator() + articles = self.distill_articles(generator.articles) + custom_template = ['Article with template', 'published', 'Default', 'custom'] + standard_template = ['This is a super article !', 'published', 'Yeah', 'article'] + self.assertIn(custom_template, articles) + self.assertIn(standard_template, articles) class TestPageGenerator(unittest.TestCase): """ @@ -107,7 +148,8 @@ def distill_pages(self, pages): for page in pages: distilled.append([ page.title, - page.status + page.status, + page.template ] ) return distilled @@ -124,12 +166,14 @@ def test_generate_context(self): hidden_pages = self.distill_pages(generator.hidden_pages) pages_expected = [ - [u'This is a test page', 'published'], - [u'This is a markdown test page', 'published'] + [u'This is a test page', 'published', 'page'], + [u'This is a markdown test page', 'published', 'page'], + [u'This is a test page with a preset template', 'published', 'custom'] ] hidden_pages_expected = [ - [u'This is a test hidden page', 'hidden'], - [u'This is a markdown test hidden page', 'hidden'] + [u'This is a test hidden page', 'hidden', 'page'], + [u'This is a markdown test hidden page', 'hidden', 'page'], + [u'This is a test hidden page with a custom template', 'hidden', 'custom'] ] self.assertItemsEqual(pages_expected,pages)
Request: Specify template in page metadata I posed this request on IRC a while ago and @kylef took an initial shot at it, but I thought it might be good to formalize this in the tracker. I think it would make Pelican a bit more flexible if you could specify the template that you want to render a page with in that page's metadata. So for example in `publications.rst` I could have something like: ``` rst ============= Publications ============= :template: publications ``` and `publications.rst` would be rendered with the jinja2 template `publications.html` that I have in my theme. This would also provide an easy way of tweaking a single post's layout. PS - If someone could tell me what would need to be changed to accomplish this (or point me in the general direction), I'd be happy to attempt to contribute the necessary code.
Any thoughts on this (is this something useful that others might want, or am I the only one), or suggestions on a reasonable approach to tackling this feature? Or is there a way to approximate this feature given the current state of the code that I'm just missing (using DIRECT_TEMPLATES)? A version of the patch that @kylef wrote is the following (it's been modified slightly to properly follow the API, where originally a string was being passed rather than a template object): ``` bash diff --git a/pelican/generators.py b/pelican/generators.py index 1ddc13c..bb659e8 100644 --- a/pelican/generators.py +++ b/pelican/generators.py @@ -167,11 +167,10 @@ class ArticlesGenerator(Generator): def generate_articles(self, write): """Generate the articles.""" - article_template = self.get_template('article') + template = getattr(article, 'template', 'article'), for article in chain(self.translations, self.articles): - write(article.save_as, - article_template, self.context, article=article, - category=article.category) + write(article.save_as, self.get_template(template), + self.context, article=article, category=article.category) def generate_direct_templates(self, write): """Generate direct templates pages""" @@ -382,7 +381,11 @@ class PagesGenerator(Generator): def generate_output(self, writer): for page in chain(self.translations, self.pages): - writer.write_file(page.save_as, self.get_template('page'), + template = getattr(page,'template','page') + print self.env.list_templates() + print self.env.loader + writer.write_file(page.save_as, + self.get_template(template), self.context, page=page, relative_urls=self.settings.get('RELATIVE_URLS')) ``` However it doesn't quite work, although I'm not sure if the problem is directly related to the patch or something else with Pelican. Basically my templating setup is as follows. I have a template base.html which just has the basic scaffold for the site (header, css, js, etc). I then have general.html which extends base.html and has the basic structure of the site (main content block, navigation, footer, etc). I separate this out since my index.html is going to have a different layout and extends base.html directly. So for example, my publications page, templated with publications.html (from above), extends general.html. 
However when I have this setup, I get the following error when trying to render publications.html ``` bash CRITICAL: 'general' is undefined Traceback (most recent call last): File "/Users/lev/.virtualenvs/pelican-dev/bin/pelican", line 8, in <module> load_entry_point('pelican==3.0', 'console_scripts', 'pelican')() File "/Users/lev/Documents/Projects/github/pelican/pelican/__init__.py", line 263, in main pelican.run() File "/Users/lev/Documents/Projects/github/pelican/pelican/__init__.py", line 152, in run p.generate_output(writer) File "/Users/lev/Documents/Projects/github/pelican/pelican/generators.py", line 391, in generate_output relative_urls=self.settings.get('RELATIVE_URLS')) File "/Users/lev/Documents/Projects/github/pelican/pelican/writers.py", line 171, in write_file _write_file(template, localcontext, self.output_path, name) File "/Users/lev/Documents/Projects/github/pelican/pelican/writers.py", line 117, in _write_file output = template.render(localcontext) File "/Library/Frameworks/Python.framework/Versions/7.3/lib/python2.7/site-packages/jinja2/environment.py", line 894, in render return self.environment.handle_exception(exc_info, True) File "theme/templates/publications.html", line 1, in top-level template code {% extends general.html %} File "/Library/Frameworks/Python.framework/Versions/7.3/lib/python2.7/site-packages/jinja2/environment.py", line 372, in getattr return getattr(obj, attribute) jinja2.exceptions.UndefinedError: 'general' is undefined ``` As far as I can tell from the debugging info, general.html is loaded into my environment: ``` bash DEBUG: template list: ['!simple/archives.html', '!simple/article.html', '!simple/author.html', '!simple/base.html', '!simple/categories.html', '!simple/category.html', '!simple/index.html', '!simple/page.html', '!simple/pagination.html', '!simple/tag.html', '!simple/tags.html', '.general.html.swp', 'analytics.html', 'archives.html', 'article.html', 'author.html', 'base.html', 'categories.html', 'category.html', 'general.html', 'index.html', 'page.html', 'pagination.html', 'publications.html', 'tag.html', 'tags.html'] ``` Any ideas of what might be going on, or how I might be able to fix this? I feel like I must be missing something in how things are being loaded, although I looked through generators.py and the jinja2 docs and it seems like general.html should be available. I'll take the case!
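For reference, the approach that was merged (see the patch earlier in this entry) resolves the template on the content object itself instead of in the generator. A condensed sketch of that pattern, not the actual contents.py code:

```python
class Page(object):
    # rendered with page.html unless the content says otherwise
    default_template = 'page'

    def __init__(self, metadata=None):
        metadata = metadata or {}
        # metadata such as ':template: custom' becomes an attribute
        for key, value in metadata.items():
            setattr(self, key, value)
        # fall back to the per-class default when no template was given
        self.template = getattr(self, 'template', None) or self.default_template


class Article(Page):
    default_template = 'article'


print(Page({'title': 'Publications', 'template': 'publications'}).template)   # publications
print(Article({'title': 'Hello world'}).template)                             # article
```

The generators then simply ask for `self.get_template(page.template)` (or `article.template`) rather than hard-coding 'page' or 'article'.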
2012-07-07T21:20:11Z
[]
[]
getpelican/pelican
419
getpelican__pelican-419
[ "304" ]
f4ab1b2cd0b3181c39dcb3eee94a85e869bacaf5
diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -308,7 +308,7 @@ def generate_context(self): self.articles.sort(key=attrgetter('date'), reverse=True) self.dates = list(self.articles) self.dates.sort(key=attrgetter('date'), - reverse=self.context['REVERSE_ARCHIVE_ORDER']) + reverse=self.context['NEWEST_FIRST_ARCHIVES']) # create tag cloud tag_cloud = defaultdict(int) diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -32,7 +32,7 @@ 'DEFAULT_DATE': 'fs', 'WITH_FUTURE_DATES': True, 'CSS_FILE': 'main.css', - 'REVERSE_ARCHIVE_ORDER': False, + 'NEWEST_FIRST_ARCHIVES': True, 'REVERSE_CATEGORY_ORDER': False, 'DELETE_OUTPUT_DIRECTORY': False, 'ARTICLE_URL': '{slug}.html',
diff --git a/tests/output/basic/archives.html b/tests/output/basic/archives.html --- a/tests/output/basic/archives.html +++ b/tests/output/basic/archives.html @@ -47,14 +47,11 @@ <h1>Archives for A Pelican Blog</h1> <dl> - <dt>Fri 15 October 2010</dt> - <dd><a href="./unbelievable.html">Unbelievable !</a></dd> - - <dt>Wed 20 October 2010</dt> - <dd><a href="./oh-yeah.html">Oh yeah !</a></dd> + <dt>Wed 29 February 2012</dt> + <dd><a href="./second-article.html">Second article</a></dd> - <dt>Thu 02 December 2010</dt> - <dd><a href="./this-is-a-super-article.html">This is a super article !</a></dd> + <dt>Wed 20 April 2011</dt> + <dd><a href="./a-markdown-powered-article.html">A markdown powered article</a></dd> <dt>Thu 17 February 2011</dt> <dd><a href="./article-1.html">Article 1</a></dd> @@ -65,11 +62,14 @@ <h1>Archives for A Pelican Blog</h1> <dt>Thu 17 February 2011</dt> <dd><a href="./article-3.html">Article 3</a></dd> - <dt>Wed 20 April 2011</dt> - <dd><a href="./a-markdown-powered-article.html">A markdown powered article</a></dd> + <dt>Thu 02 December 2010</dt> + <dd><a href="./this-is-a-super-article.html">This is a super article !</a></dd> - <dt>Wed 29 February 2012</dt> - <dd><a href="./second-article.html">Second article</a></dd> + <dt>Wed 20 October 2010</dt> + <dd><a href="./oh-yeah.html">Oh yeah !</a></dd> + + <dt>Fri 15 October 2010</dt> + <dd><a href="./unbelievable.html">Unbelievable !</a></dd> </dl> </section> diff --git a/tests/output/basic/tag/bar.html b/tests/output/basic/tag/bar.html --- a/tests/output/basic/tag/bar.html +++ b/tests/output/basic/tag/bar.html @@ -49,7 +49,7 @@ <h1><a href="../.">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href=".././second-article-fr.html">Deuxième article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -66,10 +66,10 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> +</footer><!-- /.post-info --><p>Ceci est un article, en français.</p> </article> @@ -90,8 +90,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -111,13 +111,13 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> + <a class="readmore" href=".././second-article.html">read more</a> </div><!-- /.entry-content --> </article></li> diff --git a/tests/output/basic/tag/baz.html b/tests/output/basic/tag/baz.html --- a/tests/output/basic/tag/baz.html +++ b/tests/output/basic/tag/baz.html @@ -49,7 +49,7 @@ <h1><a href="../.">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h1> + <h1 
class="entry-title"><a href=".././second-article-fr.html">Deuxième article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -66,10 +66,10 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> +</footer><!-- /.post-info --><p>Ceci est un article, en français.</p> </article> @@ -90,8 +90,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -111,13 +111,13 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> + <a class="readmore" href=".././second-article.html">read more</a> </div><!-- /.entry-content --> </article></li> diff --git a/tests/output/basic/tag/foo.html b/tests/output/basic/tag/foo.html --- a/tests/output/basic/tag/foo.html +++ b/tests/output/basic/tag/foo.html @@ -49,7 +49,7 @@ <h1><a href="../.">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href=".././second-article-fr.html">Deuxième article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -66,10 +66,10 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> +</footer><!-- /.post-info --><p>Ceci est un article, en français.</p> </article> @@ -90,8 +90,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -111,13 +111,13 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> + <a class="readmore" href=".././second-article.html">read more</a> </div><!-- /.entry-content --> </article></li> diff --git a/tests/output/custom/archives.html b/tests/output/custom/archives.html --- a/tests/output/custom/archives.html +++ b/tests/output/custom/archives.html @@ -55,14 +55,11 @@ <h1>Archives for Alexis' log</h1> <dl> - <dt>Fri 15 October 2010</dt> - <dd><a href="./unbelievable.html">Unbelievable !</a></dd> - - <dt>Wed 20 October 2010</dt> - <dd><a href="./oh-yeah.html">Oh yeah !</a></dd> + <dt>Wed 29 February 
2012</dt> + <dd><a href="./second-article.html">Second article</a></dd> - <dt>Thu 02 December 2010</dt> - <dd><a href="./this-is-a-super-article.html">This is a super article !</a></dd> + <dt>Wed 20 April 2011</dt> + <dd><a href="./a-markdown-powered-article.html">A markdown powered article</a></dd> <dt>Thu 17 February 2011</dt> <dd><a href="./article-1.html">Article 1</a></dd> @@ -73,11 +70,14 @@ <h1>Archives for Alexis' log</h1> <dt>Thu 17 February 2011</dt> <dd><a href="./article-3.html">Article 3</a></dd> - <dt>Wed 20 April 2011</dt> - <dd><a href="./a-markdown-powered-article.html">A markdown powered article</a></dd> + <dt>Thu 02 December 2010</dt> + <dd><a href="./this-is-a-super-article.html">This is a super article !</a></dd> - <dt>Wed 29 February 2012</dt> - <dd><a href="./second-article.html">Second article</a></dd> + <dt>Wed 20 October 2010</dt> + <dd><a href="./oh-yeah.html">Oh yeah !</a></dd> + + <dt>Fri 15 October 2010</dt> + <dd><a href="./unbelievable.html">Unbelievable !</a></dd> </dl> </section> diff --git a/tests/output/custom/tag/bar.html b/tests/output/custom/tag/bar.html --- a/tests/output/custom/tag/bar.html +++ b/tests/output/custom/tag/bar.html @@ -57,7 +57,7 @@ <h1><a href="../.">Alexis' log </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href=".././second-article-fr.html">Deuxième article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -74,11 +74,11 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> -<p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> +</footer><!-- /.post-info --><p>Ceci est un article, en français.</p> +<p>There are <a href=".././second-article-fr.html#disqus_thread">comments</a>.</p> </article> </aside><!-- /#featured --> @@ -98,8 +98,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -119,14 +119,14 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> - <p>There are <a href=".././second-article-fr.html#disqus_thread">comments</a>.</p> + <a class="readmore" href=".././second-article.html">read more</a> + <p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> </article></li> diff --git a/tests/output/custom/tag/baz.html b/tests/output/custom/tag/baz.html --- a/tests/output/custom/tag/baz.html +++ b/tests/output/custom/tag/baz.html @@ -57,7 +57,7 @@ <h1><a href="../.">Alexis' log </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href=".././second-article-fr.html">Deuxième 
article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -74,11 +74,11 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> -<p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> +</footer><!-- /.post-info --><p>Ceci est un article, en français.</p> +<p>There are <a href=".././second-article-fr.html#disqus_thread">comments</a>.</p> </article> </aside><!-- /#featured --> @@ -98,8 +98,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -119,14 +119,14 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> - <p>There are <a href=".././second-article-fr.html#disqus_thread">comments</a>.</p> + <a class="readmore" href=".././second-article.html">read more</a> + <p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> </article></li> diff --git a/tests/output/custom/tag/foo.html b/tests/output/custom/tag/foo.html --- a/tests/output/custom/tag/foo.html +++ b/tests/output/custom/tag/foo.html @@ -57,7 +57,7 @@ <h1><a href="../.">Alexis' log </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href=".././second-article-fr.html">Deuxième article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -74,11 +74,11 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> -<p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> +</footer><!-- /.post-info --><p>Ceci est un article, en français.</p> +<p>There are <a href=".././second-article-fr.html#disqus_thread">comments</a>.</p> </article> </aside><!-- /#featured --> @@ -98,8 +98,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -119,14 +119,14 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> - <p>There are <a 
href=".././second-article-fr.html#disqus_thread">comments</a>.</p> + <a class="readmore" href=".././second-article.html">read more</a> + <p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> </article></li>
make REVERSE_ARCHIVE_ORDER=True default This is a more common scenario than listing the oldest post on top.
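As a point of reference, this is what users currently have to opt into by hand; a minimal sketch of a settings excerpt, with the setting name taken from the issue title and the `pelicanconf.py` file name assumed.

```python
# pelicanconf.py (hypothetical excerpt): the opt-in that this issue asks to
# make the default behaviour. Setting name comes from the issue title.
REVERSE_ARCHIVE_ORDER = True  # list the newest posts first in archives
```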
This is related to #308. Is it still the case with the latest master? There was a major error in the title - fixed. Oh, then I have to disagree: I think that listing the archives in order is the right thing to do. That's customizable, though. It's far more common and expected to see Archives (and blog posts) listing newer entries first. What's your experience? Oh, after double checking, it seems that you're effectively right :) Thanks for reconsidering.
2012-07-15T00:53:40Z
[]
[]
getpelican/pelican
424
getpelican__pelican-424
[ "395" ]
be2f04f0824682d0f5fe1a53ceaf23979b5849f4
diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -63,6 +63,18 @@ def render_node_to_html(document, node): return visitor.astext() +class PelicanHTMLTranslator(HTMLTranslator): + + def visit_abbreviation(self, node): + attrs = {} + if node.hasattr('explanation'): + attrs['title'] = node['explanation'] + self.body.append(self.starttag(node, 'abbr', '', **attrs)) + + def depart_abbreviation(self, node): + self.body.append('</abbr>') + + class RstReader(Reader): enabled = bool(docutils) file_extensions = ['rst'] @@ -92,6 +104,7 @@ def _get_publisher(self, filename): pub = docutils.core.Publisher( destination_class=docutils.io.StringOutput) pub.set_components('standalone', 'restructuredtext', 'html') + pub.writer.translator_class = PelicanHTMLTranslator pub.process_programmatic_settings(None, extra_params, None) pub.set_source(source_path=filename) pub.publish() diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py --- a/pelican/rstdirectives.py +++ b/pelican/rstdirectives.py @@ -1,9 +1,10 @@ # -*- coding: utf-8 -*- -from docutils import nodes -from docutils.parsers.rst import directives, Directive +from docutils import nodes, utils +from docutils.parsers.rst import directives, roles, Directive from pygments.formatters import HtmlFormatter from pygments import highlight from pygments.lexers import get_lexer_by_name, TextLexer +import re INLINESTYLES = False DEFAULT = HtmlFormatter(noclasses=INLINESTYLES) @@ -94,3 +95,18 @@ def run(self): nodes.raw('', '</div>', format='html')] directives.register_directive('youtube', YouTube) + +_abbr_re = re.compile('\((.*)\)$') + +class abbreviation(nodes.Inline, nodes.TextElement): pass + +def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): + text = utils.unescape(text) + m = _abbr_re.search(text) + if m is None: + return [abbreviation(text, text)], [] + abbr = text[:m.start()].strip() + expl = m.group(1) + return [abbreviation(abbr, abbr, explanation=expl)], [] + +roles.register_local_role('abbr', abbr_role)
diff --git a/tests/content/article.rst b/tests/content/article.rst --- a/tests/content/article.rst +++ b/tests/content/article.rst @@ -2,3 +2,5 @@ Article title ############# This is some content. With some stuff to "typogrify". + +Now with added support for :abbr:`TLA (three letter acronym)`. diff --git a/tests/test_readers.py b/tests/test_readers.py --- a/tests/test_readers.py +++ b/tests/test_readers.py @@ -47,7 +47,9 @@ def test_typogrify(self): # unmodified content, _ = readers.read_file(_filename('article.rst')) expected = "<p>This is some content. With some stuff to "\ - "&quot;typogrify&quot;.</p>\n" + "&quot;typogrify&quot;.</p>\n<p>Now with added "\ + 'support for <abbr title="three letter acronym">'\ + 'TLA</abbr>.</p>\n' self.assertEqual(content, expected) @@ -56,7 +58,9 @@ def test_typogrify(self): content, _ = readers.read_file(_filename('article.rst'), settings={'TYPOGRIFY': True}) expected = "<p>This is some content. With some stuff to&nbsp;"\ - "&#8220;typogrify&#8221;.</p>\n" + "&#8220;typogrify&#8221;.</p>\n<p>Now with added "\ + 'support for <abbr title="three letter acronym">'\ + 'TLA</abbr>.</p>\n' self.assertEqual(content, expected) except ImportError:
Add support for acronym/abbr Like in Sphinx, for example: https://bitbucket.org/birkenfeld/sphinx/changeset/f69ec6dd2c54 Docutils has this in the To Do List, but I like Georg's syntax better. http://docutils.sourceforge.net/docs/dev/todo.html
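A sketch of the Sphinx-style approach the report points to, mirroring the role registration in the patch above; treat it as an illustration rather than canonical code, and note that rendering the node to an `<abbr>` tag still needs a matching HTML translator (also part of the patch).

```python
# Sketch of a docutils ':abbr:' role, modeled on the patch above (and on the
# Sphinx changeset referenced in the report).
import re

from docutils import nodes, utils
from docutils.parsers.rst import roles

_abbr_re = re.compile(r'\((.*)\)$')


class abbreviation(nodes.Inline, nodes.TextElement):
    """Inline node holding the abbreviation text and optional explanation."""


def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    # :abbr:`TLA (three letter acronym)` -> abbr="TLA", explanation="three letter acronym"
    text = utils.unescape(text)
    match = _abbr_re.search(text)
    if match is None:
        # No trailing "(explanation)": keep the bare abbreviation.
        return [abbreviation(text, text)], []
    abbr = text[:match.start()].strip()
    explanation = match.group(1)
    return [abbreviation(abbr, abbr, explanation=explanation)], []


roles.register_local_role('abbr', abbr_role)
```

In reST source this is written as :abbr:`TLA (three letter acronym)`, which the test above expects to render as an abbr tag whose title attribute carries the explanation.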
Feel free to issue a pull request for this. The way it should be done is by extending the docutils parser, I think, or by replacing it with some code from Sphinx. Is anyone interested in working on this feature sometime in the next few days? We are trying to put together a 3.0 release very soon, and it would be helpful to know whether we should mark this issue for inclusion. Can you just not mark it for inclusion but include it anyway if I have time to pull it together? @djco: Done. Thanks in advance for trying to get it in! @djco, any news on this? Haven't gotten to it. Just go ahead and do the release. :) Let me know if anybody wants to step in and implement this for 3.0. Otherwise I'll defer this feature until the next release. Is there an ETA for 3.0? Something like: in the next few days :) As Alexis said, there's no hard-and-fast ETA, but it'd be nice to get a release out to folks soon. Once all the targeted milestone issues have either been completed or deferred, I think we should do a last round of testing and then package up a release.
2012-07-17T11:35:12Z
[]
[]
getpelican/pelican
476
getpelican__pelican-476
[ "443" ]
8ecebc9310e48555e2245336adff7e41dd90874a
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -11,7 +11,7 @@ StaticGenerator, PdfGenerator, LessCSSGenerator) from pelican.log import init from pelican.settings import read_settings, _DEFAULT_CONFIG -from pelican.utils import clean_output_dir, files_changed, file_changed +from pelican.utils import clean_output_dir, files_changed, file_changed, NoFilesError from pelican.writers import Writer __major__ = 3 @@ -265,6 +265,7 @@ def main(): try: if args.autoreload: + files_found_error = True while True: try: # Check source dir for changed files ending with the given @@ -274,6 +275,8 @@ def main(): # have. if files_changed(pelican.path, pelican.markup) or \ files_changed(pelican.theme, ['']): + if files_found_error == False: + files_found_error = True pelican.run() # reload also if settings.py changed @@ -287,6 +290,10 @@ def main(): except KeyboardInterrupt: logger.warning("Keyboard interrupt, quitting.") break + except NoFilesError: + if files_found_error == True: + logger.warning("No valid files found in content. Nothing to generate.") + files_found_error = False except Exception, e: logger.warning( "Caught exception \"{}\". Reloading.".format(e) diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -14,6 +14,9 @@ logger = logging.getLogger(__name__) +class NoFilesError(Exception): + pass + def get_date(string): """Return a datetime object from a string. @@ -241,10 +244,13 @@ def file_times(path): yield os.stat(os.path.join(root, f)).st_mtime global LAST_MTIME - mtime = max(file_times(path)) - if mtime > LAST_MTIME: - LAST_MTIME = mtime - return True + try: + mtime = max(file_times(path)) + if mtime > LAST_MTIME: + LAST_MTIME = mtime + return True + except ValueError: + raise NoFilesError("No files with the given extension(s) found.") return False
diff --git a/tests/test_utils.py b/tests/test_utils.py --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -6,6 +6,7 @@ from pelican import utils from .support import get_article, unittest +from pelican.utils import NoFilesError class TestUtils(unittest.TestCase): @@ -74,7 +75,8 @@ def test_process_translations(self): self.assertNotIn(fr_article1, index) def test_files_changed(self): - "Test if file changes are correctly detected" + """Test if file changes are correctly detected + Make sure to handle not getting any files correctly""" path = os.path.join(os.path.dirname(__file__), 'content') filename = os.path.join(path, 'article_with_metadata.rst') @@ -90,6 +92,18 @@ def test_files_changed(self): self.assertEquals(changed, True) self.assertAlmostEqual(utils.LAST_MTIME, t, delta=1) + empty_path = os.path.join(os.path.dirname(__file__), 'empty') + try: + os.mkdir(empty_path) + os.mkdir(os.path.join(empty_path, "empty_folder")) + shutil.copy(__file__, empty_path) + with self.assertRaises(NoFilesError): + utils.files_changed(empty_path, 'rst') + except OSError: + self.fail("OSError Exception in test_files_changed test") + finally: + shutil.rmtree(empty_path, True) + def test_clean_output_dir(self): test_directory = os.path.join(os.path.dirname(__file__), 'clean_output') content = os.path.join(os.path.dirname(__file__), 'content')
Infinite loop on `make serve` I just installed a copy of pelican from the repo (master branch) into a fresh virtualenv, ran the `pelican-quickstart` script and then `make html`, and it worked fine. However, when I first tried `make serve`, it complained about permissions because `develop_server.sh` was not allowed to be executed. I ran `chmod +x develop_server.sh`, which solved that problem; however, now whenever I run `make serve`, it gets into an infinite loop with this message: `WARNING: Caught exception "max() arg is an empty sequence". Reloading.` While it keeps printing that line, I can't figure out how to quit the process. I tried `Ctrl-C` and it does not help, so the only way out I found is to close the terminal window.
Running ./develop_server.sh stop would fix that; even though text is scrolling, you are still at a prompt. I think #443 and #451 are the same ticket. What I am saying is that make serve just runs pelican -r and python SimpleHTTPServer... these are the same issue if this is just referring to the scrolling. Sorry for the late reply; I just saw the responses. @justinmayer I did make it executable. @miki725 I think perhaps you're misunderstanding how GitHub works here. That's a commit message, in which I fixed the problem that required you to do that. It shouldn't be necessary to manually `chmod +x` that file in the future. I'll defer to @tbunnyman regarding the "infinite loop" topic. This problem occurs when the content directory is empty. So if you run the quickstart and then do not write any posts, the content directory will be empty. In [pelican.utils.files_changed](https://github.com/getpelican/pelican/blob/master/pelican/utils.py#L232), there is a call to get the max mtime of the files in the directory. However, if there are no files in the directory, [the call to max](https://github.com/getpelican/pelican/blob/master/pelican/utils.py#L244) raises an exception with the message `max() arg is an empty sequence`. It seems like the fix would be to catch the exception and return False in `files_changed`. @streeter +1, this matches the behavior I'm seeing: the infinite loop when I haven't yet written any posts (which is actually in line with the "kickstart a blog" section of the [getting started](http://docs.getpelican.com/en/latest/getting_started.html) doc). @case: The docs do indicate that content is needed (emphasis added): > Once you finish answering all the questions, you can begin adding content to the content folder that has been created for you. [...] **Once you have some content to generate**, you can convert it to HTML via the following command: That said, we can probably improve things further. I think the most expedient solution is to have `develop_server.sh` test for the presence of content, print a helpful error message indicating the lack of content, and exit gracefully.
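A sketch of the guard described in the comment above, following the shape of `pelican.utils`; note that the merged patch (the diff further up in this record) raises a dedicated `NoFilesError` instead of the suggested `return False`, so the autoreload loop can warn once and keep polling.

```python
# Sketch of the fix discussed above, assuming the helper layout of pelican.utils.
import os


class NoFilesError(Exception):
    pass


LAST_MTIME = 0


def files_changed(path, extensions):
    """Return True if a matching file under `path` is newer than the last check."""
    def file_times(path):
        for root, dirs, files in os.walk(path):
            for f in files:
                if any(f.endswith(ext) for ext in extensions):
                    yield os.stat(os.path.join(root, f)).st_mtime

    global LAST_MTIME
    try:
        mtime = max(file_times(path))
    except ValueError:
        # max() over an empty generator raises ValueError ("max() arg is an
        # empty sequence"); surface it as a clearer error so the caller can
        # warn once and keep waiting instead of crash-looping.
        raise NoFilesError("No files with the given extension(s) found.")
    if mtime > LAST_MTIME:
        LAST_MTIME = mtime
        return True
    return False
```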
2012-08-23T19:51:06Z
[]
[]
getpelican/pelican
529
getpelican__pelican-529
[ "528" ]
1580f7fea7938e5e27dd308eea07412e53bdd79f
diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -97,6 +97,17 @@ def copy(path, source, destination, destination_path=None, overwrite=False): def clean_output_dir(path): """Remove all the files from the output directory""" + if not os.path.exists(path): + logger.debug("Directory already removed: %s" % path) + return + + if not os.path.isdir(path): + try: + os.remove(path) + except Exception, e: + logger.error("Unable to delete file %s; %e" % path, e) + return + # remove all the existing content from the output folder for filename in os.listdir(path): file = os.path.join(path, filename)
diff --git a/tests/test_utils.py b/tests/test_utils.py --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -112,3 +112,16 @@ def test_clean_output_dir(self): self.assertTrue(os.path.isdir(test_directory)) self.assertListEqual([], os.listdir(test_directory)) shutil.rmtree(test_directory) + + def test_clean_output_dir_not_there(self): + test_directory = os.path.join(os.path.dirname(__file__), 'does_not_exist') + utils.clean_output_dir(test_directory) + self.assertTrue(not os.path.exists(test_directory)) + + def test_clean_output_dir_is_file(self): + test_directory = os.path.join(os.path.dirname(__file__), 'this_is_a_file') + f = open(test_directory, 'w') + f.write('') + f.close() + utils.clean_output_dir(test_directory) + self.assertTrue(not os.path.exists(test_directory))
Crash when DELETE_OUTPUT_DIRECTORY=True and output dir does not exist If the output directory does not exist and `DELETE_OUTPUT_DIRECTORY = True` is set in the settings file, pelican exits with an error: ``` CRITICAL: [Errno 2] No such file or directory: '/path/to/deploy' ``` I have a patch for this and will submit a pull request.
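A minimal sketch of the early-exit guard the reporter promises, following the shape of the diff above; only the existence and file-versus-directory checks are shown, and the existing per-file removal logic is elided.

```python
# Sketch of the guard added to clean_output_dir, modeled on the diff above.
import os
import logging

logger = logging.getLogger(__name__)


def clean_output_dir(path):
    """Remove the output directory's contents without crashing when it is absent."""
    if not os.path.exists(path):
        # Nothing to clean; previously this fell through and crashed.
        logger.debug("Directory already removed: %s" % path)
        return
    if not os.path.isdir(path):
        # The output path exists but is a regular file; just delete it.
        os.remove(path)
        return
    # ...existing removal of the directory contents continues here...
```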
2012-10-02T23:07:50Z
[]
[]
getpelican/pelican
548
getpelican__pelican-548
[ "477", "511", "521" ]
588171d9b7f6f7a882273ad770ee6d5f7a0daef5
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -8,10 +8,12 @@ from pelican import signals from pelican.generators import (ArticlesGenerator, PagesGenerator, - StaticGenerator, PdfGenerator, LessCSSGenerator) + StaticGenerator, PdfGenerator, + LessCSSGenerator, SourceFileGenerator) from pelican.log import init -from pelican.settings import read_settings, _DEFAULT_CONFIG -from pelican.utils import clean_output_dir, files_changed, file_changed, NoFilesError +from pelican.settings import read_settings +from pelican.utils import (clean_output_dir, files_changed, file_changed, + NoFilesError) from pelican.writers import Writer __major__ = 3 @@ -23,42 +25,21 @@ class Pelican(object): - def __init__(self, settings=None, path=None, theme=None, output_path=None, - markup=None, delete_outputdir=False, plugin_path=None): - """Read the settings, and performs some checks on the environment - before doing anything else. + def __init__(self, settings): + """ + Pelican initialisation, performs some checks on the environment before + doing anything else. """ - if settings is None: - settings = _DEFAULT_CONFIG - - self.path = path or settings['PATH'] - if not self.path: - raise Exception('You need to specify a path containing the content' - ' (see pelican --help for more information)') - - if self.path.endswith('/'): - self.path = self.path[:-1] # define the default settings self.settings = settings - self._handle_deprecation() - self.theme = theme or settings['THEME'] - output_path = output_path or settings['OUTPUT_PATH'] - self.output_path = os.path.realpath(output_path) - self.markup = markup or settings['MARKUP'] - self.delete_outputdir = delete_outputdir \ - or settings['DELETE_OUTPUT_DIRECTORY'] - - # find the theme in pelican.theme if the given one does not exists - if not os.path.exists(self.theme): - theme_path = os.sep.join([os.path.dirname( - os.path.abspath(__file__)), "themes/%s" % self.theme]) - if os.path.exists(theme_path): - self.theme = theme_path - else: - raise Exception("Impossible to find the theme %s" % theme) + self.path = settings['PATH'] + self.theme = settings['THEME'] + self.output_path = settings['OUTPUT_PATH'] + self.markup = settings['MARKUP'] + self.delete_outputdir = settings['DELETE_OUTPUT_DIRECTORY'] self.init_path() self.init_plugins() @@ -77,7 +58,7 @@ def init_plugins(self): logger.debug("Loading plugin `{0}' ...".format(plugin)) plugin = __import__(plugin, globals(), locals(), 'module') - logger.debug("Registering plugin `{0}' ...".format(plugin.__name__)) + logger.debug("Registering plugin `{0}'".format(plugin.__name__)) plugin.register() def _handle_deprecation(self): @@ -138,8 +119,8 @@ def _handle_deprecation(self): 'Modify CATEGORY_FEED to CATEGORY_FEED_ATOM in your settings and ' 'theme for the same behavior. 
Temporarily setting ' 'CATEGORY_FEED_ATOM for backwards compatibility.') - self.settings['CATEGORY_FEED_ATOM'] = self.settings['CATEGORY_FEED'] - + self.settings['CATEGORY_FEED_ATOM'] =\ + self.settings['CATEGORY_FEED'] def run(self): """Run the generators and return""" @@ -180,12 +161,28 @@ def run(self): if hasattr(p, 'generate_output'): p.generate_output(writer) + signals.finalized.send(self) + def get_generator_classes(self): generators = [StaticGenerator, ArticlesGenerator, PagesGenerator] if self.settings['PDF_GENERATOR']: generators.append(PdfGenerator) if self.settings['LESS_GENERATOR']: # can be True or PATH to lessc generators.append(LessCSSGenerator) + if self.settings['OUTPUT_SOURCES']: + generators.append(SourceFileGenerator) + + for pair in signals.get_generators.send(self): + (funct, value) = pair + + if not isinstance(value, (tuple, list)): + value = (value, ) + + for v in value: + if isinstance(v, type): + logger.debug('Found generator: {0}'.format(v)) + generators.append(v) + return generators def get_writer(self): @@ -242,11 +239,26 @@ def parse_arguments(): return parser.parse_args() +def get_config(args): + config = {} + if args.path: + config['PATH'] = os.path.abspath(os.path.expanduser(args.path)) + if args.output: + config['OUTPUT_PATH'] = \ + os.path.abspath(os.path.expanduser(args.output)) + if args.markup: + config['MARKUP'] = [a.strip().lower() for a in args.markup.split(',')] + if args.theme: + abstheme = os.path.abspath(os.path.expanduser(args.theme)) + config['THEME'] = abstheme if os.path.exists(abstheme) else args.theme + if args.delete_outputdir is not None: + config['DELETE_OUTPUT_DIRECTORY'] = args.delete_outputdir + return config + + def get_instance(args): - markup = [a.strip().lower() for a in args.markup.split(',')]\ - if args.markup else None - settings = read_settings(args.settings) + settings = read_settings(args.settings, override=get_config(args)) cls = settings.get('PELICAN_CLASS') if isinstance(cls, basestring): @@ -254,15 +266,12 @@ def get_instance(args): module = __import__(module) cls = getattr(module, cls_name) - return cls(settings, args.path, args.theme, args.output, markup, - args.delete_outputdir) + return cls(settings) def main(): args = parse_arguments() init(args.verbosity) - # Split the markup languages only if some have been given. Otherwise, - # populate the variable with None. pelican = get_instance(args) try: @@ -277,7 +286,7 @@ def main(): # have. if files_changed(pelican.path, pelican.markup) or \ files_changed(pelican.theme, ['']): - if files_found_error == False: + if not files_found_error: files_found_error = True pelican.run() @@ -293,9 +302,11 @@ def main(): logger.warning("Keyboard interrupt, quitting.") break except NoFilesError: - if files_found_error == True: - logger.warning("No valid files found in content. Nothing to generate.") + if files_found_error: + logger.warning("No valid files found in content. " + "Nothing to generate.") files_found_error = False + time.sleep(1) # sleep to avoid cpu load except Exception, e: logger.warning( "Caught exception \"{}\". 
Reloading.".format(e) diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import copy import locale import logging import functools @@ -13,7 +14,7 @@ from pelican.settings import _DEFAULT_CONFIG from pelican.utils import slugify, truncate_html_words, memoized - +from pelican import signals logger = logging.getLogger(__name__) @@ -33,7 +34,7 @@ def __init__(self, content, metadata=None, settings=None, if not metadata: metadata = {} if not settings: - settings = _DEFAULT_CONFIG + settings = copy.deepcopy(_DEFAULT_CONFIG) self.settings = settings self._content = content @@ -60,7 +61,7 @@ def __init__(self, content, metadata=None, settings=None, else: title = filename.decode('utf-8') if filename else self.title self.author = Author(getenv('USER', 'John Doe'), settings) - logger.warning(u"Author of `{0}' unknown, assuming that his "\ + logger.warning(u"Author of `{0}' unknown, assuming that his " "name is " "`{1}'".format(title, self.author)) # manage languages @@ -110,6 +111,8 @@ def __init__(self, content, metadata=None, settings=None, if 'summary' in metadata: self._summary = metadata['summary'] + signals.content_object_init.send(self.__class__, instance=self) + def check_properties(self): """test that each mandatory property is set.""" for prop in self.mandatory_properties: @@ -145,7 +148,7 @@ def _update_content(self, content): (?:href|src)\s*=) (?P<quote>["\']) # require value to be quoted - (?P<path>:(?P<what>.*):(?P<value>.*)) # the url value + (?P<path>\|(?P<what>.*?)\|(?P<value>.*?)) # the url value \2""", re.X) def replacer(m): @@ -168,7 +171,7 @@ def replacer(m): origin = urlparse.urljoin(self._context['SITEURL'], self._context['filenames'][value].url) else: - logger.warning(u"Unable to find {fn}, skipping url"\ + logger.warning(u"Unable to find {fn}, skipping url" "replacement".format(fn=value)) return m.group('markup') + m.group('quote') + origin \ @@ -291,11 +294,27 @@ class Author(URLWrapper): pass +class StaticContent(object): + def __init__(self, src, dst=None, settings=None): + if not settings: + settings = copy.deepcopy(_DEFAULT_CONFIG) + self.src = src + self.url = dst or src + self.filepath = os.path.join(settings['PATH'], src) + self.save_as = os.path.join(settings['OUTPUT_PATH'], self.url) + + def __str__(self): + return str(self.filepath.encode('utf-8', 'replace')) + + def __unicode__(self): + return self.filepath + + def is_valid_content(content, f): try: content.check_properties() return True except NameError, e: - logger.error(u"Skipping %s: impossible to find informations about"\ + logger.error(u"Skipping %s: impossible to find informations about" "'%s'" % (f, e)) return False diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -5,7 +5,9 @@ import logging import datetime import subprocess +import shutil +from codecs import open from collections import defaultdict from functools import partial from itertools import chain @@ -14,9 +16,10 @@ from jinja2 import Environment, FileSystemLoader, PrefixLoader, ChoiceLoader from jinja2.exceptions import TemplateNotFound -from pelican.contents import Article, Page, Category, is_valid_content +from pelican.contents import Article, Page, Category, StaticContent, \ + is_valid_content from pelican.readers import read_file -from pelican.utils import copy, process_translations, open +from pelican.utils import copy, process_translations, mkdir_p from pelican import signals 
@@ -36,8 +39,11 @@ def __init__(self, *args, **kwargs): # templates cache self._templates = {} - self._templates_path = os.path.expanduser( - os.path.join(self.theme, 'templates')) + self._templates_path = [] + self._templates_path.append(os.path.expanduser( + os.path.join(self.theme, 'templates'))) + self._templates_path += self.settings.get('EXTRA_TEMPLATES_PATHS', []) + theme_path = os.path.dirname(os.path.abspath(__file__)) @@ -57,7 +63,6 @@ def __init__(self, *args, **kwargs): # get custom Jinja filters from user settings custom_filters = self.settings.get('JINJA_FILTERS', {}) self.env.filters.update(custom_filters) - self.settings['PATH'] = self.path # overwrite with the actual path self.context['filenames'] = kwargs.get('filenames', {}) def get_template(self, name): @@ -78,8 +83,10 @@ def get_files(self, path, exclude=[], extensions=None): :param path: the path to search the file on :param exclude: the list of path to exclude + :param extensions: the list of allowed extensions (if False, all + extensions are allowed) """ - if not extensions: + if extensions is None: extensions = self.markup files = [] @@ -93,8 +100,10 @@ def get_files(self, path, exclude=[], extensions=None): for e in exclude: if e in dirs: dirs.remove(e) - files.extend([os.sep.join((root, f)) for f in temp_files - if True in [f.endswith(ext) for ext in extensions]]) + for f in temp_files: + if extensions is False or \ + (True in [f.endswith(ext) for ext in extensions]): + files.append(os.sep.join((root, f))) return files def add_filename(self, content): @@ -361,7 +370,7 @@ def generate_context(self): self.authors = list(self.authors.items()) self.authors.sort(key=lambda item: item[0].name) - + self._update_context(('articles', 'dates', 'tags', 'categories', 'tag_cloud', 'authors', 'related_posts')) @@ -387,7 +396,7 @@ def generate_context(self): os.path.join(self.path, self.settings['PAGE_DIR']), exclude=self.settings['PAGE_EXCLUDES']): try: - content, metadata = read_file(f) + content, metadata = read_file(f, settings=self.settings) except Exception, e: logger.warning(u'Could not process %s\n%s' % (f, str(e))) continue @@ -434,6 +443,7 @@ def _copy_paths(self, paths, source, destination, output_path, final_path, overwrite=True) def generate_context(self): + self.staticfiles = [] if self.settings['WEBASSETS']: from webassets import Environment as AssetsEnvironment @@ -441,37 +451,72 @@ def generate_context(self): # Define the assets environment that will be passed to the # generators. The StaticGenerator must then be run first to have # the assets in the output_path before generating the templates. - assets_url = self.settings['SITEURL'] + '/theme/' + + # Let ASSET_URL honor Pelican's RELATIVE_URLS setting. + # Hint for templates: + # Current version of webassets seem to remove any relative + # paths at the beginning of the URL. So, if RELATIVE_URLS + # is on, ASSET_URL will start with 'theme/', regardless if we + # set assets_url here to './theme/' or to 'theme/'. + # XXX However, this breaks the ASSET_URL if user navigates to + # a sub-URL, e.g. if he clicks on a category. 
To workaround this + # issue, I use + # <link rel="stylesheet" href="{{ SITEURL }}/{{ ASSET_URL }}"> + # instead of + # <link rel="stylesheet" href="{{ ASSET_URL }}"> + if self.settings.get('RELATIVE_URLS'): + assets_url = './theme/' + else: + assets_url = self.settings['SITEURL'] + '/theme/' assets_src = os.path.join(self.output_path, 'theme') self.assets_env = AssetsEnvironment(assets_src, assets_url) if logging.getLevelName(logger.getEffectiveLevel()) == "DEBUG": self.assets_env.debug = True - def generate_output(self, writer): + # walk static paths + for static_path in self.settings['STATIC_PATHS']: + for f in self.get_files( + os.path.join(self.path, static_path), extensions=False): + f_rel = os.path.relpath(f, self.path) + sc = StaticContent(f_rel, os.path.join('static', f_rel), + settings=self.settings) + self.staticfiles.append(sc) + self.context['filenames'][f_rel] = sc + # same thing for FILES_TO_COPY + for src, dest in self.settings['FILES_TO_COPY']: + sc = StaticContent(src, dest, settings=self.settings) + self.staticfiles.append(sc) + self.context['filenames'][src] = sc - self._copy_paths(self.settings['STATIC_PATHS'], self.path, - 'static', self.output_path) + def generate_output(self, writer): self._copy_paths(self.settings['THEME_STATIC_PATHS'], self.theme, 'theme', self.output_path, '.') - - # copy all the files needed - for source, destination in self.settings['FILES_TO_COPY']: - copy(source, self.path, self.output_path, destination, - overwrite=True) + # copy all StaticContent files + for sc in self.staticfiles: + mkdir_p(os.path.dirname(sc.save_as)) + shutil.copy(sc.filepath, sc.save_as) + logger.info('copying %s to %s' % (sc.filepath, sc.save_as)) class PdfGenerator(Generator): """Generate PDFs on the output dir, for all articles and pages coming from rst""" def __init__(self, *args, **kwargs): + super(PdfGenerator, self).__init__(*args, **kwargs) try: from rst2pdf.createpdf import RstToPdf + pdf_style_path = os.path.join(self.settings['PDF_STYLE_PATH']) \ + if 'PDF_STYLE_PATH' in self.settings.keys() \ + else '' + pdf_style = self.settings['PDF_STYLE'] if 'PDF_STYLE' \ + in self.settings.keys() \ + else 'twelvepoint' self.pdfcreator = RstToPdf(breakside=0, - stylesheets=['twelvepoint']) + stylesheets=[pdf_style], + style_path=[pdf_style_path]) except ImportError: raise Exception("unable to find rst2pdf") - super(PdfGenerator, self).__init__(*args, **kwargs) def _create_pdf(self, obj, output_path): if obj.filename.endswith(".rst"): @@ -479,7 +524,7 @@ def _create_pdf(self, obj, output_path): output_pdf = os.path.join(output_path, filename) # print "Generating pdf for", obj.filename, " in ", output_pdf with open(obj.filename) as f: - self.pdfcreator.createPdf(text=f, output=output_pdf) + self.pdfcreator.createPdf(text=f.read(), output=output_pdf) logger.info(u' [ok] writing %s' % output_pdf) def generate_context(self): @@ -503,6 +548,19 @@ def generate_output(self, writer=None): for page in self.context['pages']: self._create_pdf(page, pdf_path) +class SourceFileGenerator(Generator): + def generate_context(self): + self.output_extension = self.settings['OUTPUT_SOURCES_EXTENSION'] + + def _create_source(self, obj, output_path): + filename = os.path.splitext(obj.save_as)[0] + dest = os.path.join(output_path, filename + self.output_extension) + copy('', obj.filename, dest) + + def generate_output(self, writer=None): + logger.info(u' Generating source files...') + for object in chain(self.context['articles'], self.context['pages']): + self._create_source(object, 
self.output_path) class LessCSSGenerator(Generator): """Compile less css files.""" diff --git a/pelican/plugins/global_license.py b/pelican/plugins/global_license.py --- a/pelican/plugins/global_license.py +++ b/pelican/plugins/global_license.py @@ -4,13 +4,14 @@ License plugin for Pelican ========================== -Simply add license variable in article's context, which contain -the license text. +This plugin allows you to define a LICENSE setting and adds the contents of that +license variable to the article's context, making that variable available to use +from within your theme's templates. Settings: --------- -Add LICENSE to your settings file to define default license. +Define LICENSE in your settings file with the contents of your default license. """ diff --git a/pelican/plugins/gravatar.py b/pelican/plugins/gravatar.py --- a/pelican/plugins/gravatar.py +++ b/pelican/plugins/gravatar.py @@ -5,20 +5,22 @@ Gravatar plugin for Pelican =========================== -Simply add author_gravatar variable in article's context, which contains -the gravatar url. +This plugin assigns the ``author_gravatar`` variable to the Gravatar URL and +makes the variable available within the article's context. Settings: --------- -Add AUTHOR_EMAIL to your settings file to define default author email. +Add AUTHOR_EMAIL to your settings file to define the default author's email +address. Obviously, that email address must be associated with a Gravatar +account. Article metadata: ------------------ :email: article's author email -If one of them are defined, the author_gravatar variable is added to +If one of them are defined, the author_gravatar variable is added to the article's context. """ diff --git a/pelican/plugins/sitemap.py b/pelican/plugins/sitemap.py new file mode 100644 --- /dev/null +++ b/pelican/plugins/sitemap.py @@ -0,0 +1,190 @@ +import collections +import os.path + +from datetime import datetime +from logging import warning, info +from codecs import open + +from pelican import signals, contents + +TXT_HEADER = u"""{0}/index.html +{0}/archives.html +{0}/tags.html +{0}/categories.html +""" + +XML_HEADER = u"""<?xml version="1.0" encoding="utf-8"?> +<urlset xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" +xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd" +xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> +""" + +XML_URL = u""" +<url> +<loc>{0}/{1}</loc> +<lastmod>{2}</lastmod> +<changefreq>{3}</changefreq> +<priority>{4}</priority> +</url> +""" + +XML_FOOTER = u""" +</urlset> +""" + + +def format_date(date): + if date.tzinfo: + tz = date.strftime('%s') + tz = tz[:-2] + ':' + tz[-2:] + else: + tz = "-00:00" + return date.strftime("%Y-%m-%dT%H:%M:%S") + tz + + +class SitemapGenerator(object): + + def __init__(self, context, settings, path, theme, output_path, *null): + + self.output_path = output_path + self.context = context + self.now = datetime.now() + self.siteurl = settings.get('SITEURL') + + self.format = 'xml' + + self.changefreqs = { + 'articles': 'monthly', + 'indexes': 'daily', + 'pages': 'monthly' + } + + self.priorities = { + 'articles': 0.5, + 'indexes': 0.5, + 'pages': 0.5 + } + + config = settings.get('SITEMAP', {}) + + if not isinstance(config, dict): + warning("sitemap plugin: the SITEMAP setting must be a dict") + else: + fmt = config.get('format') + pris = config.get('priorities') + chfreqs = config.get('changefreqs') + + if fmt not in ('xml', 'txt'): + warning("sitemap plugin: SITEMAP['format'] 
must be `txt' or `xml'") + warning("sitemap plugin: Setting SITEMAP['format'] on `xml'") + elif fmt == 'txt': + self.format = fmt + return + + valid_keys = ('articles', 'indexes', 'pages') + valid_chfreqs = ('always', 'hourly', 'daily', 'weekly', 'monthly', + 'yearly', 'never') + + if isinstance(pris, dict): + for k, v in pris.iteritems(): + if k in valid_keys and not isinstance(v, (int, float)): + default = self.priorities[k] + warning("sitemap plugin: priorities must be numbers") + warning("sitemap plugin: setting SITEMAP['priorities']" + "['{0}'] on {1}".format(k, default)) + pris[k] = default + self.priorities.update(pris) + elif pris is not None: + warning("sitemap plugin: SITEMAP['priorities'] must be a dict") + warning("sitemap plugin: using the default values") + + if isinstance(chfreqs, dict): + for k, v in chfreqs.iteritems(): + if k in valid_keys and v not in valid_chfreqs: + default = self.changefreqs[k] + warning("sitemap plugin: invalid changefreq `{0}'".format(v)) + warning("sitemap plugin: setting SITEMAP['changefreqs']" + "['{0}'] on '{1}'".format(k, default)) + chfreqs[k] = default + self.changefreqs.update(chfreqs) + elif chfreqs is not None: + warning("sitemap plugin: SITEMAP['changefreqs'] must be a dict") + warning("sitemap plugin: using the default values") + + + + def write_url(self, page, fd): + + if getattr(page, 'status', 'published') != 'published': + return + + page_path = os.path.join(self.output_path, page.url) + if not os.path.exists(page_path): + return + + lastmod = format_date(getattr(page, 'date', self.now)) + + if isinstance(page, contents.Article): + pri = self.priorities['articles'] + chfreq = self.changefreqs['articles'] + elif isinstance(page, contents.Page): + pri = self.priorities['pages'] + chfreq = self.changefreqs['pages'] + else: + pri = self.priorities['indexes'] + chfreq = self.changefreqs['indexes'] + + + if self.format == 'xml': + fd.write(XML_URL.format(self.siteurl, page.url, lastmod, chfreq, pri)) + else: + fd.write(self.siteurl + '/' + loc + '\n') + + + def generate_output(self, writer): + path = os.path.join(self.output_path, 'sitemap.{0}'.format(self.format)) + + pages = self.context['pages'] + self.context['articles'] \ + + [ c for (c, a) in self.context['categories']] \ + + [ t for (t, a) in self.context['tags']] \ + + [ a for (a, b) in self.context['authors']] + + for article in self.context['articles']: + pages += article.translations + + info('writing {0}'.format(path)) + + with open(path, 'w', encoding='utf-8') as fd: + + if self.format == 'xml': + fd.write(XML_HEADER) + else: + fd.write(TXT_HEADER.format(self.siteurl)) + + FakePage = collections.namedtuple('FakePage', + ['status', + 'date', + 'url']) + + for standard_page_url in ['index.html', + 'archives.html', + 'tags.html', + 'categories.html']: + fake = FakePage(status='published', + date=self.now, + url=standard_page_url) + self.write_url(fake, fd) + + for page in pages: + self.write_url(page, fd) + + if self.format == 'xml': + fd.write(XML_FOOTER) + + +def get_generators(generators): + return SitemapGenerator + + +def register(): + signals.get_generators.connect(get_generators) diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -16,7 +16,7 @@ import re from pelican.contents import Category, Tag, Author -from pelican.utils import get_date, open +from pelican.utils import get_date, pelican_open _METADATA_PROCESSORS = { @@ -102,7 +102,7 @@ def _parse_metadata(self, document): def _get_publisher(self, filename): 
extra_params = {'initial_header_level': '2'} pub = docutils.core.Publisher( - destination_class=docutils.io.StringOutput) + destination_class=docutils.io.StringOutput) pub.set_components('standalone', 'restructuredtext', 'html') pub.writer.translator_class = PelicanHTMLTranslator pub.process_programmatic_settings(None, extra_params, None) @@ -129,8 +129,13 @@ class MarkdownReader(Reader): def read(self, filename): """Parse content and metadata of markdown files""" - text = open(filename) - md = Markdown(extensions=set(self.extensions + ['meta'])) + markdown_extensions = self.settings.get('MARKDOWN_EXTENSIONS', []) + if isinstance(markdown_extensions, (str, unicode)): + markdown_extensions = [m.strip() for m in + markdown_extensions.split(',')] + text = pelican_open(filename) + md = Markdown(extensions=set( + self.extensions + markdown_extensions + ['meta'])) content = md.convert(text) metadata = {} @@ -146,7 +151,7 @@ class HtmlReader(Reader): def read(self, filename): """Parse content and metadata of (x)HTML files""" - with open(filename) as content: + with pelican_open(filename) as content: metadata = {'title': 'unnamed'} for i in self._re.findall(content): key = i.split(':')[0][5:].strip() diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import copy import imp import inspect import os @@ -31,6 +32,8 @@ 'SITENAME': 'A Pelican Blog', 'DISPLAY_PAGES_ON_MENU': True, 'PDF_GENERATOR': False, + 'OUTPUT_SOURCES': False, + 'OUTPUT_SOURCES_EXTENSION': '.text', 'DEFAULT_CATEGORY': 'misc', 'DEFAULT_DATE': 'fs', 'WITH_FUTURE_DATES': True, @@ -57,6 +60,7 @@ 'TAG_CLOUD_STEPS': 4, 'TAG_CLOUD_MAX_ITEMS': 100, 'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'archives'), + 'EXTRA_TEMPLATES_PATHS' : [], 'PAGINATED_DIRECT_TEMPLATES': ('index', ), 'PELICAN_CLASS': 'pelican.Pelican', 'DEFAULT_DATE_FORMAT': '%a %d %B %Y', @@ -74,29 +78,39 @@ 'SUMMARY_MAX_LENGTH': 50, 'WEBASSETS': False, 'PLUGINS': [], + 'MARKDOWN_EXTENSIONS': ['toc', ], } -def read_settings(filename=None): +def read_settings(filename=None, override=None): if filename: local_settings = get_settings_from_file(filename) + # Make the paths relative to the settings file + for p in ['PATH', 'OUTPUT_PATH', 'THEME']: + if p in local_settings and local_settings[p] is not None \ + and not isabs(local_settings[p]): + absp = os.path.abspath(os.path.normpath(os.path.join( + os.path.dirname(filename), local_settings[p]))) + if p != 'THEME' or os.path.exists(p): + local_settings[p] = absp else: - local_settings = _DEFAULT_CONFIG - configured_settings = configure_settings(local_settings, None, filename) - return configured_settings + local_settings = copy.deepcopy(_DEFAULT_CONFIG) + + if override: + local_settings.update(override) + + return configure_settings(local_settings) def get_settings_from_module(module=None, default_settings=_DEFAULT_CONFIG): """ Load settings from a module, returning a dict. 
- """ - context = default_settings.copy() + context = copy.deepcopy(default_settings) if module is not None: - context.update( - (k, v) for k, v in inspect.getmembers(module) if k.isupper() - ) + context.update( + (k, v) for k, v in inspect.getmembers(module) if k.isupper()) return context @@ -111,19 +125,23 @@ def get_settings_from_file(filename, default_settings=_DEFAULT_CONFIG): return get_settings_from_module(module, default_settings=default_settings) -def configure_settings(settings, default_settings=None, filename=None): - """Provide optimizations, error checking, and warnings for loaded settings""" - if default_settings is None: - default_settings = _DEFAULT_CONFIG - - # Make the paths relative to the settings file - if filename: - for path in ['PATH', 'OUTPUT_PATH']: - if path in settings: - if settings[path] is not None and not isabs(settings[path]): - settings[path] = os.path.abspath(os.path.normpath( - os.path.join(os.path.dirname(filename), settings[path])) - ) +def configure_settings(settings): + """ + Provide optimizations, error checking, and warnings for loaded settings + """ + if not 'PATH' in settings or not os.path.isdir(settings['PATH']): + raise Exception('You need to specify a path containing the content' + ' (see pelican --help for more information)') + + # find the theme in pelican.theme if the given one does not exists + if not os.path.isdir(settings['THEME']): + theme_path = os.sep.join([os.path.dirname( + os.path.abspath(__file__)), "themes/%s" % settings['THEME']]) + if os.path.exists(theme_path): + settings['THEME'] = theme_path + else: + raise Exception("Impossible to find the theme %s" + % settings['THEME']) # if locales is not a list, make it one locales = settings['LOCALE'] @@ -138,7 +156,7 @@ def configure_settings(settings, default_settings=None, filename=None): for locale_ in locales: try: locale.setlocale(locale.LC_ALL, locale_) - break # break if it is successfull + break # break if it is successful except locale.Error: pass else: @@ -174,4 +192,11 @@ def configure_settings(settings, default_settings=None, filename=None): logger.warn("You must install the webassets module to use WEBASSETS.") settings['WEBASSETS'] = False + if 'OUTPUT_SOURCES_EXTENSION' in settings: + if not isinstance(settings['OUTPUT_SOURCES_EXTENSION'], str): + settings['OUTPUT_SOURCES_EXTENSION'] = _DEFAULT_CONFIG['OUTPUT_SOURCES_EXTENSION'] + logger.warn("Detected misconfiguration with OUTPUT_SOURCES_EXTENSION." 
+ " falling back to the default extension " + + _DEFAULT_CONFIG['OUTPUT_SOURCES_EXTENSION']) + return settings diff --git a/pelican/signals.py b/pelican/signals.py --- a/pelican/signals.py +++ b/pelican/signals.py @@ -1,7 +1,10 @@ from blinker import signal initialized = signal('pelican_initialized') +finalized = signal('pelican_finalized') article_generate_context = signal('article_generate_context') article_generator_init = signal('article_generator_init') +get_generators = signal('get_generators') pages_generate_context = signal('pages_generate_context') pages_generator_init = signal('pages_generator_init') +content_object_init = signal('content_object_init') diff --git a/pelican/tools/pelican_import.py b/pelican/tools/pelican_import.py --- a/pelican/tools/pelican_import.py +++ b/pelican/tools/pelican_import.py @@ -184,6 +184,8 @@ def build_header(title, date, author, categories, tags): header = '%s\n%s\n' % (title, '#' * len(title)) if date: header += ':date: %s\n' % date + if author: + header += ':author: %s\n' % author if categories: header += ':category: %s\n' % ', '.join(categories) if tags: @@ -196,6 +198,8 @@ def build_markdown_header(title, date, author, categories, tags): header = 'Title: %s\n' % title if date: header += 'Date: %s\n' % date + if author: + header += 'Author: %s\n' % author if categories: header += 'Category: %s\n' % ', '.join(categories) if tags: @@ -216,7 +220,7 @@ def fields2pelican(fields, out_markup, output_path, dircat=False, strip_raw=Fals filename = os.path.basename(filename) # option to put files in directories with categories names - if dircat and (len(categories) == 1): + if dircat and (len(categories) > 0): catname = slugify(categories[0]) out_filename = os.path.join(output_path, catname, filename+ext) if not os.path.isdir(os.path.join(output_path, catname)): diff --git a/pelican/tools/pelican_quickstart.py b/pelican/tools/pelican_quickstart.py --- a/pelican/tools/pelican_quickstart.py +++ b/pelican/tools/pelican_quickstart.py @@ -167,7 +167,7 @@ def main(): if ask('Do you want to upload your website using FTP?', answer=bool, default=False): CONF['ftp_host'] = ask('What is the hostname of your FTP server?', str, CONF['ftp_host']) CONF['ftp_user'] = ask('What is your username on that server?', str, CONF['ftp_user']) - CONF['ftp_target_dir'] = ask('Where do you want to put your web site on that server?', str, CONF['ftp_target_dir']) + CONF['ftp_target_dir'] = ask('Where do you want to put your web site on that server?', str, CONF['ftp_target_dir']) if ask('Do you want to upload your website using SSH?', answer=bool, default=False): CONF['ssh_host'] = ask('What is the hostname of your SSH server?', str, CONF['ssh_host']) CONF['ssh_port'] = ask('What is the port of your SSH server?', int, CONF['ssh_port']) @@ -188,9 +188,12 @@ def main(): try: with open(os.path.join(CONF['basedir'], 'pelicanconf.py'), 'w') as fd: + conf_python = dict() + for key, value in CONF.iteritems(): + conf_python[key] = repr(value) for line in get_template('pelicanconf.py'): template = string.Template(line) - fd.write(template.safe_substitute(CONF)) + fd.write(template.safe_substitute(conf_python)) fd.close() except OSError, e: print('Error: {0}'.format(e)) @@ -215,11 +218,16 @@ def main(): print('Error: {0}'.format(e)) if develop: + conf_shell = dict() + for key, value in CONF.iteritems(): + if isinstance(value, basestring) and ' ' in value: + value = '"' + value.replace('"', '\\"') + '"' + conf_shell[key] = value try: with open(os.path.join(CONF['basedir'], 'develop_server.sh'), 
'w') as fd: for line in get_template('develop_server.sh'): template = string.Template(line) - fd.write(template.safe_substitute(CONF)) + fd.write(template.safe_substitute(conf_shell)) fd.close() os.chmod((os.path.join(CONF['basedir'], 'develop_server.sh')), 0755) except OSError, e: diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -4,10 +4,11 @@ import pytz import shutil import logging +import errno from collections import defaultdict, Hashable from functools import partial -from codecs import open as _open +from codecs import open from datetime import datetime from itertools import groupby from jinja2 import Markup @@ -15,6 +16,7 @@ logger = logging.getLogger(__name__) + class NoFilesError(Exception): pass @@ -64,9 +66,9 @@ def get_date(string): raise ValueError("'%s' is not a valid date" % string) -def open(filename): +def pelican_open(filename): """Open a file and return it's content""" - return _open(filename, encoding='utf-8').read() + return open(filename, encoding='utf-8').read() def slugify(value): @@ -123,6 +125,17 @@ def copy(path, source, destination, destination_path=None, overwrite=False): def clean_output_dir(path): """Remove all the files from the output directory""" + if not os.path.exists(path): + logger.debug("Directory already removed: %s" % path) + return + + if not os.path.isdir(path): + try: + os.remove(path) + except Exception, e: + logger.error("Unable to delete file %s; %e" % path, e) + return + # remove all the existing content from the output folder for filename in os.listdir(path): file = os.path.join(path, filename) @@ -306,3 +319,11 @@ def set_date_tzinfo(d, tz_name=None): return tz.localize(d) else: return d + + +def mkdir_p(path): + try: + os.makedirs(path) + except OSError, e: + if e.errno != errno.EEXIST: + raise diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -144,9 +144,9 @@ def _write_file(template, localcontext, output_path, name): paginators[key] = Paginator(object_list, len(object_list)) # generated pages, and write + name_root, ext = os.path.splitext(name) for page_num in range(paginators.values()[0].num_pages): paginated_localcontext = localcontext.copy() - paginated_name = name for key in paginators.iterkeys(): paginator = paginators[key] page = paginator.page(page_num + 1) @@ -154,9 +154,10 @@ def _write_file(template, localcontext, output_path, name): {'%s_paginator' % key: paginator, '%s_page' % key: page}) if page_num > 0: - ext = '.' + paginated_name.rsplit('.')[-1] - paginated_name = paginated_name.replace(ext, - '%s%s' % (page_num + 1, ext)) + paginated_name = '%s%s%s' % ( + name_root, page_num + 1, ext) + else: + paginated_name = name _write_file(template, paginated_localcontext, self.output_path, paginated_name)
diff --git a/samples/content/pages/test_page.rst b/samples/content/pages/test_page.rst --- a/samples/content/pages/test_page.rst +++ b/samples/content/pages/test_page.rst @@ -5,7 +5,7 @@ This is a test page Just an image. -.. image:: pictures/Fat_Cat.jpg +.. image:: |filename|/pictures/Fat_Cat.jpg :height: 450 px :width: 600 px :alt: alternate text diff --git a/tests/content/article_with_markdown_markup_extensions.md b/tests/content/article_with_markdown_markup_extensions.md new file mode 100644 --- /dev/null +++ b/tests/content/article_with_markdown_markup_extensions.md @@ -0,0 +1,8 @@ +Title: Test Markdown extensions + +[TOC] + +## Level1 + +### Level2 + diff --git a/tests/output/basic/drafts/a-draft-article.html b/tests/output/basic/drafts/a-draft-article.html --- a/tests/output/basic/drafts/a-draft-article.html +++ b/tests/output/basic/drafts/a-draft-article.html @@ -55,8 +55,8 @@ <h1 class="entry-title"> <div class="entry-content"> <footer class="post-info"> - <abbr class="published" title="2012-07-26T22:04:43.481849"> - Thu 26 July 2012 + <abbr class="published" title="2012-09-17T15:02:39.591671"> + Mon 17 September 2012 </abbr> diff --git a/tests/output/basic/feeds/all-fr.atom.xml b/tests/output/basic/feeds/all-fr.atom.xml --- a/tests/output/basic/feeds/all-fr.atom.xml +++ b/tests/output/basic/feeds/all-fr.atom.xml @@ -1,4 +1,4 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all-fr.atom.xml" rel="self"></link><id>/</id><updated>2012-07-26T22:04:43Z</updated><entry><title>Trop bien !</title><link href="/oh-yeah-fr.html" rel="alternate"></link><updated>2012-07-26T22:04:43Z</updated><author><name>Dummy Author</name></author><id>tag:,2012-07-26:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all-fr.atom.xml" rel="self"></link><id>/</id><updated>2012-09-17T15:02:39Z</updated><entry><title>Trop bien !</title><link href="/oh-yeah-fr.html" rel="alternate"></link><updated>2012-09-17T15:02:39Z</updated><author><name>Dummy Author</name></author><id>tag:,2012-09-17:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; </summary></entry><entry><title>Deuxième article</title><link href="/second-article-fr.html" rel="alternate"></link><updated>2012-02-29T00:00:00Z</updated><author><name>Dummy Author</name></author><id>tag:,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry></feed> \ No newline at end of file diff --git a/tests/output/basic/oh-yeah-fr.html b/tests/output/basic/oh-yeah-fr.html --- a/tests/output/basic/oh-yeah-fr.html +++ b/tests/output/basic/oh-yeah-fr.html @@ -55,8 +55,8 @@ <h1 class="entry-title"> <div class="entry-content"> <footer class="post-info"> - <abbr class="published" title="2012-07-26T22:04:43.481849"> - Thu 26 July 2012 + <abbr class="published" title="2012-09-17T15:02:39.591671"> + Mon 17 September 2012 </abbr> diff --git a/tests/output/basic/tag/bar.html b/tests/output/basic/tag/bar.html --- a/tests/output/basic/tag/bar.html +++ b/tests/output/basic/tag/bar.html @@ -51,7 +51,7 @@ <h1><a href="../.">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - 
<h1 class="entry-title"><a href=".././second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href=".././second-article-fr.html">Deuxième article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -68,10 +68,10 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> +</footer><!-- /.post-info --><p>Ceci est un article, en français.</p> </article> @@ -92,8 +92,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -113,13 +113,13 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> + <a class="readmore" href=".././second-article.html">read more</a> </div><!-- /.entry-content --> </article></li> diff --git a/tests/output/basic/tag/baz.html b/tests/output/basic/tag/baz.html --- a/tests/output/basic/tag/baz.html +++ b/tests/output/basic/tag/baz.html @@ -51,7 +51,7 @@ <h1><a href="../.">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href=".././second-article-fr.html">Deuxième article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -68,10 +68,10 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> +</footer><!-- /.post-info --><p>Ceci est un article, en français.</p> </article> @@ -92,8 +92,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -113,13 +113,13 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> + <a class="readmore" href=".././second-article.html">read more</a> </div><!-- /.entry-content --> </article></li> diff --git a/tests/output/basic/tag/foo.html b/tests/output/basic/tag/foo.html --- a/tests/output/basic/tag/foo.html +++ b/tests/output/basic/tag/foo.html @@ -51,7 +51,7 @@ <h1><a href="../.">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href=".././second-article.html">Second 
article</a></h1> + <h1 class="entry-title"><a href=".././second-article-fr.html">Deuxième article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -68,10 +68,10 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> +</footer><!-- /.post-info --><p>Ceci est un article, en français.</p> </article> @@ -92,8 +92,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -113,13 +113,13 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> + <a class="readmore" href=".././second-article.html">read more</a> </div><!-- /.entry-content --> </article></li> diff --git a/tests/output/basic/theme/css/main.css b/tests/output/basic/theme/css/main.css --- a/tests/output/basic/theme/css/main.css +++ b/tests/output/basic/theme/css/main.css @@ -97,7 +97,7 @@ dl {margin: 0 0 1.5em 0;} dt {font-weight: bold;} dd {margin-left: 1.5em;} -pre{background-color: #000; padding: 10px; color: #fff; margin: 10px; overflow: auto;} +pre{background-color: rgb(238, 238, 238); padding: 10px; margin: 10px; overflow: auto;} /* Quotes */ blockquote { @@ -308,7 +308,8 @@ img.left, figure.left {float: left; margin: 0 2em 2em 0;} .social a[type$='atom+xml'], .social a[type$='rss+xml'] {background-image: url('../images/icons/rss.png');} .social a[href*='twitter.com'] {background-image: url('../images/icons/twitter.png');} .social a[href*='linkedin.com'] {background-image: url('../images/icons/linkedin.png');} - .social a[href*='gitorious.org'] {background-image: url('../images/icons/gitorious.org');} + .social a[href*='gitorious.org'] {background-image: url('../images/icons/gitorious.png');} + .social a[href*='gittip.com'] {background-image: url('../images/icons/gittip.png');} /* About diff --git a/tests/output/basic/theme/css/pygment.css b/tests/output/basic/theme/css/pygment.css --- a/tests/output/basic/theme/css/pygment.css +++ b/tests/output/basic/theme/css/pygment.css @@ -1,5 +1,5 @@ .hll { -background-color:#FFFFCC; +background-color:#eee; } .c { color:#408090; diff --git a/tests/output/basic/theme/images/icons/gittip.png b/tests/output/basic/theme/images/icons/gittip.png new file mode 100644 Binary files /dev/null and b/tests/output/basic/theme/images/icons/gittip.png differ diff --git a/tests/output/custom/author/alexis-metaireau2.html b/tests/output/custom/author/alexis-metaireau2.html --- a/tests/output/custom/author/alexis-metaireau2.html +++ b/tests/output/custom/author/alexis-metaireau2.html @@ -92,7 +92,7 @@ <h1><a href=".././oh-yeah.html" rel="bookmark" <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !</p> -<img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> </div> <a class="readmore" href=".././oh-yeah.html">read more</a> diff --git a/tests/output/custom/category/bar.html b/tests/output/custom/category/bar.html --- a/tests/output/custom/category/bar.html +++ b/tests/output/custom/category/bar.html @@ -83,7 +83,7 @@ <h1 class="entry-title"><a href=".././oh-yeah.html">Oh yeah !</a></h1> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !</p> -<img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> </div> <p>There are <a href=".././oh-yeah.html#disqus_thread">comments</a>.</p> </article> diff --git a/tests/output/custom/category/yeah.html b/tests/output/custom/category/yeah.html --- a/tests/output/custom/category/yeah.html +++ b/tests/output/custom/category/yeah.html @@ -78,8 +78,8 @@ <h1 class="entry-title"><a href=".././this-is-a-super-article.html">This is a su <div class="section" id="this-is-a-simple-title"> <h2>This is a simple title</h2> <p>And here comes the cool <a class="reference external" href="http://books.couchdb.org/relax/design-documents/views">stuff</a>.</p> -<img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> -<img alt="alternate text" src="pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /> <pre class="literal-block"> &gt;&gt;&gt; from ipdb import set_trace &gt;&gt;&gt; set_trace() diff --git a/tests/output/custom/feeds/all-en.atom.xml b/tests/output/custom/feeds/all-en.atom.xml --- a/tests/output/custom/feeds/all-en.atom.xml +++ b/tests/output/custom/feeds/all-en.atom.xml @@ -9,8 +9,8 @@ &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; -&lt;img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;img alt="alternate text" src="pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; &lt;pre class="literal-block"&gt; &amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace &amp;gt;&amp;gt;&amp;gt; set_trace() @@ -21,7 +21,7 @@ &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; </summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/tests/output/custom/feeds/all.atom.xml b/tests/output/custom/feeds/all.atom.xml --- a/tests/output/custom/feeds/all.atom.xml +++ b/tests/output/custom/feeds/all.atom.xml @@ -9,8 +9,8 @@ &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; -&lt;img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;img alt="alternate text" src="pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; &lt;pre class="literal-block"&gt; &amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace &amp;gt;&amp;gt;&amp;gt; set_trace() @@ -21,7 +21,7 @@ &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; </summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/tests/output/custom/feeds/all.rss.xml b/tests/output/custom/feeds/all.rss.xml --- a/tests/output/custom/feeds/all.rss.xml +++ b/tests/output/custom/feeds/all.rss.xml @@ -9,8 +9,8 @@ &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; -&lt;img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;img alt="alternate text" src="pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; &lt;pre class="literal-block"&gt; &amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace &amp;gt;&amp;gt;&amp;gt; set_trace() @@ -21,7 +21,7 @@ &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item></channel></rss> \ No newline at end of file diff --git a/tests/output/custom/feeds/bar.atom.xml b/tests/output/custom/feeds/bar.atom.xml --- a/tests/output/custom/feeds/bar.atom.xml +++ b/tests/output/custom/feeds/bar.atom.xml @@ -3,6 +3,6 @@ &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; </summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry></feed> \ No newline at end of file diff --git a/tests/output/custom/feeds/bar.rss.xml b/tests/output/custom/feeds/bar.rss.xml --- a/tests/output/custom/feeds/bar.rss.xml +++ b/tests/output/custom/feeds/bar.rss.xml @@ -3,6 +3,6 @@ &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> \ No newline at end of file diff --git a/tests/output/custom/feeds/yeah.atom.xml b/tests/output/custom/feeds/yeah.atom.xml --- a/tests/output/custom/feeds/yeah.atom.xml +++ b/tests/output/custom/feeds/yeah.atom.xml @@ -3,8 +3,8 @@ &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; -&lt;img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;img alt="alternate text" src="pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; &lt;pre class="literal-block"&gt; &amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace &amp;gt;&amp;gt;&amp;gt; set_trace() diff --git a/tests/output/custom/feeds/yeah.rss.xml b/tests/output/custom/feeds/yeah.rss.xml --- a/tests/output/custom/feeds/yeah.rss.xml +++ b/tests/output/custom/feeds/yeah.rss.xml @@ -3,8 +3,8 @@ &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; -&lt;img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;img alt="alternate text" src="pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; &lt;pre class="literal-block"&gt; &amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace &amp;gt;&amp;gt;&amp;gt; set_trace() diff --git a/tests/output/custom/index2.html b/tests/output/custom/index2.html --- a/tests/output/custom/index2.html +++ b/tests/output/custom/index2.html @@ -163,7 +163,7 @@ <h1><a href="./oh-yeah.html" rel="bookmark" <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !</p> -<img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> </div> <a class="readmore" href="./oh-yeah.html">read more</a> diff --git a/tests/output/custom/oh-yeah.html b/tests/output/custom/oh-yeah.html --- a/tests/output/custom/oh-yeah.html +++ b/tests/output/custom/oh-yeah.html @@ -86,7 +86,7 @@ <h1 class="entry-title"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !</p> -<img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> </div> </div><!-- /.entry-content --> diff --git a/tests/output/custom/pages/this-is-a-test-page.html b/tests/output/custom/pages/this-is-a-test-page.html --- a/tests/output/custom/pages/this-is-a-test-page.html +++ b/tests/output/custom/pages/this-is-a-test-page.html @@ -56,7 +56,7 @@ <h1><a href="../.">Alexis' log </a></h1> <h1 class="entry-title">This is a test page</h1> <p>Just an image.</p> -<img alt="alternate text" src="pictures/Fat_Cat.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Fat_Cat.jpg" style="width: 600px; height: 450px;" /> </section> diff --git a/tests/output/custom/tag/bar.html b/tests/output/custom/tag/bar.html --- a/tests/output/custom/tag/bar.html +++ b/tests/output/custom/tag/bar.html @@ -59,7 +59,7 @@ <h1><a href="../.">Alexis' log </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href=".././second-article-fr.html">Deuxième article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -76,11 +76,11 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> -<p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> +</footer><!-- /.post-info --><p>Ceci est un article, en français.</p> +<p>There are <a href=".././second-article-fr.html#disqus_thread">comments</a>.</p> </article> </aside><!-- /#featured --> @@ -100,8 +100,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -121,14 +121,14 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> - <p>There are <a href=".././second-article-fr.html#disqus_thread">comments</a>.</p> + <a class="readmore" href=".././second-article.html">read more</a> + <p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> </div><!-- 
/.entry-content --> </article></li> @@ -205,7 +205,7 @@ <h1><a href=".././oh-yeah.html" rel="bookmark" <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !</p> -<img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> </div> <a class="readmore" href=".././oh-yeah.html">read more</a> diff --git a/tests/output/custom/tag/baz.html b/tests/output/custom/tag/baz.html --- a/tests/output/custom/tag/baz.html +++ b/tests/output/custom/tag/baz.html @@ -59,7 +59,7 @@ <h1><a href="../.">Alexis' log </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href=".././second-article-fr.html">Deuxième article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -76,11 +76,11 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> -<p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> +</footer><!-- /.post-info --><p>Ceci est un article, en français.</p> +<p>There are <a href=".././second-article-fr.html#disqus_thread">comments</a>.</p> </article> </aside><!-- /#featured --> @@ -100,8 +100,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -121,14 +121,14 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> - <p>There are <a href=".././second-article-fr.html#disqus_thread">comments</a>.</p> + <a class="readmore" href=".././second-article.html">read more</a> + <p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> </article></li> diff --git a/tests/output/custom/tag/foo.html b/tests/output/custom/tag/foo.html --- a/tests/output/custom/tag/foo.html +++ b/tests/output/custom/tag/foo.html @@ -59,7 +59,7 @@ <h1><a href="../.">Alexis' log </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href=".././second-article-fr.html">Deuxième article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 @@ -76,11 +76,11 @@ <h1 class="entry-title"><a href=".././second-article.html">Second article</a></h Translations: - <a href=".././second-article-fr.html">fr</a> + <a href=".././second-article.html">en</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> -<p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> +</footer><!-- /.post-info 
--><p>Ceci est un article, en français.</p> +<p>There are <a href=".././second-article-fr.html#disqus_thread">comments</a>.</p> </article> </aside><!-- /#featured --> @@ -100,8 +100,8 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href=".././second-article-fr.html" rel="bookmark" - title="Permalink to Deuxième article">Deuxième article</a></h1> + <h1><a href=".././second-article.html" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> </header> <div class="entry-content"> @@ -121,14 +121,14 @@ <h1><a href=".././second-article-fr.html" rel="bookmark" Translations: - <a href=".././second-article.html">en</a> + <a href=".././second-article-fr.html">fr</a> </footer><!-- /.post-info --> - <p>Ceci est un article, en français.</p> + <p>This is some article, in english</p> - <a class="readmore" href=".././second-article-fr.html">read more</a> - <p>There are <a href=".././second-article-fr.html#disqus_thread">comments</a>.</p> + <a class="readmore" href=".././second-article.html">read more</a> + <p>There are <a href=".././second-article.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> </article></li> diff --git a/tests/output/custom/tag/foobar.html b/tests/output/custom/tag/foobar.html --- a/tests/output/custom/tag/foobar.html +++ b/tests/output/custom/tag/foobar.html @@ -78,8 +78,8 @@ <h1 class="entry-title"><a href=".././this-is-a-super-article.html">This is a su <div class="section" id="this-is-a-simple-title"> <h2>This is a simple title</h2> <p>And here comes the cool <a class="reference external" href="http://books.couchdb.org/relax/design-documents/views">stuff</a>.</p> -<img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> -<img alt="alternate text" src="pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /> <pre class="literal-block"> &gt;&gt;&gt; from ipdb import set_trace &gt;&gt;&gt; set_trace() diff --git a/tests/output/custom/tag/oh.html b/tests/output/custom/tag/oh.html --- a/tests/output/custom/tag/oh.html +++ b/tests/output/custom/tag/oh.html @@ -83,7 +83,7 @@ <h1 class="entry-title"><a href=".././oh-yeah.html">Oh yeah !</a></h1> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !</p> -<img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> </div> <p>There are <a href=".././oh-yeah.html#disqus_thread">comments</a>.</p> </article> diff --git a/tests/output/custom/tag/yeah.html b/tests/output/custom/tag/yeah.html --- a/tests/output/custom/tag/yeah.html +++ b/tests/output/custom/tag/yeah.html @@ -83,7 +83,7 @@ <h1 class="entry-title"><a href=".././oh-yeah.html">Oh yeah !</a></h1> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !</p> -<img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> </div> <p>There are <a href=".././oh-yeah.html#disqus_thread">comments</a>.</p> </article> diff --git a/tests/output/custom/theme/css/main.css b/tests/output/custom/theme/css/main.css --- a/tests/output/custom/theme/css/main.css +++ b/tests/output/custom/theme/css/main.css @@ -97,7 +97,7 @@ dl {margin: 0 0 1.5em 0;} dt {font-weight: bold;} dd {margin-left: 1.5em;} -pre{background-color: #000; padding: 10px; color: #fff; margin: 10px; overflow: auto;} +pre{background-color: rgb(238, 238, 238); padding: 10px; margin: 10px; overflow: auto;} /* Quotes */ blockquote { @@ -308,7 +308,8 @@ img.left, figure.left {float: left; margin: 0 2em 2em 0;} .social a[type$='atom+xml'], .social a[type$='rss+xml'] {background-image: url('../images/icons/rss.png');} .social a[href*='twitter.com'] {background-image: url('../images/icons/twitter.png');} .social a[href*='linkedin.com'] {background-image: url('../images/icons/linkedin.png');} - .social a[href*='gitorious.org'] {background-image: url('../images/icons/gitorious.org');} + .social a[href*='gitorious.org'] {background-image: url('../images/icons/gitorious.png');} + .social a[href*='gittip.com'] {background-image: url('../images/icons/gittip.png');} /* About diff --git a/tests/output/custom/theme/css/pygment.css b/tests/output/custom/theme/css/pygment.css --- a/tests/output/custom/theme/css/pygment.css +++ b/tests/output/custom/theme/css/pygment.css @@ -1,5 +1,5 @@ .hll { -background-color:#FFFFCC; +background-color:#eee; } .c { color:#408090; diff --git a/tests/output/custom/theme/images/icons/gittip.png b/tests/output/custom/theme/images/icons/gittip.png new file mode 100644 Binary files /dev/null and b/tests/output/custom/theme/images/icons/gittip.png differ diff --git a/tests/output/custom/this-is-a-super-article.html b/tests/output/custom/this-is-a-super-article.html --- a/tests/output/custom/this-is-a-super-article.html +++ b/tests/output/custom/this-is-a-super-article.html @@ -81,8 +81,8 @@ <h1 class="entry-title"> <div class="section" id="this-is-a-simple-title"> <h2>This is a simple title</h2> <p>And here comes the cool <a class="reference external" href="http://books.couchdb.org/relax/design-documents/views">stuff</a>.</p> -<img alt="alternate text" src="pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> -<img alt="alternate text" src="pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="http://blog.notmyidea.org/static/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /> <pre class="literal-block"> &gt;&gt;&gt; from ipdb import set_trace &gt;&gt;&gt; set_trace() diff --git a/tests/test_contents.py b/tests/test_contents.py --- a/tests/test_contents.py +++ b/tests/test_contents.py @@ -5,7 +5,7 @@ from pelican.contents import Page, Article from pelican.settings import _DEFAULT_CONFIG from pelican.utils import truncate_html_words - +from pelican.signals import content_object_init from jinja2.utils import generate_lorem_ipsum # generate one paragraph, enclosed with <p> @@ -158,6 +158,17 @@ def _copy_page_kwargs(self): return page_kwargs + def test_signal(self): + """If a title is given, it should be used to generate the slug.""" + + 
def receiver_test_function(sender,instance): + pass + + content_object_init.connect(receiver_test_function ,sender=Page) + page = Page(**self.page_kwargs) + self.assertTrue(content_object_init.has_receivers_for(Page)) + + class TestArticle(TestPage): def test_template(self): """ diff --git a/tests/test_generators.py b/tests/test_generators.py --- a/tests/test_generators.py +++ b/tests/test_generators.py @@ -69,6 +69,7 @@ def test_generate_context(self): [u'Article title', 'published', 'Default', 'article'], [u'Article with template', 'published', 'Default', 'custom'], [u'Test md File', 'published', 'test', 'article'], + [u'Test Markdown extensions', 'published', u'Default', 'article'], [u'This is a super article !', 'published', 'Yeah', 'article'], [u'This is an article with category !', 'published', 'yeah', 'article'], [u'This is an article without category !', 'published', 'Default', 'article'], diff --git a/tests/test_pelican.py b/tests/test_pelican.py --- a/tests/test_pelican.py +++ b/tests/test_pelican.py @@ -55,7 +55,11 @@ def test_basic_generation_works(self): with patch("pelican.contents.getenv") as mock_getenv: # force getenv('USER') to always return the same value mock_getenv.return_value = "Dummy Author" - pelican = Pelican(path=INPUT_PATH, output_path=self.temp_path) + settings = read_settings(filename=None, override={ + 'PATH': INPUT_PATH, + 'OUTPUT_PATH': self.temp_path, + }) + pelican = Pelican(settings=settings) pelican.run() diff = dircmp( self.temp_path, os.sep.join((OUTPUT_PATH, "basic"))) @@ -63,8 +67,11 @@ def test_basic_generation_works(self): def test_custom_generation_works(self): # the same thing with a specified set of settings should work - pelican = Pelican(path=INPUT_PATH, output_path=self.temp_path, - settings=read_settings(SAMPLE_CONFIG)) + settings = read_settings(filename=SAMPLE_CONFIG, override={ + 'PATH': INPUT_PATH, + 'OUTPUT_PATH': self.temp_path, + }) + pelican = Pelican(settings=settings) pelican.run() diff = dircmp(self.temp_path, os.sep.join((OUTPUT_PATH, "custom"))) self.assertFilesEqual(diff) diff --git a/tests/test_readers.py b/tests/test_readers.py --- a/tests/test_readers.py +++ b/tests/test_readers.py @@ -70,7 +70,7 @@ def test_typogrify(self): class MdReaderTest(unittest.TestCase): @unittest.skipUnless(readers.Markdown, "markdown isn't installed") - def test_article_with_md_extention(self): + def test_article_with_md_extension(self): # test to ensure the md extension is being processed by the correct reader reader = readers.MarkdownReader({}) content, metadata = reader.read(_filename('article_with_md_extension.md')) @@ -90,3 +90,22 @@ def test_article_with_mkd_extension(self): "<p>This is another markdown test file. 
Uses the mkd extension.</p>" self.assertEqual(content, expected) + + @unittest.skipUnless(readers.Markdown, "markdown isn't installed") + def test_article_with_markdown_markup_extension(self): + # test to ensure the markdown markup extension is being processed as expected + reader = readers.MarkdownReader({}) + reader.settings.update(dict(MARKDOWN_EXTENSIONS=['toc', ])) + content, metadata = reader.read(_filename('article_with_markdown_markup_extensions.md')) + expected = '<div class="toc">\n'\ + '<ul>\n'\ + '<li><a href="#level1">Level1</a><ul>\n'\ + '<li><a href="#level2">Level2</a></li>\n'\ + '</ul>\n'\ + '</li>\n'\ + '</ul>\n'\ + '</div>\n'\ + '<h2 id="level1">Level1</h2>\n'\ + '<h3 id="level2">Level2</h3>' + + self.assertEqual(content, expected) diff --git a/tests/test_settings.py b/tests/test_settings.py --- a/tests/test_settings.py +++ b/tests/test_settings.py @@ -1,6 +1,7 @@ +import copy from os.path import dirname, abspath, join -from pelican.settings import read_settings, configure_settings, _DEFAULT_CONFIG +from pelican.settings import read_settings, configure_settings, _DEFAULT_CONFIG, DEFAULT_THEME from .support import unittest @@ -31,21 +32,43 @@ def test_dont_copy_small_keys(self): def test_read_empty_settings(self): """providing no file should return the default values.""" settings = read_settings(None) - self.assertDictEqual(settings, _DEFAULT_CONFIG) + expected = copy.deepcopy(_DEFAULT_CONFIG) + expected["FEED_DOMAIN"] = '' #This is added by configure settings + self.maxDiff = None + self.assertDictEqual(settings, expected) + + def test_settings_return_independent(self): + """Make sure that the results from one settings call doesn't + effect past or future instances.""" + self.PATH = abspath(dirname(__file__)) + default_conf = join(self.PATH, 'default_conf.py') + settings = read_settings(default_conf) + settings['SITEURL'] = 'new-value' + new_settings = read_settings(default_conf) + self.assertNotEqual(new_settings['SITEURL'], settings['SITEURL']) + + def test_defaults_not_overwritten(self): + """This assumes 'SITENAME': 'A Pelican Blog'""" + settings = read_settings(None) + settings['SITENAME'] = 'Not a Pelican Blog' + self.assertNotEqual(settings['SITENAME'], _DEFAULT_CONFIG['SITENAME']) def test_configure_settings(self): """Manipulations to settings should be applied correctly.""" - # SITEURL should not have a trailing slash - settings = {'SITEURL': 'http://blog.notmyidea.org/', 'LOCALE': ''} + settings = { + 'SITEURL': 'http://blog.notmyidea.org/', + 'LOCALE': '', + 'PATH': '.', + 'THEME': DEFAULT_THEME, + } configure_settings(settings) + # SITEURL should not have a trailing slash self.assertEqual(settings['SITEURL'], 'http://blog.notmyidea.org') # FEED_DOMAIN, if undefined, should default to SITEURL - settings = {'SITEURL': 'http://blog.notmyidea.org', 'LOCALE': ''} - configure_settings(settings) self.assertEqual(settings['FEED_DOMAIN'], 'http://blog.notmyidea.org') - settings = {'FEED_DOMAIN': 'http://feeds.example.com', 'LOCALE': ''} + settings['FEED_DOMAIN'] = 'http://feeds.example.com' configure_settings(settings) self.assertEqual(settings['FEED_DOMAIN'], 'http://feeds.example.com') diff --git a/tests/test_utils.py b/tests/test_utils.py --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -112,3 +112,16 @@ def test_clean_output_dir(self): self.assertTrue(os.path.isdir(test_directory)) self.assertListEqual([], os.listdir(test_directory)) shutil.rmtree(test_directory) + + def test_clean_output_dir_not_there(self): + test_directory = 
os.path.join(os.path.dirname(__file__), 'does_not_exist') + utils.clean_output_dir(test_directory) + self.assertTrue(not os.path.exists(test_directory)) + + def test_clean_output_dir_is_file(self): + test_directory = os.path.join(os.path.dirname(__file__), 'this_is_a_file') + f = open(test_directory, 'w') + f.write('') + f.close() + utils.clean_output_dir(test_directory) + self.assertTrue(not os.path.exists(test_directory))
Confusing ARTICLE_DIR docs

From the settings.html docs page:

> ARTICLE_DIR ('') Directory to look at for articles.

This hung me up because I was trying to use `./content/posts/` as my ARTICLE_DIR. It turns out it just needs to be `posts`.

Change the default highlight theme

With the default "notmyidea" theme, I think we need to change a few things:

- The default pygment theme isn't that readable. What I've done on my weblog (http://blog.notmyidea.org) seems better; see this post, for instance: http://blog.notmyidea.org/refactoring-cornice.html
- I would also like to review the CSS and probably add some features to it.

What do you think? Any ideas?

--dir-cat flag in pelican-import doesn't work (fix inside)

I managed to get it working by changing https://github.com/getpelican/pelican/blob/master/pelican/tools/pelican_import.py#L219 to `if dircat and (len(categories) > 0):` and https://github.com/getpelican/pelican/blob/master/pelican/tools/pelican_import.py#L220 to `catname = categories[0]`.

I would open a pull request, but I'm a Python noob and I don't want to break things :wink2:
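Two minimal sketches of what the reports above describe. Both are hypothetical illustrations: the setting values, the helper's name, and its signature are assumptions, and only the two corrected lines quoted for pelican_import.py are taken from the report itself.

```python
# pelicanconf.py -- illustrative values only.
# ARTICLE_DIR is given relative to the content PATH, so no "./content/" prefix.
PATH = 'content'
ARTICLE_DIR = 'posts'   # i.e. content/posts/, not './content/posts/'
```

```python
import os

def imported_post_path(output_path, filename, categories, dircat):
    """Hypothetical helper mirroring the corrected --dir-cat logic.

    Only the `dircat and (len(categories) > 0)` test and the
    `catname = categories[0]` assignment come from the report above;
    the helper itself is a sketch, not the actual pelican_import.py code.
    """
    if dircat and len(categories) > 0:
        catname = categories[0]           # first category becomes the subdirectory
        return os.path.join(output_path, catname, filename)
    return os.path.join(output_path, filename)
```

For example, `imported_post_path('output', 'my-post.rst', ['python'], dircat=True)` returns `output/python/my-post.rst` on POSIX paths, while passing `dircat=False` keeps the file at the top of the output directory.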
2012-10-16T00:06:43Z
[]
[]
getpelican/pelican
837
getpelican__pelican-837
[ "829" ]
679d4aa801993c2ab8ac72d73f110fd364d84127
diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -66,7 +66,7 @@ 'YEAR_ARCHIVE_SAVE_AS': False, 'MONTH_ARCHIVE_SAVE_AS': False, 'DAY_ARCHIVE_SAVE_AS': False, - 'RELATIVE_URLS': True, + 'RELATIVE_URLS': False, 'DEFAULT_LANG': 'en', 'TAG_CLOUD_STEPS': 4, 'TAG_CLOUD_MAX_ITEMS': 100, diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -89,7 +89,7 @@ def write_feed(self, elements, context, path=None, feed_type='atom'): finally: locale.setlocale(locale.LC_ALL, old_locale) - def write_file(self, name, template, context, relative_urls=True, + def write_file(self, name, template, context, relative_urls=False, paginated=None, **kwargs): """Render the template and write the file. diff --git a/samples/pelican.conf.py b/samples/pelican.conf.py --- a/samples/pelican.conf.py +++ b/samples/pelican.conf.py @@ -6,6 +6,9 @@ SITEURL = 'http://blog.notmyidea.org' TIMEZONE = "Europe/Paris" +# can be useful in development, but set to False when you're ready to publish +RELATIVE_URLS = True + GITHUB_URL = 'http://github.com/ametaireau/' DISQUS_SITENAME = "blog-notmyidea" PDF_GENERATOR = False
diff --git a/pelican/tests/output/basic/a-markdown-powered-article.html b/pelican/tests/output/basic/a-markdown-powered-article.html --- a/pelican/tests/output/basic/a-markdown-powered-article.html +++ b/pelican/tests/output/basic/a-markdown-powered-article.html @@ -3,38 +3,38 @@ <head> <title>A markdown powered article</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li class="active"><a href="./category/cat1.html">cat1</a></li> - <li ><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li class="active"><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <section id="content" class="body"> <article> <header> <h1 class="entry-title"> - <a href="./a-markdown-powered-article.html" rel="bookmark" + <a href="/a-markdown-powered-article.html" rel="bookmark" title="Permalink to A markdown powered article">A markdown powered article</a></h1> </header> @@ -44,11 +44,11 @@ <h1 class="entry-title"> Wed 20 April 2011 </abbr> - <p>In <a href="./category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. 
</p> </footer><!-- /.post-info --> <p>You're mutually oblivious.</p> -<p><a href="./unbelievable.html">a root-relative link to unbelievable</a> -<a href="./unbelievable.html">a file-relative link to unbelievable</a></p> +<p><a href="/unbelievable.html">a root-relative link to unbelievable</a> +<a href="/unbelievable.html">a file-relative link to unbelievable</a></p> </div><!-- /.entry-content --> </article> diff --git a/pelican/tests/output/basic/archives.html b/pelican/tests/output/basic/archives.html --- a/pelican/tests/output/basic/archives.html +++ b/pelican/tests/output/basic/archives.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li ><a href="./category/cat1.html">cat1</a></li> - <li ><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <section id="content" class="body"> @@ -35,23 +35,23 @@ <h1>Archives for A Pelican Blog</h1> <dl> <dt>Fri 30 November 2012</dt> - <dd><a href="./filename_metadata-example.html">FILENAME_METADATA example</a></dd> + <dd><a href="/filename_metadata-example.html">FILENAME_METADATA example</a></dd> <dt>Wed 29 February 2012</dt> - <dd><a href="./second-article.html">Second article</a></dd> + <dd><a href="/second-article.html">Second article</a></dd> <dt>Wed 20 April 2011</dt> - <dd><a href="./a-markdown-powered-article.html">A markdown powered article</a></dd> + <dd><a href="/a-markdown-powered-article.html">A markdown powered article</a></dd> <dt>Thu 17 February 2011</dt> - <dd><a href="./article-1.html">Article 1</a></dd> + <dd><a href="/article-1.html">Article 1</a></dd> <dt>Thu 17 February 2011</dt> - <dd><a href="./article-2.html">Article 2</a></dd> + <dd><a href="/article-2.html">Article 2</a></dd> <dt>Thu 17 February 2011</dt> - <dd><a href="./article-3.html">Article 3</a></dd> + <dd><a href="/article-3.html">Article 3</a></dd> <dt>Thu 02 December 2010</dt> - <dd><a 
href="./this-is-a-super-article.html">This is a super article !</a></dd> + <dd><a href="/this-is-a-super-article.html">This is a super article !</a></dd> <dt>Wed 20 October 2010</dt> - <dd><a href="./oh-yeah.html">Oh yeah !</a></dd> + <dd><a href="/oh-yeah.html">Oh yeah !</a></dd> <dt>Fri 15 October 2010</dt> - <dd><a href="./unbelievable.html">Unbelievable !</a></dd> + <dd><a href="/unbelievable.html">Unbelievable !</a></dd> </dl> </section> <section id="extras" class="body"> diff --git a/pelican/tests/output/basic/article-1.html b/pelican/tests/output/basic/article-1.html --- a/pelican/tests/output/basic/article-1.html +++ b/pelican/tests/output/basic/article-1.html @@ -3,38 +3,38 @@ <head> <title>Article 1</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li class="active"><a href="./category/cat1.html">cat1</a></li> - <li ><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li class="active"><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <section id="content" class="body"> <article> <header> <h1 class="entry-title"> - <a href="./article-1.html" rel="bookmark" + <a href="/article-1.html" rel="bookmark" title="Permalink to Article 1">Article 1</a></h1> </header> @@ -44,7 +44,7 @@ <h1 class="entry-title"> Thu 17 February 2011 </abbr> - <p>In <a href="./category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. 
</p> </footer><!-- /.post-info --> <p>Article 1</p> diff --git a/pelican/tests/output/basic/article-2.html b/pelican/tests/output/basic/article-2.html --- a/pelican/tests/output/basic/article-2.html +++ b/pelican/tests/output/basic/article-2.html @@ -3,38 +3,38 @@ <head> <title>Article 2</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li class="active"><a href="./category/cat1.html">cat1</a></li> - <li ><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li class="active"><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <section id="content" class="body"> <article> <header> <h1 class="entry-title"> - <a href="./article-2.html" rel="bookmark" + <a href="/article-2.html" rel="bookmark" title="Permalink to Article 2">Article 2</a></h1> </header> @@ -44,7 +44,7 @@ <h1 class="entry-title"> Thu 17 February 2011 </abbr> - <p>In <a href="./category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. 
</p> </footer><!-- /.post-info --> <p>Article 2</p> diff --git a/pelican/tests/output/basic/article-3.html b/pelican/tests/output/basic/article-3.html --- a/pelican/tests/output/basic/article-3.html +++ b/pelican/tests/output/basic/article-3.html @@ -3,38 +3,38 @@ <head> <title>Article 3</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li class="active"><a href="./category/cat1.html">cat1</a></li> - <li ><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li class="active"><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <section id="content" class="body"> <article> <header> <h1 class="entry-title"> - <a href="./article-3.html" rel="bookmark" + <a href="/article-3.html" rel="bookmark" title="Permalink to Article 3">Article 3</a></h1> </header> @@ -44,7 +44,7 @@ <h1 class="entry-title"> Thu 17 February 2011 </abbr> - <p>In <a href="./category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. 
</p> </footer><!-- /.post-info --> <p>Article 3</p> diff --git a/pelican/tests/output/basic/author/alexis-metaireau.html b/pelican/tests/output/basic/author/alexis-metaireau.html --- a/pelican/tests/output/basic/author/alexis-metaireau.html +++ b/pelican/tests/output/basic/author/alexis-metaireau.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog - Alexis Métaireau</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> @@ -35,17 +35,17 @@ <h1><a href="../">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="../oh-yeah.html">Oh yeah !</a></h1> + <h1 class="entry-title"><a href="/oh-yeah.html">Oh yeah !</a></h1> <footer class="post-info"> <abbr class="published" title="2010-10-20T10:14:00"> Wed 20 October 2010 </abbr> <address class="vcard author"> - By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="../category/bar.html">bar</a>. </p> -<p>tags: <a href="../tag/oh.html">oh</a><a href="../tag/bar.html">bar</a><a href="../tag/yeah.html">yeah</a></p> + <p>In <a href="/category/bar.html">bar</a>. </p> +<p>tags: <a href="/tag/oh.html">oh</a><a href="/tag/bar.html">bar</a><a href="/tag/yeah.html">yeah</a></p> </footer><!-- /.post-info --><div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
@@ -63,7 +63,7 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href="../this-is-a-super-article.html" rel="bookmark" + <h1><a href="/this-is-a-super-article.html" rel="bookmark" title="Permalink to This is a super article !">This is a super article !</a></h1> </header> @@ -74,14 +74,14 @@ <h1><a href="../this-is-a-super-article.html" rel="bookmark" </abbr> <address class="vcard author"> - By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="../category/yeah.html">yeah</a>. </p> -<p>tags: <a href="../tag/foo.html">foo</a><a href="../tag/bar.html">bar</a><a href="../tag/foobar.html">foobar</a></p> + <p>In <a href="/category/yeah.html">yeah</a>. </p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/foobar.html">foobar</a></p> </footer><!-- /.post-info --> <p class="first last">Multi-line metadata should be supported as well as <strong>inline markup</strong>.</p> - <a class="readmore" href="../this-is-a-super-article.html">read more</a> + <a class="readmore" href="/this-is-a-super-article.html">read more</a> </div><!-- /.entry-content --> </article></li> </ol><!-- /#posts-list --> diff --git a/pelican/tests/output/basic/categories.html b/pelican/tests/output/basic/categories.html --- a/pelican/tests/output/basic/categories.html +++ b/pelican/tests/output/basic/categories.html @@ -3,38 +3,38 @@ <head> <title>A Pelican Blog</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li ><a href="./category/cat1.html">cat1</a></li> - <li ><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <ul> - <li><a href="./category/bar.html">bar</a></li> - <li><a href="./category/cat1.html">cat1</a></li> - <li><a href="./category/misc.html">misc</a></li> - <li><a href="./category/yeah.html">yeah</a></li> + <li><a 
href="/category/bar.html">bar</a></li> + <li><a href="/category/cat1.html">cat1</a></li> + <li><a href="/category/misc.html">misc</a></li> + <li><a href="/category/yeah.html">yeah</a></li> </ul> <section id="extras" class="body"> <div class="social"> diff --git a/pelican/tests/output/basic/category/bar.html b/pelican/tests/output/basic/category/bar.html --- a/pelican/tests/output/basic/category/bar.html +++ b/pelican/tests/output/basic/category/bar.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog - bar</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li class="active"><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li class="active"><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> @@ -35,17 +35,17 @@ <h1><a href="../">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="../oh-yeah.html">Oh yeah !</a></h1> + <h1 class="entry-title"><a href="/oh-yeah.html">Oh yeah !</a></h1> <footer class="post-info"> <abbr class="published" title="2010-10-20T10:14:00"> Wed 20 October 2010 </abbr> <address class="vcard author"> - By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="../category/bar.html">bar</a>. </p> -<p>tags: <a href="../tag/oh.html">oh</a><a href="../tag/bar.html">bar</a><a href="../tag/yeah.html">yeah</a></p> + <p>In <a href="/category/bar.html">bar</a>. </p> +<p>tags: <a href="/tag/oh.html">oh</a><a href="/tag/bar.html">bar</a><a href="/tag/yeah.html">yeah</a></p> </footer><!-- /.post-info --><div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
diff --git a/pelican/tests/output/basic/category/cat1.html b/pelican/tests/output/basic/category/cat1.html --- a/pelican/tests/output/basic/category/cat1.html +++ b/pelican/tests/output/basic/category/cat1.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog - cat1</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li class="active"><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li class="active"><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> @@ -35,17 +35,17 @@ <h1><a href="../">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="../a-markdown-powered-article.html">A markdown powered article</a></h1> + <h1 class="entry-title"><a href="/a-markdown-powered-article.html">A markdown powered article</a></h1> <footer class="post-info"> <abbr class="published" title="2011-04-20T00:00:00"> Wed 20 April 2011 </abbr> - <p>In <a href="../category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. </p> </footer><!-- /.post-info --><p>You're mutually oblivious.</p> -<p><a href="../unbelievable.html">a root-relative link to unbelievable</a> -<a href="../unbelievable.html">a file-relative link to unbelievable</a></p> </article> +<p><a href="/unbelievable.html">a root-relative link to unbelievable</a> +<a href="/unbelievable.html">a file-relative link to unbelievable</a></p> </article> </aside><!-- /#featured --> <section id="content" class="body"> <h1>Other articles</h1> @@ -56,7 +56,7 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href="../article-1.html" rel="bookmark" + <h1><a href="/article-1.html" rel="bookmark" title="Permalink to Article 1">Article 1</a></h1> </header> @@ -66,11 +66,11 @@ <h1><a href="../article-1.html" rel="bookmark" Thu 17 February 2011 </abbr> - <p>In <a href="../category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. 
</p> </footer><!-- /.post-info --> <p>Article 1</p> - <a class="readmore" href="../article-1.html">read more</a> + <a class="readmore" href="/article-1.html">read more</a> </div><!-- /.entry-content --> </article></li> @@ -78,7 +78,7 @@ <h1><a href="../article-1.html" rel="bookmark" <li><article class="hentry"> <header> - <h1><a href="../article-2.html" rel="bookmark" + <h1><a href="/article-2.html" rel="bookmark" title="Permalink to Article 2">Article 2</a></h1> </header> @@ -88,11 +88,11 @@ <h1><a href="../article-2.html" rel="bookmark" Thu 17 February 2011 </abbr> - <p>In <a href="../category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. </p> </footer><!-- /.post-info --> <p>Article 2</p> - <a class="readmore" href="../article-2.html">read more</a> + <a class="readmore" href="/article-2.html">read more</a> </div><!-- /.entry-content --> </article></li> @@ -100,7 +100,7 @@ <h1><a href="../article-2.html" rel="bookmark" <li><article class="hentry"> <header> - <h1><a href="../article-3.html" rel="bookmark" + <h1><a href="/article-3.html" rel="bookmark" title="Permalink to Article 3">Article 3</a></h1> </header> @@ -110,11 +110,11 @@ <h1><a href="../article-3.html" rel="bookmark" Thu 17 February 2011 </abbr> - <p>In <a href="../category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. </p> </footer><!-- /.post-info --> <p>Article 3</p> - <a class="readmore" href="../article-3.html">read more</a> + <a class="readmore" href="/article-3.html">read more</a> </div><!-- /.entry-content --> </article></li> </ol><!-- /#posts-list --> diff --git a/pelican/tests/output/basic/category/misc.html b/pelican/tests/output/basic/category/misc.html --- a/pelican/tests/output/basic/category/misc.html +++ b/pelican/tests/output/basic/category/misc.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog - misc</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li class="active"><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li 
class="active"><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> @@ -35,13 +35,13 @@ <h1><a href="../">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="../filename_metadata-example.html">FILENAME_METADATA example</a></h1> + <h1 class="entry-title"><a href="/filename_metadata-example.html">FILENAME_METADATA example</a></h1> <footer class="post-info"> <abbr class="published" title="2012-11-30T00:00:00"> Fri 30 November 2012 </abbr> - <p>In <a href="../category/misc.html">misc</a>. </p> + <p>In <a href="/category/misc.html">misc</a>. </p> </footer><!-- /.post-info --><p>Some cool stuff!</p> </article> @@ -55,7 +55,7 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href="../second-article.html" rel="bookmark" + <h1><a href="/second-article.html" rel="bookmark" title="Permalink to Second article">Second article</a></h1> </header> @@ -65,13 +65,13 @@ <h1><a href="../second-article.html" rel="bookmark" Wed 29 February 2012 </abbr> - <p>In <a href="../category/misc.html">misc</a>. </p> -<p>tags: <a href="../tag/foo.html">foo</a><a href="../tag/bar.html">bar</a><a href="../tag/baz.html">baz</a></p>Translations: - <a href="../second-article-fr.html">fr</a> + <p>In <a href="/category/misc.html">misc</a>. </p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/baz.html">baz</a></p>Translations: + <a href="/second-article-fr.html">fr</a> </footer><!-- /.post-info --> <p>This is some article, in english</p> - <a class="readmore" href="../second-article.html">read more</a> + <a class="readmore" href="/second-article.html">read more</a> </div><!-- /.entry-content --> </article></li> @@ -79,7 +79,7 @@ <h1><a href="../second-article.html" rel="bookmark" <li><article class="hentry"> <header> - <h1><a href="../unbelievable.html" rel="bookmark" + <h1><a href="/unbelievable.html" rel="bookmark" title="Permalink to Unbelievable !">Unbelievable !</a></h1> </header> @@ -89,13 +89,13 @@ <h1><a href="../unbelievable.html" rel="bookmark" Fri 15 October 2010 </abbr> - <p>In <a href="../category/misc.html">misc</a>. </p> + <p>In <a href="/category/misc.html">misc</a>. </p> </footer><!-- /.post-info --> <p>Or completely awesome. 
Depends the needs.</p> -<p><a class="reference external" href="../a-markdown-powered-article.html">a root-relative link to markdown-article</a> -<a class="reference external" href="../a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> +<p><a class="reference external" href="/a-markdown-powered-article.html">a root-relative link to markdown-article</a> +<a class="reference external" href="/a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> - <a class="readmore" href="../unbelievable.html">read more</a> + <a class="readmore" href="/unbelievable.html">read more</a> </div><!-- /.entry-content --> </article></li> </ol><!-- /#posts-list --> diff --git a/pelican/tests/output/basic/category/yeah.html b/pelican/tests/output/basic/category/yeah.html --- a/pelican/tests/output/basic/category/yeah.html +++ b/pelican/tests/output/basic/category/yeah.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog - yeah</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li class="active"><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li class="active"><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> @@ -35,17 +35,17 @@ <h1><a href="../">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="../this-is-a-super-article.html">This is a super article !</a></h1> + <h1 class="entry-title"><a href="/this-is-a-super-article.html">This is a super article !</a></h1> <footer class="post-info"> <abbr class="published" title="2010-12-02T10:14:00"> Thu 02 December 2010 </abbr> <address class="vcard author"> - By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="../category/yeah.html">yeah</a>. 
</p> -<p>tags: <a href="../tag/foo.html">foo</a><a href="../tag/bar.html">bar</a><a href="../tag/foobar.html">foobar</a></p> + <p>In <a href="/category/yeah.html">yeah</a>. </p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/foobar.html">foobar</a></p> </footer><!-- /.post-info --><p>Some content here !</p> <div class="section" id="this-is-a-simple-title"> <h2>This is a simple title</h2> diff --git a/pelican/tests/output/basic/filename_metadata-example.html b/pelican/tests/output/basic/filename_metadata-example.html --- a/pelican/tests/output/basic/filename_metadata-example.html +++ b/pelican/tests/output/basic/filename_metadata-example.html @@ -3,38 +3,38 @@ <head> <title>FILENAME_METADATA example</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li ><a href="./category/cat1.html">cat1</a></li> - <li class="active"><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li class="active"><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <section id="content" class="body"> <article> <header> <h1 class="entry-title"> - <a href="./filename_metadata-example.html" rel="bookmark" + <a href="/filename_metadata-example.html" rel="bookmark" title="Permalink to FILENAME_METADATA example">FILENAME_METADATA example</a></h1> </header> @@ -44,7 +44,7 @@ <h1 class="entry-title"> Fri 30 November 2012 </abbr> - <p>In <a href="./category/misc.html">misc</a>. </p> + <p>In <a href="/category/misc.html">misc</a>. 
</p> </footer><!-- /.post-info --> <p>Some cool stuff!</p> diff --git a/pelican/tests/output/basic/index.html b/pelican/tests/output/basic/index.html --- a/pelican/tests/output/basic/index.html +++ b/pelican/tests/output/basic/index.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li ><a href="./category/cat1.html">cat1</a></li> - <li ><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> @@ -35,13 +35,13 @@ <h1><a href="./">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="./filename_metadata-example.html">FILENAME_METADATA example</a></h1> + <h1 class="entry-title"><a href="/filename_metadata-example.html">FILENAME_METADATA example</a></h1> <footer class="post-info"> <abbr class="published" title="2012-11-30T00:00:00"> Fri 30 November 2012 </abbr> - <p>In <a href="./category/misc.html">misc</a>. </p> + <p>In <a href="/category/misc.html">misc</a>. </p> </footer><!-- /.post-info --><p>Some cool stuff!</p> </article> @@ -55,7 +55,7 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href="./second-article.html" rel="bookmark" + <h1><a href="/second-article.html" rel="bookmark" title="Permalink to Second article">Second article</a></h1> </header> @@ -65,13 +65,13 @@ <h1><a href="./second-article.html" rel="bookmark" Wed 29 February 2012 </abbr> - <p>In <a href="./category/misc.html">misc</a>. </p> -<p>tags: <a href="./tag/foo.html">foo</a><a href="./tag/bar.html">bar</a><a href="./tag/baz.html">baz</a></p>Translations: - <a href="./second-article-fr.html">fr</a> + <p>In <a href="/category/misc.html">misc</a>. 
</p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/baz.html">baz</a></p>Translations: + <a href="/second-article-fr.html">fr</a> </footer><!-- /.post-info --> <p>This is some article, in english</p> - <a class="readmore" href="./second-article.html">read more</a> + <a class="readmore" href="/second-article.html">read more</a> </div><!-- /.entry-content --> </article></li> @@ -79,7 +79,7 @@ <h1><a href="./second-article.html" rel="bookmark" <li><article class="hentry"> <header> - <h1><a href="./a-markdown-powered-article.html" rel="bookmark" + <h1><a href="/a-markdown-powered-article.html" rel="bookmark" title="Permalink to A markdown powered article">A markdown powered article</a></h1> </header> @@ -89,12 +89,12 @@ <h1><a href="./a-markdown-powered-article.html" rel="bookmark" Wed 20 April 2011 </abbr> - <p>In <a href="./category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. </p> </footer><!-- /.post-info --> <p>You're mutually oblivious.</p> -<p><a href="./unbelievable.html">a root-relative link to unbelievable</a> -<a href="./unbelievable.html">a file-relative link to unbelievable</a></p> - <a class="readmore" href="./a-markdown-powered-article.html">read more</a> +<p><a href="/unbelievable.html">a root-relative link to unbelievable</a> +<a href="/unbelievable.html">a file-relative link to unbelievable</a></p> + <a class="readmore" href="/a-markdown-powered-article.html">read more</a> </div><!-- /.entry-content --> </article></li> @@ -102,7 +102,7 @@ <h1><a href="./a-markdown-powered-article.html" rel="bookmark" <li><article class="hentry"> <header> - <h1><a href="./article-1.html" rel="bookmark" + <h1><a href="/article-1.html" rel="bookmark" title="Permalink to Article 1">Article 1</a></h1> </header> @@ -112,11 +112,11 @@ <h1><a href="./article-1.html" rel="bookmark" Thu 17 February 2011 </abbr> - <p>In <a href="./category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. </p> </footer><!-- /.post-info --> <p>Article 1</p> - <a class="readmore" href="./article-1.html">read more</a> + <a class="readmore" href="/article-1.html">read more</a> </div><!-- /.entry-content --> </article></li> @@ -124,7 +124,7 @@ <h1><a href="./article-1.html" rel="bookmark" <li><article class="hentry"> <header> - <h1><a href="./article-2.html" rel="bookmark" + <h1><a href="/article-2.html" rel="bookmark" title="Permalink to Article 2">Article 2</a></h1> </header> @@ -134,11 +134,11 @@ <h1><a href="./article-2.html" rel="bookmark" Thu 17 February 2011 </abbr> - <p>In <a href="./category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. </p> </footer><!-- /.post-info --> <p>Article 2</p> - <a class="readmore" href="./article-2.html">read more</a> + <a class="readmore" href="/article-2.html">read more</a> </div><!-- /.entry-content --> </article></li> @@ -146,7 +146,7 @@ <h1><a href="./article-2.html" rel="bookmark" <li><article class="hentry"> <header> - <h1><a href="./article-3.html" rel="bookmark" + <h1><a href="/article-3.html" rel="bookmark" title="Permalink to Article 3">Article 3</a></h1> </header> @@ -156,11 +156,11 @@ <h1><a href="./article-3.html" rel="bookmark" Thu 17 February 2011 </abbr> - <p>In <a href="./category/cat1.html">cat1</a>. </p> + <p>In <a href="/category/cat1.html">cat1</a>. 
</p> </footer><!-- /.post-info --> <p>Article 3</p> - <a class="readmore" href="./article-3.html">read more</a> + <a class="readmore" href="/article-3.html">read more</a> </div><!-- /.entry-content --> </article></li> @@ -168,7 +168,7 @@ <h1><a href="./article-3.html" rel="bookmark" <li><article class="hentry"> <header> - <h1><a href="./this-is-a-super-article.html" rel="bookmark" + <h1><a href="/this-is-a-super-article.html" rel="bookmark" title="Permalink to This is a super article !">This is a super article !</a></h1> </header> @@ -179,14 +179,14 @@ <h1><a href="./this-is-a-super-article.html" rel="bookmark" </abbr> <address class="vcard author"> - By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="./category/yeah.html">yeah</a>. </p> -<p>tags: <a href="./tag/foo.html">foo</a><a href="./tag/bar.html">bar</a><a href="./tag/foobar.html">foobar</a></p> + <p>In <a href="/category/yeah.html">yeah</a>. </p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/foobar.html">foobar</a></p> </footer><!-- /.post-info --> <p class="first last">Multi-line metadata should be supported as well as <strong>inline markup</strong>.</p> - <a class="readmore" href="./this-is-a-super-article.html">read more</a> + <a class="readmore" href="/this-is-a-super-article.html">read more</a> </div><!-- /.entry-content --> </article></li> @@ -194,7 +194,7 @@ <h1><a href="./this-is-a-super-article.html" rel="bookmark" <li><article class="hentry"> <header> - <h1><a href="./oh-yeah.html" rel="bookmark" + <h1><a href="/oh-yeah.html" rel="bookmark" title="Permalink to Oh yeah !">Oh yeah !</a></h1> </header> @@ -205,10 +205,10 @@ <h1><a href="./oh-yeah.html" rel="bookmark" </abbr> <address class="vcard author"> - By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="./category/bar.html">bar</a>. </p> -<p>tags: <a href="./tag/oh.html">oh</a><a href="./tag/bar.html">bar</a><a href="./tag/yeah.html">yeah</a></p> + <p>In <a href="/category/bar.html">bar</a>. </p> +<p>tags: <a href="/tag/oh.html">oh</a><a href="/tag/bar.html">bar</a><a href="/tag/yeah.html">yeah</a></p> </footer><!-- /.post-info --> <div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! @@ -216,7 +216,7 @@ <h2>Why not ?</h2> <img alt="alternate text" src="|filename|/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> </div> - <a class="readmore" href="./oh-yeah.html">read more</a> + <a class="readmore" href="/oh-yeah.html">read more</a> </div><!-- /.entry-content --> </article></li> @@ -224,7 +224,7 @@ <h2>Why not ?</h2> <li><article class="hentry"> <header> - <h1><a href="./unbelievable.html" rel="bookmark" + <h1><a href="/unbelievable.html" rel="bookmark" title="Permalink to Unbelievable !">Unbelievable !</a></h1> </header> @@ -234,13 +234,13 @@ <h1><a href="./unbelievable.html" rel="bookmark" Fri 15 October 2010 </abbr> - <p>In <a href="./category/misc.html">misc</a>. </p> + <p>In <a href="/category/misc.html">misc</a>. </p> </footer><!-- /.post-info --> <p>Or completely awesome. 
Depends the needs.</p> -<p><a class="reference external" href="./a-markdown-powered-article.html">a root-relative link to markdown-article</a> -<a class="reference external" href="./a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> +<p><a class="reference external" href="/a-markdown-powered-article.html">a root-relative link to markdown-article</a> +<a class="reference external" href="/a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> - <a class="readmore" href="./unbelievable.html">read more</a> + <a class="readmore" href="/unbelievable.html">read more</a> </div><!-- /.entry-content --> </article></li> </ol><!-- /#posts-list --> diff --git a/pelican/tests/output/basic/oh-yeah.html b/pelican/tests/output/basic/oh-yeah.html --- a/pelican/tests/output/basic/oh-yeah.html +++ b/pelican/tests/output/basic/oh-yeah.html @@ -3,38 +3,38 @@ <head> <title>Oh yeah !</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li class="active"><a href="./category/bar.html">bar</a></li> - <li ><a href="./category/cat1.html">cat1</a></li> - <li ><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li class="active"><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <section id="content" class="body"> <article> <header> <h1 class="entry-title"> - <a href="./oh-yeah.html" rel="bookmark" + <a href="/oh-yeah.html" rel="bookmark" title="Permalink to Oh yeah !">Oh yeah !</a></h1> </header> @@ -45,10 +45,10 @@ <h1 class="entry-title"> </abbr> <address class="vcard author"> - By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="./category/bar.html">bar</a>. </p> -<p>tags: <a href="./tag/oh.html">oh</a><a href="./tag/bar.html">bar</a><a href="./tag/yeah.html">yeah</a></p> + <p>In <a href="/category/bar.html">bar</a>. 
</p> +<p>tags: <a href="/tag/oh.html">oh</a><a href="/tag/bar.html">bar</a><a href="/tag/yeah.html">yeah</a></p> </footer><!-- /.post-info --> <div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! diff --git a/pelican/tests/output/basic/override/index.html b/pelican/tests/output/basic/override/index.html --- a/pelican/tests/output/basic/override/index.html +++ b/pelican/tests/output/basic/override/index.html @@ -3,31 +3,31 @@ <head> <title>Override url/save_as</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> diff --git a/pelican/tests/output/basic/pages/this-is-a-test-hidden-page.html b/pelican/tests/output/basic/pages/this-is-a-test-hidden-page.html --- a/pelican/tests/output/basic/pages/this-is-a-test-hidden-page.html +++ b/pelican/tests/output/basic/pages/this-is-a-test-hidden-page.html @@ -3,31 +3,31 @@ <head> <title>This is a test hidden page</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" 
type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> diff --git a/pelican/tests/output/basic/pages/this-is-a-test-page.html b/pelican/tests/output/basic/pages/this-is-a-test-page.html --- a/pelican/tests/output/basic/pages/this-is-a-test-page.html +++ b/pelican/tests/output/basic/pages/this-is-a-test-page.html @@ -3,31 +3,31 @@ <head> <title>This is a test page</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> diff --git a/pelican/tests/output/basic/second-article-fr.html b/pelican/tests/output/basic/second-article-fr.html --- a/pelican/tests/output/basic/second-article-fr.html +++ b/pelican/tests/output/basic/second-article-fr.html @@ -3,38 +3,38 @@ <head> <title>Deuxième article</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" 
type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li ><a href="./category/cat1.html">cat1</a></li> - <li class="active"><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li class="active"><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <section id="content" class="body"> <article> <header> <h1 class="entry-title"> - <a href="./second-article-fr.html" rel="bookmark" + <a href="/second-article-fr.html" rel="bookmark" title="Permalink to Deuxième article">Deuxième article</a></h1> </header> @@ -44,9 +44,9 @@ <h1 class="entry-title"> Wed 29 February 2012 </abbr> - <p>In <a href="./category/misc.html">misc</a>. </p> -<p>tags: <a href="./tag/foo.html">foo</a><a href="./tag/bar.html">bar</a><a href="./tag/baz.html">baz</a></p>Translations: - <a href="./second-article.html">en</a> + <p>In <a href="/category/misc.html">misc</a>. 
</p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/baz.html">baz</a></p>Translations: + <a href="/second-article.html">en</a> </footer><!-- /.post-info --> <p>Ceci est un article, en français.</p> diff --git a/pelican/tests/output/basic/second-article.html b/pelican/tests/output/basic/second-article.html --- a/pelican/tests/output/basic/second-article.html +++ b/pelican/tests/output/basic/second-article.html @@ -3,38 +3,38 @@ <head> <title>Second article</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li ><a href="./category/cat1.html">cat1</a></li> - <li class="active"><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li class="active"><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <section id="content" class="body"> <article> <header> <h1 class="entry-title"> - <a href="./second-article.html" rel="bookmark" + <a href="/second-article.html" rel="bookmark" title="Permalink to Second article">Second article</a></h1> </header> @@ -44,9 +44,9 @@ <h1 class="entry-title"> Wed 29 February 2012 </abbr> - <p>In <a href="./category/misc.html">misc</a>. </p> -<p>tags: <a href="./tag/foo.html">foo</a><a href="./tag/bar.html">bar</a><a href="./tag/baz.html">baz</a></p>Translations: - <a href="./second-article-fr.html">fr</a> + <p>In <a href="/category/misc.html">misc</a>. 
</p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/baz.html">baz</a></p>Translations: + <a href="/second-article-fr.html">fr</a> </footer><!-- /.post-info --> <p>This is some article, in english</p> diff --git a/pelican/tests/output/basic/tag/bar.html b/pelican/tests/output/basic/tag/bar.html --- a/pelican/tests/output/basic/tag/bar.html +++ b/pelican/tests/output/basic/tag/bar.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog - bar</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> @@ -35,15 +35,15 @@ <h1><a href="../">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="../second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href="/second-article.html">Second article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 </abbr> - <p>In <a href="../category/misc.html">misc</a>. </p> -<p>tags: <a href="../tag/foo.html">foo</a><a href="../tag/bar.html">bar</a><a href="../tag/baz.html">baz</a></p>Translations: - <a href="../second-article-fr.html">fr</a> + <p>In <a href="/category/misc.html">misc</a>. 
</p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/baz.html">baz</a></p>Translations: + <a href="/second-article-fr.html">fr</a> </footer><!-- /.post-info --><p>This is some article, in english</p> </article> @@ -57,7 +57,7 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href="../this-is-a-super-article.html" rel="bookmark" + <h1><a href="/this-is-a-super-article.html" rel="bookmark" title="Permalink to This is a super article !">This is a super article !</a></h1> </header> @@ -68,14 +68,14 @@ <h1><a href="../this-is-a-super-article.html" rel="bookmark" </abbr> <address class="vcard author"> - By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="../category/yeah.html">yeah</a>. </p> -<p>tags: <a href="../tag/foo.html">foo</a><a href="../tag/bar.html">bar</a><a href="../tag/foobar.html">foobar</a></p> + <p>In <a href="/category/yeah.html">yeah</a>. </p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/foobar.html">foobar</a></p> </footer><!-- /.post-info --> <p class="first last">Multi-line metadata should be supported as well as <strong>inline markup</strong>.</p> - <a class="readmore" href="../this-is-a-super-article.html">read more</a> + <a class="readmore" href="/this-is-a-super-article.html">read more</a> </div><!-- /.entry-content --> </article></li> @@ -83,7 +83,7 @@ <h1><a href="../this-is-a-super-article.html" rel="bookmark" <li><article class="hentry"> <header> - <h1><a href="../oh-yeah.html" rel="bookmark" + <h1><a href="/oh-yeah.html" rel="bookmark" title="Permalink to Oh yeah !">Oh yeah !</a></h1> </header> @@ -94,10 +94,10 @@ <h1><a href="../oh-yeah.html" rel="bookmark" </abbr> <address class="vcard author"> - By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="../category/bar.html">bar</a>. </p> -<p>tags: <a href="../tag/oh.html">oh</a><a href="../tag/bar.html">bar</a><a href="../tag/yeah.html">yeah</a></p> + <p>In <a href="/category/bar.html">bar</a>. </p> +<p>tags: <a href="/tag/oh.html">oh</a><a href="/tag/bar.html">bar</a><a href="/tag/yeah.html">yeah</a></p> </footer><!-- /.post-info --> <div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
@@ -105,7 +105,7 @@ <h2>Why not ?</h2> <img alt="alternate text" src="|filename|/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> </div> - <a class="readmore" href="../oh-yeah.html">read more</a> + <a class="readmore" href="/oh-yeah.html">read more</a> </div><!-- /.entry-content --> </article></li> </ol><!-- /#posts-list --> diff --git a/pelican/tests/output/basic/tag/baz.html b/pelican/tests/output/basic/tag/baz.html --- a/pelican/tests/output/basic/tag/baz.html +++ b/pelican/tests/output/basic/tag/baz.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog - baz</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> @@ -35,15 +35,15 @@ <h1><a href="../">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="../second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href="/second-article.html">Second article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 </abbr> - <p>In <a href="../category/misc.html">misc</a>. </p> -<p>tags: <a href="../tag/foo.html">foo</a><a href="../tag/bar.html">bar</a><a href="../tag/baz.html">baz</a></p>Translations: - <a href="../second-article-fr.html">fr</a> + <p>In <a href="/category/misc.html">misc</a>. 
</p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/baz.html">baz</a></p>Translations: + <a href="/second-article-fr.html">fr</a> </footer><!-- /.post-info --><p>This is some article, in english</p> </article> diff --git a/pelican/tests/output/basic/tag/foo.html b/pelican/tests/output/basic/tag/foo.html --- a/pelican/tests/output/basic/tag/foo.html +++ b/pelican/tests/output/basic/tag/foo.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog - foo</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> @@ -35,15 +35,15 @@ <h1><a href="../">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="../second-article.html">Second article</a></h1> + <h1 class="entry-title"><a href="/second-article.html">Second article</a></h1> <footer class="post-info"> <abbr class="published" title="2012-02-29T00:00:00"> Wed 29 February 2012 </abbr> - <p>In <a href="../category/misc.html">misc</a>. </p> -<p>tags: <a href="../tag/foo.html">foo</a><a href="../tag/bar.html">bar</a><a href="../tag/baz.html">baz</a></p>Translations: - <a href="../second-article-fr.html">fr</a> + <p>In <a href="/category/misc.html">misc</a>. 
</p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/baz.html">baz</a></p>Translations: + <a href="/second-article-fr.html">fr</a> </footer><!-- /.post-info --><p>This is some article, in english</p> </article> @@ -57,7 +57,7 @@ <h1>Other articles</h1> <li><article class="hentry"> <header> - <h1><a href="../this-is-a-super-article.html" rel="bookmark" + <h1><a href="/this-is-a-super-article.html" rel="bookmark" title="Permalink to This is a super article !">This is a super article !</a></h1> </header> @@ -68,14 +68,14 @@ <h1><a href="../this-is-a-super-article.html" rel="bookmark" </abbr> <address class="vcard author"> - By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="../category/yeah.html">yeah</a>. </p> -<p>tags: <a href="../tag/foo.html">foo</a><a href="../tag/bar.html">bar</a><a href="../tag/foobar.html">foobar</a></p> + <p>In <a href="/category/yeah.html">yeah</a>. </p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/foobar.html">foobar</a></p> </footer><!-- /.post-info --> <p class="first last">Multi-line metadata should be supported as well as <strong>inline markup</strong>.</p> - <a class="readmore" href="../this-is-a-super-article.html">read more</a> + <a class="readmore" href="/this-is-a-super-article.html">read more</a> </div><!-- /.entry-content --> </article></li> </ol><!-- /#posts-list --> diff --git a/pelican/tests/output/basic/tag/foobar.html b/pelican/tests/output/basic/tag/foobar.html --- a/pelican/tests/output/basic/tag/foobar.html +++ b/pelican/tests/output/basic/tag/foobar.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog - foobar</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> 
</ul></nav> </header><!-- /#banner --> @@ -35,17 +35,17 @@ <h1><a href="../">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="../this-is-a-super-article.html">This is a super article !</a></h1> + <h1 class="entry-title"><a href="/this-is-a-super-article.html">This is a super article !</a></h1> <footer class="post-info"> <abbr class="published" title="2010-12-02T10:14:00"> Thu 02 December 2010 </abbr> <address class="vcard author"> - By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="../category/yeah.html">yeah</a>. </p> -<p>tags: <a href="../tag/foo.html">foo</a><a href="../tag/bar.html">bar</a><a href="../tag/foobar.html">foobar</a></p> + <p>In <a href="/category/yeah.html">yeah</a>. </p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/foobar.html">foobar</a></p> </footer><!-- /.post-info --><p>Some content here !</p> <div class="section" id="this-is-a-simple-title"> <h2>This is a simple title</h2> diff --git a/pelican/tests/output/basic/tag/oh.html b/pelican/tests/output/basic/tag/oh.html --- a/pelican/tests/output/basic/tag/oh.html +++ b/pelican/tests/output/basic/tag/oh.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog - oh</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> @@ -35,17 +35,17 @@ <h1><a href="../">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="../oh-yeah.html">Oh yeah !</a></h1> + <h1 class="entry-title"><a href="/oh-yeah.html">Oh yeah !</a></h1> <footer class="post-info"> <abbr class="published" title="2010-10-20T10:14:00"> Wed 20 October 2010 </abbr> <address class="vcard 
author"> - By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="../category/bar.html">bar</a>. </p> -<p>tags: <a href="../tag/oh.html">oh</a><a href="../tag/bar.html">bar</a><a href="../tag/yeah.html">yeah</a></p> + <p>In <a href="/category/bar.html">bar</a>. </p> +<p>tags: <a href="/tag/oh.html">oh</a><a href="/tag/bar.html">bar</a><a href="/tag/yeah.html">yeah</a></p> </footer><!-- /.post-info --><div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! diff --git a/pelican/tests/output/basic/tag/yeah.html b/pelican/tests/output/basic/tag/yeah.html --- a/pelican/tests/output/basic/tag/yeah.html +++ b/pelican/tests/output/basic/tag/yeah.html @@ -3,31 +3,31 @@ <head> <title>A Pelican Blog - yeah</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="../theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie.css"/> - <script src="../js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="../css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="../">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="../override/">Override url/save_as</a></li> - <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="../category/bar.html">bar</a></li> - <li ><a href="../category/cat1.html">cat1</a></li> - <li ><a href="../category/misc.html">misc</a></li> - <li ><a href="../category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> @@ -35,17 +35,17 @@ <h1><a href="../">A Pelican Blog </a></h1> <aside id="featured" class="body"> <article> - <h1 class="entry-title"><a href="../oh-yeah.html">Oh yeah !</a></h1> + <h1 class="entry-title"><a href="/oh-yeah.html">Oh yeah !</a></h1> <footer class="post-info"> <abbr class="published" title="2010-10-20T10:14:00"> Wed 20 October 2010 </abbr> <address class="vcard author"> - By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="../category/bar.html">bar</a>. </p> -<p>tags: <a href="../tag/oh.html">oh</a><a href="../tag/bar.html">bar</a><a href="../tag/yeah.html">yeah</a></p> + <p>In <a href="/category/bar.html">bar</a>. 
</p> +<p>tags: <a href="/tag/oh.html">oh</a><a href="/tag/bar.html">bar</a><a href="/tag/yeah.html">yeah</a></p> </footer><!-- /.post-info --><div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! diff --git a/pelican/tests/output/basic/this-is-a-super-article.html b/pelican/tests/output/basic/this-is-a-super-article.html --- a/pelican/tests/output/basic/this-is-a-super-article.html +++ b/pelican/tests/output/basic/this-is-a-super-article.html @@ -3,38 +3,38 @@ <head> <title>This is a super article !</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li ><a href="./category/cat1.html">cat1</a></li> - <li ><a href="./category/misc.html">misc</a></li> - <li class="active"><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li ><a href="/category/misc.html">misc</a></li> + <li class="active"><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <section id="content" class="body"> <article> <header> <h1 class="entry-title"> - <a href="./this-is-a-super-article.html" rel="bookmark" + <a href="/this-is-a-super-article.html" rel="bookmark" title="Permalink to This is a super article !">This is a super article !</a></h1> </header> @@ -45,10 +45,10 @@ <h1 class="entry-title"> </abbr> <address class="vcard author"> - By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> - <p>In <a href="./category/yeah.html">yeah</a>. </p> -<p>tags: <a href="./tag/foo.html">foo</a><a href="./tag/bar.html">bar</a><a href="./tag/foobar.html">foobar</a></p> + <p>In <a href="/category/yeah.html">yeah</a>. 
</p> +<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/foobar.html">foobar</a></p> </footer><!-- /.post-info --> <p>Some content here !</p> <div class="section" id="this-is-a-simple-title"> <h2>This is a simple title</h2> diff --git a/pelican/tests/output/basic/unbelievable.html b/pelican/tests/output/basic/unbelievable.html --- a/pelican/tests/output/basic/unbelievable.html +++ b/pelican/tests/output/basic/unbelievable.html @@ -3,38 +3,38 @@ <head> <title>Unbelievable !</title> <meta charset="utf-8" /> - <link rel="stylesheet" href="./theme/css/main.css" type="text/css" /> + <link rel="stylesheet" href="/theme/css/main.css" type="text/css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> <!--[if IE]> <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> <!--[if lte IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie.css"/> - <script src="./js/IE8.js" type="text/javascript"></script><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie.css"/> + <script src="/js/IE8.js" type="text/javascript"></script><![endif]--> <!--[if lt IE 7]> - <link rel="stylesheet" type="text/css" media="all" href="./css/ie6.css"/><![endif]--> + <link rel="stylesheet" type="text/css" media="all" href="/css/ie6.css"/><![endif]--> </head> <body id="index" class="home"> <header id="banner" class="body"> - <h1><a href="./">A Pelican Blog </a></h1> + <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> - <li><a href="./override/">Override url/save_as</a></li> - <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> - <li ><a href="./category/bar.html">bar</a></li> - <li ><a href="./category/cat1.html">cat1</a></li> - <li class="active"><a href="./category/misc.html">misc</a></li> - <li ><a href="./category/yeah.html">yeah</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li ><a href="/category/bar.html">bar</a></li> + <li ><a href="/category/cat1.html">cat1</a></li> + <li class="active"><a href="/category/misc.html">misc</a></li> + <li ><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> <section id="content" class="body"> <article> <header> <h1 class="entry-title"> - <a href="./unbelievable.html" rel="bookmark" + <a href="/unbelievable.html" rel="bookmark" title="Permalink to Unbelievable !">Unbelievable !</a></h1> </header> @@ -44,11 +44,11 @@ <h1 class="entry-title"> Fri 15 October 2010 </abbr> - <p>In <a href="./category/misc.html">misc</a>. </p> + <p>In <a href="/category/misc.html">misc</a>. </p> </footer><!-- /.post-info --> <p>Or completely awesome. 
Depends the needs.</p> -<p><a class="reference external" href="./a-markdown-powered-article.html">a root-relative link to markdown-article</a> -<a class="reference external" href="./a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> +<p><a class="reference external" href="/a-markdown-powered-article.html">a root-relative link to markdown-article</a> +<a class="reference external" href="/a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> </div><!-- /.entry-content --> diff --git a/pelican/tests/test_pelican.py b/pelican/tests/test_pelican.py --- a/pelican/tests/test_pelican.py +++ b/pelican/tests/test_pelican.py @@ -77,7 +77,7 @@ def test_basic_generation_works(self): dcmp = dircmp(self.temp_path, os.path.join(OUTPUT_PATH, 'basic')) self.assertFilesEqual(recursiveDiff(dcmp)) self.assertLogCountEqual( - count=10, + count=4, msg="Unable to find.*skipping url replacement", level=logging.WARNING) diff --git a/pelican/tests/test_webassets.py b/pelican/tests/test_webassets.py --- a/pelican/tests/test_webassets.py +++ b/pelican/tests/test_webassets.py @@ -56,6 +56,9 @@ def check_link_tag(self, css_file, html_file): class TestWebAssetsRelativeURLS(TestWebAssets): """Test pelican with relative urls.""" + def setUp(self): + TestWebAssets.setUp(self, override={'RELATIVE_URLS': True}) + def test_jinja2_ext(self): # Test that the Jinja2 extension was correctly added. @@ -90,8 +93,7 @@ class TestWebAssetsAbsoluteURLS(TestWebAssets): """Test pelican with absolute urls.""" def setUp(self): - TestWebAssets.setUp(self, override={'RELATIVE_URLS': False, - 'SITEURL': 'http://localhost'}) + TestWebAssets.setUp(self, override={'SITEURL': 'http://localhost'}) def test_absolute_url(self): # Look in the output files for the link tag with absolute url.
Change default setting for RELATIVE_URLS to False The default value for RELATIVE_URLS is currently set to `True`, which is the source of a considerable amount of confusion and unexpected side effects. This issue tracks modification of this setting to `False` by default, while ensuring that tests pass and that this change does not cause any unanticipated negative behavior.
I was affected by this, as I expected `SITEURL` to work in production. After inspection and 1.5 hours of lost time, I found that setting `RELATIVE_URLS=False` fixed this. I support the action to make `False` the default value for `RELATIVE_URLS`, for the reason of least surprise.
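For reference, a minimal sketch of the settings combination described above, assuming Pelican's usual quickstart layout; the file name and site URL below are placeholders, not values taken from the issue:

```python
# publishconf.py (hypothetical) -- production settings sketch
SITEURL = 'http://example.com'   # placeholder production URL
RELATIVE_URLS = False            # make generated links use SITEURL
```

With `RELATIVE_URLS = True` (the old default), links in the generated pages are rewritten relative to each output file, so `SITEURL` does not show up in most page links.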
2013-04-11T21:01:56Z
[]
[]
getpelican/pelican
845
getpelican__pelican-845
[ "704" ]
34f05d4ba649d16bf0e3d94b160838635dd48dc1
diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py --- a/pelican/urlwrappers.py +++ b/pelican/urlwrappers.py @@ -13,21 +13,35 @@ @functools.total_ordering class URLWrapper(object): def __init__(self, name, settings): + # next 2 lines are redundant with the setter of the name property + # but are here for clarity + self._name = name + self.slug = slugify(name) self.name = name - self.slug = slugify(self.name) self.settings = settings + @property + def name(self): + return self._name + + @name.setter + def name(self, name): + self._name = name + self.slug = slugify(name) + def as_dict(self): - return self.__dict__ + d = self.__dict__ + d['name'] = self.name + return d def __hash__(self): - return hash(self.name) + return hash(self.slug) def _key(self): - return self.name + return self.slug def _normalize_key(self, key): - return six.text_type(key) + return six.text_type(slugify(key)) def __eq__(self, other): return self._key() == self._normalize_key(other)
diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -89,17 +89,29 @@ def test_generate_context(self): ['This is an article without category !', 'published', 'TestCategory', 'article'], ['This is a super article !', 'published', 'yeah', 'article'], - ['マックOS X 10.8でパイソンとVirtualenvをインストールと設定', 'published', '指導書', 'article'] + ['マックOS X 10.8でパイソンとVirtualenvをインストールと設定', + 'published', '指導書', 'article'] ] self.assertEqual(sorted(articles_expected), sorted(articles)) def test_generate_categories(self): generator = self.get_populated_generator() + # test for name + # categories are grouped by slug; if two categories have the same slug + # but different names they will be grouped together, the first one in + # terms of process order will define the name for that category categories = [cat.name for cat, _ in generator.categories] - categories_expected = ['Default', 'TestCategory', 'Yeah', 'test', - 'yeah', '指導書'] - self.assertEqual(categories, categories_expected) + categories_alternatives = ( + sorted(['Default', 'TestCategory', 'Yeah', 'test', '指導書']), + sorted(['Default', 'TestCategory', 'yeah', 'test', '指導書']), + ) + self.assertTrue(sorted(categories) in categories_alternatives) + # test for slug + categories = [cat.slug for cat, _ in generator.categories] + categories_expected = ['default', 'testcategory', 'yeah', 'test', + 'zhi-dao-shu'] + self.assertEqual(sorted(categories), sorted(categories_expected)) def test_do_not_use_folder_as_category(self): @@ -113,9 +125,20 @@ def test_do_not_use_folder_as_category(self): CUR_DIR, _DEFAULT_CONFIG['THEME'], None, _DEFAULT_CONFIG['MARKUP']) generator.generate_context() - + # test for name + # categories are grouped by slug; if two categories have the same slug + # but different names they will be grouped together, the first one in + # terms of process order will define the name for that category categories = [cat.name for cat, _ in generator.categories] - self.assertEqual(categories, ['Default', 'Yeah', 'test', 'yeah', '指導書']) + categories_alternatives = ( + sorted(['Default', 'Yeah', 'test', '指導書']), + sorted(['Default', 'yeah', 'test', '指導書']), + ) + self.assertTrue(sorted(categories) in categories_alternatives) + # test for slug + categories = [cat.slug for cat, _ in generator.categories] + categories_expected = ['default', 'yeah', 'test', 'zhi-dao-shu'] + self.assertEqual(sorted(categories), sorted(categories_expected)) def test_direct_templates_save_as_default(self):
Conflicts rendering Category pages when category is not defined in consistent case I was testing a Jinja macro that dealt with creating links for categories. I noted that if you define a category in one article as `Category: Something` and in another article as `Category: something`, these are treated as separate categories; however, when your category page is rendered, there is only the lowercase URL, e.g. `category/something.html`. This will only associate with the articles whose metadata is defined as `Category: something`, and not anywhere it is defined with uppercase, since there is no `category/Something.html`. I am not sure if making this case insensitive would break code. Certainly, it would be unclear which case to use when printing the category name. From an intelligent template process, you would set your case using a CSS style attribute to be sure it was the way you want, and it could always render categories in lower case. Otherwise, it might just be sufficient to put this into the documentation. I always tend to capitalize my categories, but some people might not notice and wonder why some articles are missing. I have not yet tested this, but I would imagine the same issue exists for tags.
To fix this, the `categories` `dict` in `generators.py` needs to be keyed by the category slug rather than the category name. This does present another problem, though, of which category name to show on the category page (Something vs. something). I would believe the default should be that all names are lowercase; you can then apply an uppercase function on the output or use CSS styling if you want to ensure proper case display. On Sun, Mar 24, 2013 at 08:17:25PM -0700, Eric wrote: > I would believe the default should be that all names are lowercase, > you can then apply an uppercase function on the output or use CSS > styling if you want to ensure proper case display. It is conceivable that you'd want some of your categories in Title Case, and some in UPPERCASE (e.g. acronyms). That would be a hard distinction to make in CSS. To me, it seems like a better idea to preserve case keying and display. If you have the same category name with two capitalizations, you get two different categories (with two category feeds, etc.). If you don't like that, pick one capitalization and standardize your metadata ;). @wking: With case-insensitive filesystems, such as HFS+ on Mac OS X, you won't get two different category feeds if you have two categories that only differ in terms of case. That said, I tend to agree with what I presume to be your conclusion: it's easier to standardize your metadata than to solve this thorny problem at a technical level. That said, folks are welcome to pursue something akin to what @rupert suggested, or submit a pull request to clarify the current behavior in the docs. Perhaps a warning when two categories / tags have the same URL?
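As a standalone sketch (not Pelican's actual code) of why keying by slug merges the two spellings while keying by the raw name keeps them apart; `slugify` below is a simplified stand-in for the real helper in `pelican.utils`, and the article names are made up:

```python
# Group hypothetical articles by category name vs. by category slug.
from collections import defaultdict

def slugify(value):
    # Simplified stand-in for pelican.utils.slugify
    return value.strip().lower().replace(' ', '-')

articles = [('Something', 'post-1.rst'), ('something', 'post-2.rst')]

by_name = defaultdict(list)
by_slug = defaultdict(list)
for category, source in articles:
    by_name[category].append(source)            # two separate categories
    by_slug[slugify(category)].append(source)   # grouped under one page

print(dict(by_name))  # {'Something': ['post-1.rst'], 'something': ['post-2.rst']}
print(dict(by_slug))  # {'something': ['post-1.rst', 'post-2.rst']}
```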
2013-04-14T17:24:21Z
[]
[]
getpelican/pelican
867
getpelican__pelican-867
[ "708" ]
0397274fed63c03512b558b88120a7103b6ddd56
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -231,10 +231,11 @@ def _get_summary(self): if hasattr(self, '_summary'): return self._summary - if self.settings['SUMMARY_MAX_LENGTH']: - return truncate_html_words(self.content, - self.settings['SUMMARY_MAX_LENGTH']) - return self.content + if self.settings['SUMMARY_MAX_LENGTH'] is None: + return self.content + + return truncate_html_words(self.content, + self.settings['SUMMARY_MAX_LENGTH']) def _set_summary(self, summary): """Dummy function"""
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -71,6 +71,9 @@ def test_summary_max_length(self): settings['SUMMARY_MAX_LENGTH'] = 10 page = Page(**page_kwargs) self.assertEqual(page.summary, truncate_html_words(TEST_CONTENT, 10)) + settings['SUMMARY_MAX_LENGTH'] = 0 + page = Page(**page_kwargs) + self.assertEqual(page.summary, '') def test_slug(self): # If a title is given, it should be used to generate the slug.
Setting default summary length = 0 I want to disable the default article summary, and only show one if defined in the article metadata. I assumed this would be done with: ``` SUMMARY_MAX_LENGTH = 0 ``` But this has the same effect as `SUMMARY_MAX_LENGTH = None`, meaning all content is shown. It does work, however, by setting a negative value: ``` SUMMARY_MAX_LENGTH = -1 ``` I don't know if this is a bug or a feature. If it's a feature, it's probably good to document it. If it's a bug, setting it to `0` would in my opinion be the most logical way to disable default summaries.
Yeah, it would probably make more sense if the functionality of `SUMMARY_MAX_LENGTH = 0` and `SUMMARY_MAX_LENGTH = -1` were swapped. So `SUMMARY_MAX_LENGTH = 0` would disable implicit summaries and `SUMMARY_MAX_LENGTH = -1` would use all content for implicit summaries. Thanks for your reply. In your proposal, what would `SUMMARY_MAX_LENGTH = None` mean? Would you drop support for this? I would suggest: ``` SUMMARY_MAX_LENGTH = 30 # Unless summary is specified in content, set summary to 30 words. SUMMARY_MAX_LENGTH = 0 # Unless summary is specified in content, set summary to 0 words (= no default summary) SUMMARY_MAX_LENGTH = None # Unless summary is specified in content, set no limit on summary (= use content as summary) ``` If that's not possible for technical reasons (e.g. if `None` means the same as `0`), we could indeed use `SUMMARY_MAX_LENGTH = -1` instead of `=None`.
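A rough sketch of the summary selection logic after the fix, assuming a `truncate_html_words(content, n)` helper that keeps the first `n` words (the real helper lives in `pelican.utils`); the point is the distinction between `None` (use the full content) and `0` (empty implicit summary):

```python
def truncate_html_words(content, max_words):
    # Simplified stand-in: keep only the first max_words words.
    return ' '.join(content.split()[:max_words])

def summary_for(content, explicit_summary=None, summary_max_length=50):
    if explicit_summary is not None:    # metadata-provided summary always wins
        return explicit_summary
    if summary_max_length is None:      # None: the whole content is the summary
        return content
    return truncate_html_words(content, summary_max_length)  # 0 -> ''

print(summary_for('word ' * 100, summary_max_length=0))                   # '' (no summary)
print(len(summary_for('word ' * 100, summary_max_length=None).split()))   # 100
```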
2013-04-26T23:40:08Z
[]
[]
getpelican/pelican
932
getpelican__pelican-932
[ "918" ]
8f295f7a037e0d512181946b9b87636f4a853e26
diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -5,6 +5,7 @@ import logging import os import re +import logging try: import docutils import docutils.core @@ -47,6 +48,8 @@ 'author': Author, } +logger = logging.getLogger(__name__) + class Reader(object): enabled = True @@ -199,7 +202,7 @@ class HTMLReader(Reader): enabled = True class _HTMLParser(HTMLParser): - def __init__(self, settings): + def __init__(self, settings, filename): HTMLParser.__init__(self) self.body = '' self.metadata = {} @@ -207,6 +210,8 @@ def __init__(self, settings): self._data_buffer = '' + self._filename = filename + self._in_top_level = True self._in_head = False self._in_title = False @@ -275,7 +280,11 @@ def build_tag(self, tag, attrs, close_tag): def _handle_meta_tag(self, attrs): name = self._attr_value(attrs, 'name').lower() - contents = self._attr_value(attrs, 'contents', '') + contents = self._attr_value(attrs, 'content', '') + if not contents: + contents = self._attr_value(attrs, 'contents', '') + if contents: + logger.warning("Meta tag attribute 'contents' used in file %s, should be changed to 'content'", self._filename) if name == 'keywords': name = 'tags' @@ -288,7 +297,7 @@ def _attr_value(cls, attrs, name, default=None): def read(self, filename): """Parse content and metadata of HTML files""" with pelican_open(filename) as content: - parser = self._HTMLParser(self.settings) + parser = self._HTMLParser(self.settings, filename) parser.feed(content) parser.close()
diff --git a/pelican/tests/content/article_with_keywords.html b/pelican/tests/content/article_with_keywords.html --- a/pelican/tests/content/article_with_keywords.html +++ b/pelican/tests/content/article_with_keywords.html @@ -1,6 +1,6 @@ <html> <head> <title>This is a super article !</title> - <meta name="keywords" contents="foo, bar, foobar" /> + <meta name="keywords" content="foo, bar, foobar" /> </head> </html> diff --git a/pelican/tests/content/article_with_metadata.html b/pelican/tests/content/article_with_metadata.html --- a/pelican/tests/content/article_with_metadata.html +++ b/pelican/tests/content/article_with_metadata.html @@ -1,12 +1,12 @@ <html> <head> <title>This is a super article !</title> - <meta name="tags" contents="foo, bar, foobar" /> - <meta name="date" contents="2010-12-02 10:14" /> - <meta name="category" contents="yeah" /> - <meta name="author" contents="Alexis Métaireau" /> - <meta name="summary" contents="Summary and stuff" /> - <meta name="custom_field" contents="http://notmyidea.org" /> + <meta name="tags" content="foo, bar, foobar" /> + <meta name="date" content="2010-12-02 10:14" /> + <meta name="category" content="yeah" /> + <meta name="author" content="Alexis Métaireau" /> + <meta name="summary" content="Summary and stuff" /> + <meta name="custom_field" content="http://notmyidea.org" /> </head> <body> Multi-line metadata should be supported diff --git a/pelican/tests/content/article_with_metadata_and_contents.html b/pelican/tests/content/article_with_metadata_and_contents.html new file mode 100644 --- /dev/null +++ b/pelican/tests/content/article_with_metadata_and_contents.html @@ -0,0 +1,15 @@ +<html> + <head> + <title>This is a super article !</title> + <meta name="tags" contents="foo, bar, foobar" /> + <meta name="date" contents="2010-12-02 10:14" /> + <meta name="category" contents="yeah" /> + <meta name="author" contents="Alexis Métaireau" /> + <meta name="summary" contents="Summary and stuff" /> + <meta name="custom_field" contents="http://notmyidea.org" /> + </head> + <body> + Multi-line metadata should be supported + as well as <strong>inline markup</strong>. + </body> +</html> diff --git a/pelican/tests/content/article_with_uppercase_metadata.html b/pelican/tests/content/article_with_uppercase_metadata.html --- a/pelican/tests/content/article_with_uppercase_metadata.html +++ b/pelican/tests/content/article_with_uppercase_metadata.html @@ -1,6 +1,6 @@ <html> <head> <title>This is a super article !</title> - <meta name="Category" contents="Yeah" /> + <meta name="Category" content="Yeah" /> </head> </html> diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -350,6 +350,21 @@ def test_article_with_metadata(self): for key, value in expected.items(): self.assertEqual(value, page.metadata[key], key) + def test_article_with_metadata_and_contents_attrib(self): + page = self.read_file(path='article_with_metadata_and_contents.html') + expected = { + 'category': 'yeah', + 'author': 'Alexis Métaireau', + 'title': 'This is a super article !', + 'summary': 'Summary and stuff', + 'date': datetime.datetime(2010, 12, 2, 10, 14), + 'tags': ['foo', 'bar', 'foobar'], + 'custom_field': 'http://notmyidea.org', + } + for key, value in expected.items(): + self.assertEqual(value, page.metadata[key], key) + + def test_article_with_null_attributes(self): page = self.read_file(path='article_with_null_attributes.html')
HTML reader uses incorrect "contents" attribute for meta tag, should be "content" The HTML reader tries to extract values from a meta tag using the "contents" attribute. However, meta tags have no "contents" attribute (see http://www.w3.org/TR/html401/struct/global.html#h-7.4.4.2); it should be "content".
Hi Kyle. Thanks for the issue report. Might you be willing to implement a fix for this?
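A minimal, self-contained illustration of the behavior the fix aims for: read metadata from `<meta>` tags using the standard `content` attribute, with a fallback to the legacy `contents` spelling. The class name and sample HTML below are made up for the example, not taken from Pelican's code or test suite:

```python
from html.parser import HTMLParser

class MetaReader(HTMLParser):
    def __init__(self):
        super().__init__()
        self.metadata = {}

    def handle_starttag(self, tag, attrs):
        if tag != 'meta':
            return
        attrs = dict(attrs)
        name = attrs.get('name', '').lower()
        # Prefer the standard attribute, fall back to the old misspelling.
        value = attrs.get('content') or attrs.get('contents', '')
        if name:
            self.metadata[name] = value

reader = MetaReader()
reader.feed('<head><meta name="tags" content="foo, bar" /></head>')
print(reader.metadata)   # {'tags': 'foo, bar'}
```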
2013-06-14T19:16:11Z
[]
[]
getpelican/pelican
943
getpelican__pelican-943
[ "938" ]
a14dc4dad20f7a918ad7391af5950055204af904
diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -168,7 +168,8 @@ def generate_output(self, writer): try: template = self.env.get_template(source) rurls = self.settings['RELATIVE_URLS'] - writer.write_file(dest, template, self.context, rurls) + writer.write_file(dest, template, self.context, rurls, + override_output=True) finally: del self.env.loader.loaders[0] @@ -262,7 +263,8 @@ def generate_articles(self, write): """Generate the articles.""" for article in chain(self.translations, self.articles): write(article.save_as, self.get_template(article.template), - self.context, article=article, category=article.category) + self.context, article=article, category=article.category, + override_output=hasattr(article, 'override_save_as')) def generate_period_archives(self, write): """Generate per-year, per-month, and per-day archives.""" @@ -533,7 +535,8 @@ def generate_output(self, writer): self.hidden_translations, self.hidden_pages): writer.write_file(page.save_as, self.get_template(page.template), self.context, page=page, - relative_urls=self.settings['RELATIVE_URLS']) + relative_urls=self.settings['RELATIVE_URLS'], + override_output=hasattr(page, 'override_save_as')) class StaticGenerator(Generator): diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -24,6 +24,7 @@ def __init__(self, output_path, settings=None): self.reminder = dict() self.settings = settings or {} self._written_files = set() + self._overridden_files = set() def _create_new_feed(self, feed_type, context): feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed @@ -49,13 +50,26 @@ def _add_item_to_the_feed(self, feed, item): pubdate=set_date_tzinfo(item.date, self.settings.get('TIMEZONE', None))) - def _open_w(self, filename, encoding): + def _open_w(self, filename, encoding, override=False): """Open a file to write some content to it. - Exit if we have already written to that file. + Exit if we have already written to that file, unless one (and no more + than one) of the writes has the override parameter set to True. """ - if filename in self._written_files: - raise IOError('File %s is to be overwritten' % filename) + if filename in self._overridden_files: + if override: + raise StandardError('File %s is set to be overridden twice' + % filename) + else: + logger.info('skipping %s' % filename) + filename = os.devnull + elif filename in self._written_files: + if override: + logger.info('overwriting %s' % filename) + else: + raise StandardError('File %s is to be overwritten' % filename) + if override: + self._overridden_files.add(filename) self._written_files.add(filename) return open(filename, 'w', encoding=encoding) @@ -103,7 +117,7 @@ def write_feed(self, elements, context, path=None, feed_type='atom'): locale.setlocale(locale.LC_ALL, old_locale) def write_file(self, name, template, context, relative_urls=False, - paginated=None, **kwargs): + paginated=None, override_output=False, **kwargs): """Render the template and write the file. 
:param name: name of the file to output @@ -112,6 +126,9 @@ def write_file(self, name, template, context, relative_urls=False, :param relative_urls: use relative urls or absolutes ones :param paginated: dict of article list to paginate - must have the same length (same list in different orders) + :param override_output: boolean telling if we can override previous + output with the same name (and if next files written with the same + name should be skipped to keep that one) :param **kwargs: additional variables to pass to the templates """ @@ -121,7 +138,7 @@ def write_file(self, name, template, context, relative_urls=False, # other stuff, just return for now return - def _write_file(template, localcontext, output_path, name): + def _write_file(template, localcontext, output_path, name, override): """Render the template write the file.""" old_locale = locale.setlocale(locale.LC_ALL) locale.setlocale(locale.LC_ALL, str('C')) @@ -134,7 +151,7 @@ def _write_file(template, localcontext, output_path, name): os.makedirs(os.path.dirname(path)) except Exception: pass - with self._open_w(path, 'utf-8') as f: + with self._open_w(path, 'utf-8', override=override) as f: f.write(output) logger.info('writing {}'.format(path)) @@ -180,7 +197,8 @@ def _write_file(template, localcontext, output_path, name): '%s_next_page' % key: next_page}) _write_file(template, paginated_localcontext, self.output_path, - page.save_as) + page.save_as, override_output) else: # no pagination - _write_file(template, localcontext, self.output_path, name) + _write_file(template, localcontext, self.output_path, name, + override_output)
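A toy model (not the real `Writer` class) of the bookkeeping this patch introduces in `_open_w`: a plain write to an already-written path is an error, a single overriding write wins, and any later plain writes to that path are silently redirected to `os.devnull`:

```python
import os

class OutputTracker:
    def __init__(self):
        self.written = set()
        self.overridden = set()

    def target_for(self, path, override=False):
        if path in self.overridden:
            if override:
                raise RuntimeError('%s is set to be overridden twice' % path)
            return os.devnull                      # skip: keep the overriding copy
        if path in self.written and not override:
            raise RuntimeError('%s is to be overwritten' % path)
        if override:
            self.overridden.add(path)
        self.written.add(path)
        return path

tracker = OutputTracker()
print(tracker.target_for('tag/oh.html'))                  # tag/oh.html
print(tracker.target_for('tag/oh.html', override=True))   # tag/oh.html (wins)
print(tracker.target_for('tag/oh.html'))                  # os.devnull (skipped)
```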
diff --git a/pelican/tests/output/basic/a-markdown-powered-article.html b/pelican/tests/output/basic/a-markdown-powered-article.html --- a/pelican/tests/output/basic/a-markdown-powered-article.html +++ b/pelican/tests/output/basic/a-markdown-powered-article.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/archives.html b/pelican/tests/output/basic/archives.html --- a/pelican/tests/output/basic/archives.html +++ b/pelican/tests/output/basic/archives.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> @@ -45,6 +46,8 @@ <h1>Archives for A Pelican Blog</h1> <dd><a href="/oh-yeah.html">Oh yeah !</a></dd> <dt>Fri 15 October 2010</dt> <dd><a href="/unbelievable.html">Unbelievable !</a></dd> + <dt>Sun 14 March 2010</dt> + <dd><a href="/tag/baz.html">The baz tag</a></dd> </dl> </section> <section id="extras" class="body"> diff --git a/pelican/tests/output/basic/article-1.html b/pelican/tests/output/basic/article-1.html --- a/pelican/tests/output/basic/article-1.html +++ b/pelican/tests/output/basic/article-1.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/article-2.html b/pelican/tests/output/basic/article-2.html --- a/pelican/tests/output/basic/article-2.html +++ b/pelican/tests/output/basic/article-2.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/article-3.html b/pelican/tests/output/basic/article-3.html --- a/pelican/tests/output/basic/article-3.html +++ b/pelican/tests/output/basic/article-3.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/author/alexis-metaireau.html b/pelican/tests/output/basic/author/alexis-metaireau.html --- a/pelican/tests/output/basic/author/alexis-metaireau.html +++ b/pelican/tests/output/basic/author/alexis-metaireau.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a 
href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/categories.html b/pelican/tests/output/basic/categories.html --- a/pelican/tests/output/basic/categories.html +++ b/pelican/tests/output/basic/categories.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/category/bar.html b/pelican/tests/output/basic/category/bar.html --- a/pelican/tests/output/basic/category/bar.html +++ b/pelican/tests/output/basic/category/bar.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li class="active"><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/category/cat1.html b/pelican/tests/output/basic/category/cat1.html --- a/pelican/tests/output/basic/category/cat1.html +++ b/pelican/tests/output/basic/category/cat1.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/category/misc.html b/pelican/tests/output/basic/category/misc.html --- a/pelican/tests/output/basic/category/misc.html +++ b/pelican/tests/output/basic/category/misc.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> @@ -85,6 +86,26 @@ <h1><a href="/unbelievable.html" rel="bookmark" <a class="readmore" href="/unbelievable.html">read more</a> </div><!-- /.entry-content --> </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="/tag/baz.html" rel="bookmark" + title="Permalink to The baz tag">The baz tag</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-03-14T00:00:00"> + Sun 14 March 2010 + </abbr> + +<p>In <a href="/category/misc.html">misc</a>. 
</p> + +</footer><!-- /.post-info --> <p>This article overrides the listening of the articles under the <em>baz</em> tag.</p> + + <a class="readmore" href="/tag/baz.html">read more</a> + </div><!-- /.entry-content --> + </article></li> </ol><!-- /#posts-list --> </section><!-- /#content --> <section id="extras" class="body"> diff --git a/pelican/tests/output/basic/category/yeah.html b/pelican/tests/output/basic/category/yeah.html --- a/pelican/tests/output/basic/category/yeah.html +++ b/pelican/tests/output/basic/category/yeah.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/feeds/all-en.atom.xml b/pelican/tests/output/basic/feeds/all-en.atom.xml --- a/pelican/tests/output/basic/feeds/all-en.atom.xml +++ b/pelican/tests/output/basic/feeds/all-en.atom.xml @@ -27,4 +27,5 @@ YEAH !&lt;/p&gt; </summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00Z</updated><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00Z</updated><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/all.atom.xml b/pelican/tests/output/basic/feeds/all.atom.xml --- a/pelican/tests/output/basic/feeds/all.atom.xml +++ b/pelican/tests/output/basic/feeds/all.atom.xml @@ -28,4 +28,5 @@ YEAH !&lt;/p&gt; </summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00Z</updated><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00Z</updated><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/misc.atom.xml b/pelican/tests/output/basic/feeds/misc.atom.xml --- a/pelican/tests/output/basic/feeds/misc.atom.xml +++ b/pelican/tests/output/basic/feeds/misc.atom.xml @@ -4,4 +4,5 @@ </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00Z</updated><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00Z</updated><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/filename_metadata-example.html b/pelican/tests/output/basic/filename_metadata-example.html --- a/pelican/tests/output/basic/filename_metadata-example.html +++ b/pelican/tests/output/basic/filename_metadata-example.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/index.html b/pelican/tests/output/basic/index.html --- a/pelican/tests/output/basic/index.html +++ b/pelican/tests/output/basic/index.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> @@ -218,6 +219,26 @@ <h1><a href="/unbelievable.html" rel="bookmark" <a class="readmore" href="/unbelievable.html">read more</a> </div><!-- /.entry-content --> </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="/tag/baz.html" rel="bookmark" + title="Permalink to The baz tag">The baz tag</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-03-14T00:00:00"> + Sun 14 March 2010 + 
</abbr> + +<p>In <a href="/category/misc.html">misc</a>. </p> + +</footer><!-- /.post-info --> <p>This article overrides the listening of the articles under the <em>baz</em> tag.</p> + + <a class="readmore" href="/tag/baz.html">read more</a> + </div><!-- /.entry-content --> + </article></li> </ol><!-- /#posts-list --> </section><!-- /#content --> <section id="extras" class="body"> diff --git a/pelican/tests/output/basic/oh-yeah.html b/pelican/tests/output/basic/oh-yeah.html --- a/pelican/tests/output/basic/oh-yeah.html +++ b/pelican/tests/output/basic/oh-yeah.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li class="active"><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/override/index.html b/pelican/tests/output/basic/override/index.html --- a/pelican/tests/output/basic/override/index.html +++ b/pelican/tests/output/basic/override/index.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li class="active"><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/pages/this-is-a-test-hidden-page.html b/pelican/tests/output/basic/pages/this-is-a-test-hidden-page.html --- a/pelican/tests/output/basic/pages/this-is-a-test-hidden-page.html +++ b/pelican/tests/output/basic/pages/this-is-a-test-hidden-page.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/pages/this-is-a-test-page.html b/pelican/tests/output/basic/pages/this-is-a-test-page.html --- a/pelican/tests/output/basic/pages/this-is-a-test-page.html +++ b/pelican/tests/output/basic/pages/this-is-a-test-page.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li class="active"><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/second-article-fr.html b/pelican/tests/output/basic/second-article-fr.html --- a/pelican/tests/output/basic/second-article-fr.html +++ b/pelican/tests/output/basic/second-article-fr.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/second-article.html b/pelican/tests/output/basic/second-article.html --- a/pelican/tests/output/basic/second-article.html +++ b/pelican/tests/output/basic/second-article.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + 
<li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/tag/bar.html b/pelican/tests/output/basic/tag/bar.html --- a/pelican/tests/output/basic/tag/bar.html +++ b/pelican/tests/output/basic/tag/bar.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/tag/baz.html b/pelican/tests/output/basic/tag/baz.html --- a/pelican/tests/output/basic/tag/baz.html +++ b/pelican/tests/output/basic/tag/baz.html @@ -2,7 +2,7 @@ <html lang="en"> <head> <meta charset="utf-8"> - <title>A Pelican Blog - baz</title> + <title>The baz tag</title> <link rel="stylesheet" href="/theme/css/main.css"> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> @@ -15,32 +15,37 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> <li><a href="/category/cat1.html">cat1</a></li> - <li><a href="/category/misc.html">misc</a></li> + <li class="active"><a href="/category/misc.html">misc</a></li> <li><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="/tag/baz.html" rel="bookmark" + title="Permalink to The baz tag">The baz tag</a></h1> + </header> - <aside id="featured" class="body"> - <article> - <h1 class="entry-title"><a href="/second-article.html">Second article</a></h1> + <div class="entry-content"> <footer class="post-info"> - <abbr class="published" title="2012-02-29T00:00:00"> - Wed 29 February 2012 + <abbr class="published" title="2010-03-14T00:00:00"> + Sun 14 March 2010 </abbr> <p>In <a href="/category/misc.html">misc</a>. 
</p> -<p>tags: <a href="/tag/foo.html">foo</a><a href="/tag/bar.html">bar</a><a href="/tag/baz.html">baz</a></p>Translations: - <a href="/second-article-fr.html">fr</a> - -</footer><!-- /.post-info --><p>This is some article, in english</p> - </article> - </aside><!-- /#featured --> - </ol><!-- /#posts-list --> - </section><!-- /#content --> + +</footer><!-- /.post-info --> <p>This article overrides the listening of the articles under the <em>baz</em> tag.</p> + + </div><!-- /.entry-content --> + + </article> +</section> <section id="extras" class="body"> <div class="social"> <h2>social</h2> diff --git a/pelican/tests/output/basic/tag/foo.html b/pelican/tests/output/basic/tag/foo.html --- a/pelican/tests/output/basic/tag/foo.html +++ b/pelican/tests/output/basic/tag/foo.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/tag/foobar.html b/pelican/tests/output/basic/tag/foobar.html --- a/pelican/tests/output/basic/tag/foobar.html +++ b/pelican/tests/output/basic/tag/foobar.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/tag/oh.html b/pelican/tests/output/basic/tag/oh.html --- a/pelican/tests/output/basic/tag/oh.html +++ b/pelican/tests/output/basic/tag/oh.html @@ -2,7 +2,7 @@ <html lang="en"> <head> <meta charset="utf-8"> - <title>A Pelican Blog - oh</title> + <title>Oh Oh Oh</title> <link rel="stylesheet" href="/theme/css/main.css"> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li class="active"><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> @@ -23,30 +24,12 @@ <h1><a href="/">A Pelican Blog </a></h1> <li><a href="/category/yeah.html">yeah</a></li> </ul></nav> </header><!-- /#banner --> +<section id="content" class="body"> + <h1 class="entry-title">Oh Oh Oh</h1> + + <p>This page overrides the listening of the articles under the <em>oh</em> tag.</p> - <aside id="featured" class="body"> - <article> - <h1 class="entry-title"><a href="/oh-yeah.html">Oh yeah !</a></h1> -<footer class="post-info"> - <abbr class="published" title="2010-10-20T10:14:00"> - Wed 20 October 2010 - </abbr> - - <address class="vcard author"> - By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> - </address> -<p>In <a href="/category/bar.html">bar</a>. </p> -<p>tags: <a href="/tag/oh.html">oh</a><a href="/tag/bar.html">bar</a><a href="/tag/yeah.html">yeah</a></p> -</footer><!-- /.post-info --><div class="section" id="why-not"> -<h2>Why not ?</h2> -<p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
-YEAH !</p> -<img alt="alternate text" src="|filename|/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> -</div> - </article> - </aside><!-- /#featured --> - </ol><!-- /#posts-list --> - </section><!-- /#content --> +</section> <section id="extras" class="body"> <div class="social"> <h2>social</h2> diff --git a/pelican/tests/output/basic/tag/yeah.html b/pelican/tests/output/basic/tag/yeah.html --- a/pelican/tests/output/basic/tag/yeah.html +++ b/pelican/tests/output/basic/tag/yeah.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/this-is-a-super-article.html b/pelican/tests/output/basic/this-is-a-super-article.html --- a/pelican/tests/output/basic/this-is-a-super-article.html +++ b/pelican/tests/output/basic/this-is-a-super-article.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/basic/unbelievable.html b/pelican/tests/output/basic/unbelievable.html --- a/pelican/tests/output/basic/unbelievable.html +++ b/pelican/tests/output/basic/unbelievable.html @@ -15,6 +15,7 @@ <header id="banner" class="body"> <h1><a href="/">A Pelican Blog </a></h1> <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> <li><a href="/override/">Override url/save_as</a></li> <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="/category/bar.html">bar</a></li> diff --git a/pelican/tests/output/custom/a-markdown-powered-article.html b/pelican/tests/output/custom/a-markdown-powered-article.html --- a/pelican/tests/output/custom/a-markdown-powered-article.html +++ b/pelican/tests/output/custom/a-markdown-powered-article.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/archives.html b/pelican/tests/output/custom/archives.html --- a/pelican/tests/output/custom/archives.html +++ b/pelican/tests/output/custom/archives.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> @@ -49,6 +50,8 @@ <h1>Archives for Alexis' log</h1> <dd><a href="./oh-yeah.html">Oh yeah !</a></dd> <dt>Fri 15 October 2010</dt> <dd><a href="./unbelievable.html">Unbelievable !</a></dd> + <dt>Sun 14 March 2010</dt> + <dd><a href="./tag/baz.html">The baz tag</a></dd> </dl> </section> <section id="extras" class="body"> diff --git a/pelican/tests/output/custom/article-1.html b/pelican/tests/output/custom/article-1.html --- a/pelican/tests/output/custom/article-1.html +++ 
b/pelican/tests/output/custom/article-1.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/article-2.html b/pelican/tests/output/custom/article-2.html --- a/pelican/tests/output/custom/article-2.html +++ b/pelican/tests/output/custom/article-2.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/article-3.html b/pelican/tests/output/custom/article-3.html --- a/pelican/tests/output/custom/article-3.html +++ b/pelican/tests/output/custom/article-3.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/author/alexis-metaireau.html b/pelican/tests/output/custom/author/alexis-metaireau.html --- a/pelican/tests/output/custom/author/alexis-metaireau.html +++ b/pelican/tests/output/custom/author/alexis-metaireau.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/author/alexis-metaireau2.html b/pelican/tests/output/custom/author/alexis-metaireau2.html --- a/pelican/tests/output/custom/author/alexis-metaireau2.html +++ b/pelican/tests/output/custom/author/alexis-metaireau2.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/author/alexis-metaireau3.html b/pelican/tests/output/custom/author/alexis-metaireau3.html --- a/pelican/tests/output/custom/author/alexis-metaireau3.html +++ b/pelican/tests/output/custom/author/alexis-metaireau3.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> @@ -54,6 +55,29 @@ <h1><a href="../unbelievable.html" rel="bookmark" <a class="readmore" href="../unbelievable.html">read more</a> <p>There are <a href="../unbelievable.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../tag/baz.html" 
rel="bookmark" + title="Permalink to The baz tag">The baz tag</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-03-14T00:00:00"> + Sun 14 March 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. </p> + +</footer><!-- /.post-info --> <p>This article overrides the listening of the articles under the <em>baz</em> tag.</p> + + <a class="readmore" href="../tag/baz.html">read more</a> +<p>There are <a href="../tag/baz.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> </ol><!-- /#posts-list --> <p class="paginator"> <a href="../author/alexis-metaireau2.html">&laquo;</a> diff --git a/pelican/tests/output/custom/categories.html b/pelican/tests/output/custom/categories.html --- a/pelican/tests/output/custom/categories.html +++ b/pelican/tests/output/custom/categories.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/category/bar.html b/pelican/tests/output/custom/category/bar.html --- a/pelican/tests/output/custom/category/bar.html +++ b/pelican/tests/output/custom/category/bar.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/category/cat1.html b/pelican/tests/output/custom/category/cat1.html --- a/pelican/tests/output/custom/category/cat1.html +++ b/pelican/tests/output/custom/category/cat1.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/category/misc.html b/pelican/tests/output/custom/category/misc.html --- a/pelican/tests/output/custom/category/misc.html +++ b/pelican/tests/output/custom/category/misc.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> @@ -98,6 +99,29 @@ <h1><a href="../unbelievable.html" rel="bookmark" <a class="readmore" href="../unbelievable.html">read more</a> <p>There are <a href="../unbelievable.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../tag/baz.html" rel="bookmark" + title="Permalink to The baz tag">The baz tag</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-03-14T00:00:00"> + Sun 14 March 2010 + 
</abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. </p> + +</footer><!-- /.post-info --> <p>This article overrides the listening of the articles under the <em>baz</em> tag.</p> + + <a class="readmore" href="../tag/baz.html">read more</a> +<p>There are <a href="../tag/baz.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> </ol><!-- /#posts-list --> <p class="paginator"> Page 1 / 1 diff --git a/pelican/tests/output/custom/category/yeah.html b/pelican/tests/output/custom/category/yeah.html --- a/pelican/tests/output/custom/category/yeah.html +++ b/pelican/tests/output/custom/category/yeah.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li class="active"><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/drafts/a-draft-article.html b/pelican/tests/output/custom/drafts/a-draft-article.html --- a/pelican/tests/output/custom/drafts/a-draft-article.html +++ b/pelican/tests/output/custom/drafts/a-draft-article.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/feeds/all-en.atom.xml b/pelican/tests/output/custom/feeds/all-en.atom.xml --- a/pelican/tests/output/custom/feeds/all-en.atom.xml +++ b/pelican/tests/output/custom/feeds/all-en.atom.xml @@ -27,4 +27,5 @@ YEAH !&lt;/p&gt; </summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all.atom.xml b/pelican/tests/output/custom/feeds/all.atom.xml --- a/pelican/tests/output/custom/feeds/all.atom.xml +++ b/pelican/tests/output/custom/feeds/all.atom.xml @@ -29,4 +29,5 @@ YEAH !&lt;/p&gt; </summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all.rss.xml b/pelican/tests/output/custom/feeds/all.rss.xml --- a/pelican/tests/output/custom/feeds/all.rss.xml +++ b/pelican/tests/output/custom/feeds/all.rss.xml @@ -29,4 +29,5 @@ YEAH !&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/misc.atom.xml b/pelican/tests/output/custom/feeds/misc.atom.xml --- a/pelican/tests/output/custom/feeds/misc.atom.xml +++ b/pelican/tests/output/custom/feeds/misc.atom.xml @@ -4,4 +4,5 @@ </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/misc.rss.xml b/pelican/tests/output/custom/feeds/misc.rss.xml --- a/pelican/tests/output/custom/feeds/misc.rss.xml +++ b/pelican/tests/output/custom/feeds/misc.rss.xml @@ -4,4 +4,5 @@ </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:second-article.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom/filename_metadata-example.html b/pelican/tests/output/custom/filename_metadata-example.html --- a/pelican/tests/output/custom/filename_metadata-example.html +++ b/pelican/tests/output/custom/filename_metadata-example.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/index.html b/pelican/tests/output/custom/index.html --- a/pelican/tests/output/custom/index.html +++ b/pelican/tests/output/custom/index.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/index2.html b/pelican/tests/output/custom/index2.html --- a/pelican/tests/output/custom/index2.html +++ b/pelican/tests/output/custom/index2.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/index3.html b/pelican/tests/output/custom/index3.html --- a/pelican/tests/output/custom/index3.html +++ b/pelican/tests/output/custom/index3.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> @@ -54,6 +55,29 @@ <h1><a href="./unbelievable.html" rel="bookmark" <a class="readmore" 
href="./unbelievable.html">read more</a> <p>There are <a href="./unbelievable.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="./tag/baz.html" rel="bookmark" + title="Permalink to The baz tag">The baz tag</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-03-14T00:00:00"> + Sun 14 March 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/misc.html">misc</a>. </p> + +</footer><!-- /.post-info --> <p>This article overrides the listening of the articles under the <em>baz</em> tag.</p> + + <a class="readmore" href="./tag/baz.html">read more</a> +<p>There are <a href="./tag/baz.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> </ol><!-- /#posts-list --> <p class="paginator"> <a href="./index2.html">&laquo;</a> diff --git a/pelican/tests/output/custom/jinja2_template.html b/pelican/tests/output/custom/jinja2_template.html --- a/pelican/tests/output/custom/jinja2_template.html +++ b/pelican/tests/output/custom/jinja2_template.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/oh-yeah-fr.html b/pelican/tests/output/custom/oh-yeah-fr.html --- a/pelican/tests/output/custom/oh-yeah-fr.html +++ b/pelican/tests/output/custom/oh-yeah-fr.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/oh-yeah.html b/pelican/tests/output/custom/oh-yeah.html --- a/pelican/tests/output/custom/oh-yeah.html +++ b/pelican/tests/output/custom/oh-yeah.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/override/index.html b/pelican/tests/output/custom/override/index.html --- a/pelican/tests/output/custom/override/index.html +++ b/pelican/tests/output/custom/override/index.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li class="active"><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/pages/this-is-a-test-hidden-page.html b/pelican/tests/output/custom/pages/this-is-a-test-hidden-page.html --- a/pelican/tests/output/custom/pages/this-is-a-test-hidden-page.html +++ b/pelican/tests/output/custom/pages/this-is-a-test-hidden-page.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> 
<h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/pages/this-is-a-test-page.html b/pelican/tests/output/custom/pages/this-is-a-test-page.html --- a/pelican/tests/output/custom/pages/this-is-a-test-page.html +++ b/pelican/tests/output/custom/pages/this-is-a-test-page.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li class="active"><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/second-article-fr.html b/pelican/tests/output/custom/second-article-fr.html --- a/pelican/tests/output/custom/second-article-fr.html +++ b/pelican/tests/output/custom/second-article-fr.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/second-article.html b/pelican/tests/output/custom/second-article.html --- a/pelican/tests/output/custom/second-article.html +++ b/pelican/tests/output/custom/second-article.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/tag/bar.html b/pelican/tests/output/custom/tag/bar.html --- a/pelican/tests/output/custom/tag/bar.html +++ b/pelican/tests/output/custom/tag/bar.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/tag/baz.html b/pelican/tests/output/custom/tag/baz.html --- a/pelican/tests/output/custom/tag/baz.html +++ b/pelican/tests/output/custom/tag/baz.html @@ -2,7 +2,7 @@ <html lang="en"> <head> <meta charset="utf-8"> - <title>Alexis' log - baz</title> + <title>The baz tag</title> <link rel="stylesheet" href="../theme/css/main.css"> <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> @@ -19,38 +19,53 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> - <li><a 
href="../category/misc.html">misc</a></li> + <li class="active"><a href="../category/misc.html">misc</a></li> <li><a href="../category/cat1.html">cat1</a></li> <li><a href="../category/bar.html">bar</a></li> </ul></nav> </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../tag/baz.html" rel="bookmark" + title="Permalink to The baz tag">The baz tag</a></h1> + </header> - <aside id="featured" class="body"> - <article> - <h1 class="entry-title"><a href="../second-article.html">Second article</a></h1> + <div class="entry-content"> <footer class="post-info"> - <abbr class="published" title="2012-02-29T00:00:00"> - Wed 29 February 2012 + <abbr class="published" title="2010-03-14T00:00:00"> + Sun 14 March 2010 </abbr> <address class="vcard author"> By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> </address> <p>In <a href="../category/misc.html">misc</a>. </p> -<p>tags: <a href="../tag/foo.html">foo</a><a href="../tag/bar.html">bar</a><a href="../tag/baz.html">baz</a></p>Translations: - <a href="../second-article-fr.html">fr</a> -</footer><!-- /.post-info --><p>This is some article, in english</p> -<p>There are <a href="../second-article.html#disqus_thread">comments</a>.</p> </article> -<p class="paginator"> - Page 1 / 1 -</p> - </aside><!-- /#featured --> - </ol><!-- /#posts-list --> - </section><!-- /#content --> +</footer><!-- /.post-info --> <p>This article overrides the listening of the articles under the <em>baz</em> tag.</p> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_identifier = "tag/baz.html"; + var disqus_url = "../tag/baz.html"; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = 'http://blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + </div> + + </article> +</section> <section id="extras" class="body"> <div class="blogroll"> <h2>blogroll</h2> diff --git a/pelican/tests/output/custom/tag/foo.html b/pelican/tests/output/custom/tag/foo.html --- a/pelican/tests/output/custom/tag/foo.html +++ b/pelican/tests/output/custom/tag/foo.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/tag/foobar.html b/pelican/tests/output/custom/tag/foobar.html --- a/pelican/tests/output/custom/tag/foobar.html +++ b/pelican/tests/output/custom/tag/foobar.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/tag/oh.html b/pelican/tests/output/custom/tag/oh.html --- a/pelican/tests/output/custom/tag/oh.html +++ b/pelican/tests/output/custom/tag/oh.html @@ -2,7 +2,7 @@ <html lang="en"> <head> <meta charset="utf-8"> - <title>Alexis' log - oh</title> + <title>Oh Oh 
Oh</title> <link rel="stylesheet" href="../theme/css/main.css"> <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li class="active"><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> @@ -27,35 +28,12 @@ <h1><a href="../">Alexis' log </a></h1> <li><a href="../category/bar.html">bar</a></li> </ul></nav> </header><!-- /#banner --> +<section id="content" class="body"> + <h1 class="entry-title">Oh Oh Oh</h1> + + <p>This page overrides the listening of the articles under the <em>oh</em> tag.</p> - <aside id="featured" class="body"> - <article> - <h1 class="entry-title"><a href="../oh-yeah.html">Oh yeah !</a></h1> -<footer class="post-info"> - <abbr class="published" title="2010-10-20T10:14:00"> - Wed 20 October 2010 - </abbr> - - <address class="vcard author"> - By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> - </address> -<p>In <a href="../category/bar.html">bar</a>. </p> -<p>tags: <a href="../tag/oh.html">oh</a><a href="../tag/bar.html">bar</a><a href="../tag/yeah.html">yeah</a></p>Translations: - <a href="../oh-yeah-fr.html">fr</a> - -</footer><!-- /.post-info --><div class="section" id="why-not"> -<h2>Why not ?</h2> -<p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! -YEAH !</p> -<img alt="alternate text" src="../pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> -</div> -<p>There are <a href="../oh-yeah.html#disqus_thread">comments</a>.</p> </article> -<p class="paginator"> - Page 1 / 1 -</p> - </aside><!-- /#featured --> - </ol><!-- /#posts-list --> - </section><!-- /#content --> +</section> <section id="extras" class="body"> <div class="blogroll"> <h2>blogroll</h2> diff --git a/pelican/tests/output/custom/tag/yeah.html b/pelican/tests/output/custom/tag/yeah.html --- a/pelican/tests/output/custom/tag/yeah.html +++ b/pelican/tests/output/custom/tag/yeah.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="../">Alexis' log </a></h1> <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> <li><a href="../override/">Override url/save_as</a></li> <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="../category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/this-is-a-super-article.html b/pelican/tests/output/custom/this-is-a-super-article.html --- a/pelican/tests/output/custom/this-is-a-super-article.html +++ b/pelican/tests/output/custom/this-is-a-super-article.html @@ -19,6 +19,7 @@ <header id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li class="active"><a href="./category/yeah.html">yeah</a></li> diff --git a/pelican/tests/output/custom/unbelievable.html b/pelican/tests/output/custom/unbelievable.html --- a/pelican/tests/output/custom/unbelievable.html +++ b/pelican/tests/output/custom/unbelievable.html @@ -19,6 +19,7 @@ <header 
id="banner" class="body"> <h1><a href="./">Alexis' log </a></h1> <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> <li><a href="./override/">Override url/save_as</a></li> <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> <li><a href="./category/yeah.html">yeah</a></li>
Regression when specifying static page as home According to http://docs.getpelican.com/en/3.2/faq.html#how-can-i-use-a-static-page-as-my-home-page it is possible to specify index.html to simply override the page that is used as the homepage. Commit [ff7410ce2ada85b486a67ae11874d60d135ff939](https://github.com/getpelican/pelican/commit/ff7410ce2ada85b486a67ae11874d60d135ff939) made this no longer possible, as overwriting an existing file is no longer allowed. The FAQ and the current HEAD of the repository contradict each other. This could be resolved by either changing the FAQ before releasing or allowing selective overwriting.
Regression confirmed in my testing: ``` CRITICAL: File /mysite/output/index.html is to be overwritten ``` @bbinet included functional test output when he implemented the page override feature in d0e9c52 — I'm not sure why that test didn't alert us to the regression, but I haven't had time to delve into it in detail. @Rogdham: Do you think you could look at the issue @cdecker raised and see if you have any ideas? As I understand the discussions about the override feature, it was a feature requested by a lot of people, so we do not want to remove it at all! So changing the doc is not an option, it must be fixed properly. I will give it a try. Give me some time to see why the tests did not fail, and try to fix it as well.
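To make the collision concrete, here is a minimal hedged sketch of the kind of guard the referenced commit introduced (the function name `write_output` is made up; this is not the actual Pelican writer). The FAQ recipe relies on a page whose `save_as` metadata is `index.html` replacing the generated index, which is exactly the overwrite such a guard now refuses:

```python
import logging
import os

logger = logging.getLogger(__name__)

def write_output(path, content, overwrite=False):
    # Refuse to clobber anything already present in the output directory,
    # which is what produces the CRITICAL message quoted above.
    if os.path.exists(path) and not overwrite:
        logger.critical('File %s is to be overwritten', path)
        return
    with open(path, 'w') as fh:
        fh.write(content)
```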
2013-06-22T14:12:47Z
[]
[]
getpelican/pelican
944
getpelican__pelican-944
[ "574" ]
dd9f55c8bb0979d230c42cd28bb8b6fbe6d41d98
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -49,6 +49,7 @@ def __init__(self, settings): self.markup = settings['MARKUP'] self.ignore_files = settings['IGNORE_FILES'] self.delete_outputdir = settings['DELETE_OUTPUT_DIRECTORY'] + self.output_retention = settings['OUTPUT_RETENTION'] self.init_path() self.init_plugins() @@ -175,7 +176,7 @@ def run(self): # explicitely asked if (self.delete_outputdir and not os.path.realpath(self.path).startswith(self.output_path)): - clean_output_dir(self.output_path) + clean_output_dir(self.output_path, self.output_retention) writer = self.get_writer() diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -54,6 +54,7 @@ 'NEWEST_FIRST_ARCHIVES': True, 'REVERSE_CATEGORY_ORDER': False, 'DELETE_OUTPUT_DIRECTORY': False, + 'OUTPUT_RETENTION': (), 'ARTICLE_URL': '{slug}.html', 'ARTICLE_SAVE_AS': '{slug}.html', 'ARTICLE_LANG_URL': '{slug}-{lang}.html', diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -298,8 +298,8 @@ def copy(path, source, destination, destination_path=None, overwrite=False): logger.warning('skipped copy %s to %s' % (source_, destination_)) -def clean_output_dir(path): - """Remove all the files from the output directory""" +def clean_output_dir(path, retention): + """Remove all files from output directory except those in retention list""" if not os.path.exists(path): logger.debug("Directory already removed: %s" % path) @@ -312,10 +312,13 @@ def clean_output_dir(path): logger.error("Unable to delete file %s; %s" % (path, str(e))) return - # remove all the existing content from the output folder + # remove existing content from output folder unless in retention list for filename in os.listdir(path): file = os.path.join(path, filename) - if os.path.isdir(file): + if any(filename == retain for retain in retention): + logger.debug("Skipping deletion; %s is on retention list: %s" \ + % (filename, file)) + elif os.path.isdir(file): try: shutil.rmtree(file) logger.debug("Deleted directory %s" % file)
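A usage sketch of the setting added by this patch (the setting names come from the diff above; the retained filenames are the ones discussed in the issue). Entries are compared by exact filename against the top level of the output directory, so a deployment checkout's `.git` survives Pelican's own cleanup:

```python
# pelicanconf.py -- illustrative values; setting names taken from the patch above.
# clean_output_dir() skips any top-level entry whose name appears in
# OUTPUT_RETENTION when DELETE_OUTPUT_DIRECTORY triggers the cleanup.
DELETE_OUTPUT_DIRECTORY = True
OUTPUT_RETENTION = ('.git', '.gitignore')
```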
diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -193,28 +193,31 @@ def test_watchers(self): shutil.rmtree(empty_path, True) def test_clean_output_dir(self): + retention = () test_directory = os.path.join(os.path.dirname(__file__), 'clean_output') content = os.path.join(os.path.dirname(__file__), 'content') shutil.copytree(content, test_directory) - utils.clean_output_dir(test_directory) + utils.clean_output_dir(test_directory, retention) self.assertTrue(os.path.isdir(test_directory)) self.assertListEqual([], os.listdir(test_directory)) shutil.rmtree(test_directory) def test_clean_output_dir_not_there(self): + retention = () test_directory = os.path.join(os.path.dirname(__file__), 'does_not_exist') - utils.clean_output_dir(test_directory) + utils.clean_output_dir(test_directory, retention) self.assertFalse(os.path.exists(test_directory)) def test_clean_output_dir_is_file(self): + retention = () test_directory = os.path.join(os.path.dirname(__file__), 'this_is_a_file') f = open(test_directory, 'w') f.write('') f.close() - utils.clean_output_dir(test_directory) + utils.clean_output_dir(test_directory, retention) self.assertFalse(os.path.exists(test_directory)) def test_strftime(self):
clean should not remove .git metadata I am deploying my Pelican-generated site via Heroku, so the target directory is a Git repository. `make clean` deletes everything in the output directory, so I had to hack up my Makefile to stop that behaviour.
@msoulier Can you share your solution please (or point me to the source)? @amanjeev ``` make clean: find $(OUTPUTDIR) -mindepth 1 -name '.git*' -prune -o -delete ``` This will also exclude `.gitignore`, `.gitconfig` etc. @rupert: In my testing, those parameters to `find` preserves the .git directory itself but nukes everything inside it. Ah sorry, I've updated the `find` command above. ``` bash $ find output output output/.git output/.git/hello output/.gitignore output/foo output/foo/bar output/hello $ find output -mindepth 1 -name '.git*' -prune -o -print output/foo output/foo/bar output/hello ``` Great. Any idea as to the full command to delete everything but the `.git` directory and its contents? Delete everything except `.git`: ``` bash $ find $(OUTPUTDIR) -mindepth 1 -name .git -prune -o -delete ``` Delete everything except `.git`, `.gitignore` etc: ``` bash $ find $(OUTPUTDIR) -mindepth 1 -name '.git*' -prune -o -delete ``` In my testing, those commands do the same as I noted previously: preserves the .git directory itself but nukes everything inside it. I wrote the commands for Mac OS X `find`. I've since found that the GNU version of `find` doesn't support using `-prune` with `-delete`. @justinmayer, can you try: ``` bash $ find $(OUTPUTDIR) -mindepth 1 -name '.git*' -prune -o -print0 | xargs -0 rm -rf ``` ``` bash $ find output output output/.git output/.git/hello output/.gitignore output/foo output/foo/bar output/hello $ make clean find /Users/rupert/rupertb.com/output -mindepth 1 -name '.git*' -prune -o -print0 | xargs -0 rm -rf $ find output output output/.git output/.git/hello output/.gitignore ``` I actually did my testing on Mac OS X 10.8.2 to produce the behavior described above. I thought about `xargs`, but once we get to the point of piping things to `rm -rf`, I think we've crossed the line over into... a potentially bad place. ;^) Honestly, if folks want to avoid nuking `.git`, `.hg`, and other files/directories in the output folder, they are probably better off installing and using `ack` instead of wrangling with `find`. Perhaps someone would care to write up an appropriate line for the Makefile using `ack` and then add it to our [Tips](https://github.com/getpelican/pelican/blob/master/docs/tips.rst) page?
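For completeness, a rough illustration of the helper changed by the patch above, called directly (the function name and signature come from the diff; the path and retention values are illustrative). Note that this covers Pelican's own `DELETE_OUTPUT_DIRECTORY` cleanup; a Makefile `clean` target that removes the directory wholesale is unaffected by the setting and still needs one of the `find` recipes above:

```python
# Illustration only -- removes everything under 'output' except the
# top-level entries named in the retention tuple.
from pelican.utils import clean_output_dir

clean_output_dir('output', ('.git', '.gitignore'))
```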
2013-06-23T18:51:53Z
[]
[]
getpelican/pelican
964
getpelican__pelican-964
[ "926" ]
9f0ad2bd954cf82dfd8a4477cbb2649f91921c6a
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -141,14 +141,21 @@ def url_format(self): """Returns the URL, formatted with the proper values""" metadata = copy.copy(self.metadata) path = self.metadata.get('path', self.get_relative_source_path()) + default_category = self.settings['DEFAULT_CATEGORY'] + slug_substitutions = self.settings.get('SLUG_SUBSTITUTIONS', ()) metadata.update({ 'path': path_to_url(path), 'slug': getattr(self, 'slug', ''), 'lang': getattr(self, 'lang', 'en'), 'date': getattr(self, 'date', datetime.now()), - 'author': getattr(self, 'author', ''), - 'category': getattr(self, 'category', - self.settings['DEFAULT_CATEGORY']), + 'author': slugify( + getattr(self, 'author', ''), + slug_substitutions + ), + 'category': slugify( + getattr(self, 'category', default_category), + slug_substitutions + ) }) return metadata
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -191,6 +191,20 @@ def test_template(self): custom_article = Article(**article_kwargs) self.assertEqual('custom', custom_article.template) + def test_slugify_category_author(self): + settings = get_settings() + settings['SLUG_SUBSTITUTIONS'] = [ ('C#', 'csharp') ] + settings['ARTICLE_URL'] = '{author}/{category}/{slug}/' + settings['ARTICLE_SAVE_AS'] = '{author}/{category}/{slug}/index.html' + article_kwargs = self._copy_page_kwargs() + article_kwargs['metadata']['author'] = "O'Brien" + article_kwargs['metadata']['category'] = 'C# & stuff' + article_kwargs['metadata']['title'] = 'fnord' + article_kwargs['settings'] = settings + article = Article(**article_kwargs) + self.assertEqual(article.url, 'obrien/csharp-stuff/fnord/') + self.assertEqual(article.save_as, 'obrien/csharp-stuff/fnord/index.html') + class TestURLWrapper(unittest.TestCase): def test_comparisons(self):
{category} in URLs isn't slugified When trying to save articles using: ``` CATEGORY_SAVE_AS = "{slug}/index.html" ARTICLE_SAVE_AS = "{category}/{slug}/index.html" ``` the `{category}` in the second instance isn't slugified, so the URLs don't line up neatly and may end up with spaces or other URL-unfriendly characters in them. Ditto `{author}`.
Actually, I was thinking about this today, and there are some problems with my previous approach. Slugs for categories may interfere, which is not at all good: ``` >>> from pelican.utils import slugify >>> slugify("C") u'c' >>> slugify("C++") u'c' ``` So how about this as an alternative solution: a) url-escape `{category}` and `{author}`, so that the category "C#" at least makes a valid URL ("C%23") b) add a config option like ``` CATEGORY_SLUGS = { 'C#': 'csharp', 'C++', 'cplusplus', 'Odds and Ends': 'etc' } ``` to let the SEO-concerned make their own damned slugs. Categories not listed just get url escaped instead. Hi Nick. Would Andrew's proposed changes in pull request #931 address the issue you raised here? I considered URL-escaping in my change, but rejected it on the basis that it would make all URLs look unpleasant for the sake of the rare cases where it's required. I figured that a static mapping was a reasonable solution which keeps the common case pleasant at the expense of requiring a little effort from authors. If this approach proves successful I suppose the defaults could be updated to include common cases like `C++`, but I guess that would need a little care (for example, settings files would probably want to merge new entries into the dictionary as opposed to replacing it entirely). Hiya Justin & Andrew ... yep, I think that'd be work just fine. I wrote a very small patch to call slugify() on 'category' and 'author' ... https://github.com/nickzoic/pelican/commit/d25d3503611059b29eb23e704ce506b7734a8f93 Combined with Andrew's patch expanding slugify to respect SLUG_SUBSTITUTIONS, it'd also solve the C vs C++ vs C# problem. On the issue of default substitutions, I'd be tempted to just chuck the usual suspects (C++, C#) in the default pelicanconf.py file ... they're unlikely enough sequences that non-technical types are unlikely to stumble over them, and anyone smart enough to be blogging about F# or J++ can probably work it out from the examples :-) I would ask that there be a similar fix for tags, as they currently suffer from this same issue. @cdhowie: totally agree, I'd forgotten about tags because I hadn't used them (yet). I'll add that to my patch, and when I get a moment I'll turn it all into a pull request based on @Cartroo 's SLUG_SUBSTITUTIONS pull request #931
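Pulling the thread together, a settings sketch (the setting names and the 'C#'-to-'csharp' mapping appear in the pull request's test; the 'C++' entry and the URL patterns are illustrative):

```python
# pelicanconf.py -- illustrative values. With the patch, {category} and {author}
# in URL templates are passed through slugify(), and SLUG_SUBSTITUTIONS keeps
# "C#" and "C++" from both collapsing to the bare slug "c".
SLUG_SUBSTITUTIONS = (('C#', 'csharp'), ('C++', 'cplusplus'))
ARTICLE_URL = '{category}/{slug}/'
ARTICLE_SAVE_AS = '{category}/{slug}/index.html'
```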
2013-07-14T14:32:38Z
[]
[]
getpelican/pelican
1002
getpelican__pelican-1002
[ "963" ]
ea6d0cf5b539c5771e361106bd312eaa5dbf6dc2
diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py --- a/pelican/rstdirectives.py +++ b/pelican/rstdirectives.py @@ -32,7 +32,7 @@ def run(self): # no lexer found - use the text one instead of an exception lexer = TextLexer() # take an arbitrary option if more than one is given - formatter = self.options and VARIANTS[self.options.keys()[0]] \ + formatter = self.options and VARIANTS[list(self.options.keys())[0]] \ or DEFAULT parsed = highlight('\n'.join(self.content), lexer, formatter) return [nodes.raw('', parsed, format='html')]
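The one-line change above is a Python 3 compatibility fix; a short illustration of the underlying difference (plain dict behaviour, not Pelican-specific code; the option name is just an example):

```python
options = {'linenos': 'table'}

# Python 2: dict.keys() returns a list, so options.keys()[0] is valid.
# Python 3: dict.keys() returns a view object, which cannot be indexed and
# raises a TypeError if you try options.keys()[0].
first_option = list(options.keys())[0]  # works on both Python 2 and 3
print(first_option)                     # -> 'linenos'
```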
diff --git a/pelican/tests/output/basic/category/misc.html b/pelican/tests/output/basic/category/misc.html --- a/pelican/tests/output/basic/category/misc.html +++ b/pelican/tests/output/basic/category/misc.html @@ -82,6 +82,12 @@ <h1><a href="/unbelievable.html" rel="bookmark" </footer><!-- /.post-info --> <p>Or completely awesome. Depends the needs.</p> <p><a class="reference external" href="/a-markdown-powered-article.html">a root-relative link to markdown-article</a> <a class="reference external" href="/a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> +<div class="section" id="testing-sourcecode-directive"> +<h2>Testing sourcecode directive</h2> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table><p>Lovely.</p> +</div> <a class="readmore" href="/unbelievable.html">read more</a> </div><!-- /.entry-content --> diff --git a/pelican/tests/output/basic/feeds/all-en.atom.xml b/pelican/tests/output/basic/feeds/all-en.atom.xml --- a/pelican/tests/output/basic/feeds/all-en.atom.xml +++ b/pelican/tests/output/basic/feeds/all-en.atom.xml @@ -27,5 +27,11 @@ YEAH !&lt;/p&gt; </summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00Z</updated><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; </summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00Z</updated><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/all.atom.xml b/pelican/tests/output/basic/feeds/all.atom.xml --- a/pelican/tests/output/basic/feeds/all.atom.xml +++ b/pelican/tests/output/basic/feeds/all.atom.xml @@ -28,5 +28,11 @@ YEAH !&lt;/p&gt; </summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00Z</updated><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; </summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00Z</updated><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/misc.atom.xml b/pelican/tests/output/basic/feeds/misc.atom.xml --- a/pelican/tests/output/basic/feeds/misc.atom.xml +++ b/pelican/tests/output/basic/feeds/misc.atom.xml @@ -4,5 +4,11 @@ </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00Z</updated><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; </summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00Z</updated><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/index.html b/pelican/tests/output/basic/index.html --- a/pelican/tests/output/basic/index.html +++ b/pelican/tests/output/basic/index.html @@ -215,6 +215,12 @@ <h1><a href="/unbelievable.html" rel="bookmark" </footer><!-- /.post-info --> <p>Or completely awesome. Depends the needs.</p> <p><a class="reference external" href="/a-markdown-powered-article.html">a root-relative link to markdown-article</a> <a class="reference external" href="/a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> +<div class="section" id="testing-sourcecode-directive"> +<h2>Testing sourcecode directive</h2> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table><p>Lovely.</p> +</div> <a class="readmore" href="/unbelievable.html">read more</a> </div><!-- /.entry-content --> diff --git a/pelican/tests/output/basic/unbelievable.html b/pelican/tests/output/basic/unbelievable.html --- a/pelican/tests/output/basic/unbelievable.html +++ b/pelican/tests/output/basic/unbelievable.html @@ -43,6 +43,12 @@ <h1 class="entry-title"> </footer><!-- /.post-info --> <p>Or completely awesome. 
Depends the needs.</p> <p><a class="reference external" href="/a-markdown-powered-article.html">a root-relative link to markdown-article</a> <a class="reference external" href="/a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> +<div class="section" id="testing-sourcecode-directive"> +<h2>Testing sourcecode directive</h2> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table><p>Lovely.</p> +</div> </div><!-- /.entry-content --> diff --git a/pelican/tests/output/custom/author/alexis-metaireau3.html b/pelican/tests/output/custom/author/alexis-metaireau3.html --- a/pelican/tests/output/custom/author/alexis-metaireau3.html +++ b/pelican/tests/output/custom/author/alexis-metaireau3.html @@ -51,6 +51,12 @@ <h1><a href="../unbelievable.html" rel="bookmark" </footer><!-- /.post-info --> <p>Or completely awesome. Depends the needs.</p> <p><a class="reference external" href="../a-markdown-powered-article.html">a root-relative link to markdown-article</a> <a class="reference external" href="../a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> +<div class="section" id="testing-sourcecode-directive"> +<h2>Testing sourcecode directive</h2> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table><p>Lovely.</p> +</div> <a class="readmore" href="../unbelievable.html">read more</a> <p>There are <a href="../unbelievable.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> diff --git a/pelican/tests/output/custom/category/misc.html b/pelican/tests/output/custom/category/misc.html --- a/pelican/tests/output/custom/category/misc.html +++ b/pelican/tests/output/custom/category/misc.html @@ -95,6 +95,12 @@ <h1><a href="../unbelievable.html" rel="bookmark" </footer><!-- /.post-info --> <p>Or completely awesome. 
Depends the needs.</p> <p><a class="reference external" href="../a-markdown-powered-article.html">a root-relative link to markdown-article</a> <a class="reference external" href="../a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> +<div class="section" id="testing-sourcecode-directive"> +<h2>Testing sourcecode directive</h2> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table><p>Lovely.</p> +</div> <a class="readmore" href="../unbelievable.html">read more</a> <p>There are <a href="../unbelievable.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> diff --git a/pelican/tests/output/custom/feeds/all-en.atom.xml b/pelican/tests/output/custom/feeds/all-en.atom.xml --- a/pelican/tests/output/custom/feeds/all-en.atom.xml +++ b/pelican/tests/output/custom/feeds/all-en.atom.xml @@ -27,5 +27,11 @@ YEAH !&lt;/p&gt; </summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; </summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all.atom.xml b/pelican/tests/output/custom/feeds/all.atom.xml --- a/pelican/tests/output/custom/feeds/all.atom.xml +++ b/pelican/tests/output/custom/feeds/all.atom.xml @@ -29,5 +29,11 @@ YEAH !&lt;/p&gt; </summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; </summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all.rss.xml b/pelican/tests/output/custom/feeds/all.rss.xml --- a/pelican/tests/output/custom/feeds/all.rss.xml +++ b/pelican/tests/output/custom/feeds/all.rss.xml @@ -29,5 +29,11 @@ YEAH !&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/misc.atom.xml b/pelican/tests/output/custom/feeds/misc.atom.xml --- a/pelican/tests/output/custom/feeds/misc.atom.xml +++ b/pelican/tests/output/custom/feeds/misc.atom.xml @@ -4,5 +4,11 @@ </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; </summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/misc.rss.xml b/pelican/tests/output/custom/feeds/misc.rss.xml --- a/pelican/tests/output/custom/feeds/misc.rss.xml +++ b/pelican/tests/output/custom/feeds/misc.rss.xml @@ -4,5 +4,11 @@ </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:second-article.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom/index3.html b/pelican/tests/output/custom/index3.html --- a/pelican/tests/output/custom/index3.html +++ b/pelican/tests/output/custom/index3.html @@ -51,6 +51,12 @@ <h1><a href="./unbelievable.html" rel="bookmark" </footer><!-- /.post-info --> <p>Or completely awesome. 
Depends the needs.</p> <p><a class="reference external" href="./a-markdown-powered-article.html">a root-relative link to markdown-article</a> <a class="reference external" href="./a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> +<div class="section" id="testing-sourcecode-directive"> +<h2>Testing sourcecode directive</h2> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table><p>Lovely.</p> +</div> <a class="readmore" href="./unbelievable.html">read more</a> <p>There are <a href="./unbelievable.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> diff --git a/pelican/tests/output/custom/unbelievable.html b/pelican/tests/output/custom/unbelievable.html --- a/pelican/tests/output/custom/unbelievable.html +++ b/pelican/tests/output/custom/unbelievable.html @@ -50,6 +50,12 @@ <h1 class="entry-title"> </footer><!-- /.post-info --> <p>Or completely awesome. Depends the needs.</p> <p><a class="reference external" href="./a-markdown-powered-article.html">a root-relative link to markdown-article</a> <a class="reference external" href="./a-markdown-powered-article.html">a file-relative link to markdown-article</a></p> +<div class="section" id="testing-sourcecode-directive"> +<h2>Testing sourcecode directive</h2> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table><p>Lovely.</p> +</div> </div><!-- /.entry-content --> <div class="comments">
"'dict_keys' object does not support indexing" in Python 3 using sourcecode directive When I include a code-block with an option, like this: ``` .. sourcecode:: python :linenos: ... ``` a WARNING appears and the corresponding file is not processed: ``` WARNING: Could not process /home/juanlu/Development/Python/pelican_test/myproject/content/2013-07-14_hello-world.rst 'dict_keys' object does not support indexing ``` The problem is here: https://github.com/getpelican/pelican/blob/master/pelican/rstdirectives.py#L35 and the solution is detailed here: http://stackoverflow.com/questions/8953627/python-dictionary-keys-error I have read the guidelines but, even being a trivial fix: ``` --- rstdirectives.py 2013-07-14 12:41:00.188687997 +0200 +++ rstdirectives.py.new 2013-07-14 12:36:25.982005000 +0200 @@ -32,7 +32,7 @@ # no lexer found - use the text one instead of an exception lexer = TextLexer() # take an arbitrary option if more than one is given - formatter = self.options and VARIANTS[self.options.keys()[0]] \ + formatter = self.options and VARIANTS[list(self.options.keys())[0]] \ or DEFAULT parsed = highlight('\n'.join(self.content), lexer, formatter) return [nodes.raw('', parsed, format='html')] ``` I don't have time to add docs, tests, run the test suite and, summing up, doing it properly. Hence the issue without pull request.
Looks good to me. I'll make this a pull-req and do what is required. Thanks, Russ. You rock!
2013-08-04T08:41:38Z
[]
[]
getpelican/pelican
1011
getpelican__pelican-1011
[ "866" ]
c3aa85831f34e31cae117ca89089aeb479b922e0
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -17,6 +17,7 @@ StaticGenerator, SourceFileGenerator, TemplatePagesGenerator) from pelican.log import init +from pelican.readers import Readers from pelican.settings import read_settings from pelican.utils import clean_output_dir, folder_watcher, file_watcher from pelican.writers import Writer @@ -46,7 +47,6 @@ def __init__(self, settings): self.path = settings['PATH'] self.theme = settings['THEME'] self.output_path = settings['OUTPUT_PATH'] - self.markup = settings['MARKUP'] self.ignore_files = settings['IGNORE_FILES'] self.delete_outputdir = settings['DELETE_OUTPUT_DIRECTORY'] self.output_retention = settings['OUTPUT_RETENTION'] @@ -164,7 +164,6 @@ def run(self): path=self.path, theme=self.theme, output_path=self.output_path, - markup=self.markup, ) for cls in self.get_generator_classes() ] @@ -236,10 +235,6 @@ def parse_arguments(): help='Where to output the generated files. If not specified, a ' 'directory will be created, named "output" in the current path.') - parser.add_argument('-m', '--markup', dest='markup', - help='The list of markup language to use (rst or md). Please indicate ' - 'them separated by commas.') - parser.add_argument('-s', '--settings', dest='settings', help='The settings of the application, this is automatically set to ' '{0} if a file exists with this name.'.format(DEFAULT_CONFIG_NAME)) @@ -279,8 +274,6 @@ def get_config(args): if args.output: config['OUTPUT_PATH'] = \ os.path.abspath(os.path.expanduser(args.output)) - if args.markup: - config['MARKUP'] = [a.strip().lower() for a in args.markup.split(',')] if args.theme: abstheme = os.path.abspath(os.path.expanduser(args.theme)) config['THEME'] = abstheme if os.path.exists(abstheme) else args.theme @@ -296,8 +289,6 @@ def get_config(args): for key in config: if key in ('PATH', 'OUTPUT_PATH', 'THEME'): config[key] = config[key].decode(enc) - if key == "MARKUP": - config[key] = [a.decode(enc) for a in config[key]] return config @@ -315,16 +306,17 @@ def get_instance(args): module = __import__(module) cls = getattr(module, cls_name) - return cls(settings) + return cls(settings), settings def main(): args = parse_arguments() init(args.verbosity) - pelican = get_instance(args) + pelican, settings = get_instance(args) + readers = Readers(settings) watchers = {'content': folder_watcher(pelican.path, - pelican.markup, + readers.extensions, pelican.ignore_files), 'theme': folder_watcher(pelican.theme, [''], @@ -333,8 +325,8 @@ def main(): try: if args.autoreload: - print(' --- AutoReload Mode: Monitoring `content`, `theme` and `settings`' - ' for changes. ---') + print(' --- AutoReload Mode: Monitoring `content`, `theme` and' + ' `settings` for changes. 
---') while True: try: diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -13,16 +13,13 @@ from itertools import chain, groupby from operator import attrgetter, itemgetter -from jinja2 import ( - Environment, FileSystemLoader, PrefixLoader, ChoiceLoader, BaseLoader, - TemplateNotFound -) +from jinja2 import (Environment, FileSystemLoader, PrefixLoader, ChoiceLoader, + BaseLoader, TemplateNotFound) from pelican.contents import Article, Page, Static, is_valid_content -from pelican.readers import read_file +from pelican.readers import Readers from pelican.utils import copy, process_translations, mkdir_p, DateFormatter from pelican import signals -import pelican.utils logger = logging.getLogger(__name__) @@ -31,23 +28,23 @@ class Generator(object): """Baseclass generator""" - def __init__(self, context, settings, path, theme, output_path, markup, - **kwargs): + def __init__(self, context, settings, path, theme, output_path, **kwargs): self.context = context self.settings = settings self.path = path self.theme = theme self.output_path = output_path - self.markup = markup for arg, value in kwargs.items(): setattr(self, arg, value) + self.readers = Readers(self.settings) + # templates cache self._templates = {} self._templates_path = [] self._templates_path.append(os.path.expanduser( - os.path.join(self.theme, 'templates'))) + os.path.join(self.theme, 'templates'))) self._templates_path += self.settings['EXTRA_TEMPLATES_PATHS'] theme_path = os.path.dirname(os.path.abspath(__file__)) @@ -85,9 +82,8 @@ def get_template(self, name): try: self._templates[name] = self.env.get_template(name + '.html') except TemplateNotFound: - raise Exception( - ('[templates] unable to load %s.html from %s' - % (name, self._templates_path))) + raise Exception('[templates] unable to load %s.html from %s' + % (name, self._templates_path)) return self._templates[name] def _include_path(self, path, extensions=None): @@ -98,7 +94,7 @@ def _include_path(self, path, extensions=None): extensions are allowed) """ if extensions is None: - extensions = tuple(self.markup) + extensions = tuple(self.readers.extensions) basename = os.path.basename(path) if extensions is False or basename.endswith(extensions): return True @@ -388,9 +384,9 @@ def generate_context(self): self.settings['ARTICLE_DIR'], exclude=self.settings['ARTICLE_EXCLUDES']): try: - article = read_file( + article = self.readers.read_file( base_path=self.path, path=f, content_class=Article, - settings=self.settings, context=self.context, + context=self.context, preread_signal=signals.article_generator_preread, preread_sender=self, context_signal=signals.article_generator_context, @@ -496,9 +492,9 @@ def generate_context(self): self.settings['PAGE_DIR'], exclude=self.settings['PAGE_EXCLUDES']): try: - page = read_file( + page = self.readers.read_file( base_path=self.path, path=f, content_class=Page, - settings=self.settings, context=self.context, + context=self.context, preread_signal=signals.page_generator_preread, preread_sender=self, context_signal=signals.page_generator_context, @@ -557,10 +553,9 @@ def generate_context(self): for static_path in self.settings['STATIC_PATHS']: for f in self.get_files( static_path, extensions=False): - static = read_file( + static = self.readers.read_file( base_path=self.path, path=f, content_class=Static, - fmt='static', - settings=self.settings, context=self.context, + fmt='static', context=self.context, preread_signal=signals.static_generator_preread, 
preread_sender=self, context_signal=signals.static_generator_context, diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -34,10 +34,10 @@ except ImportError: from HTMLParser import HTMLParser +from pelican import signals from pelican.contents import Page, Category, Tag, Author from pelican.utils import get_date, pelican_open -logger = logging.getLogger(__name__) METADATA_PROCESSORS = { 'tags': lambda x, y: [Tag(tag, y) for tag in x.split(',')], @@ -50,7 +50,19 @@ logger = logging.getLogger(__name__) -class Reader(object): +class BaseReader(object): + """Base class to read files. + + This class is used to process static files, and it can be inherited for + other types of file. A Reader class must have the following attributes: + + - enabled: (boolean) tell if the Reader class is enabled. It + generally depends on the import of some dependency. + - file_extensions: a list of file extensions that the Reader will process. + - extensions: a list of extensions to use in the reader (typical use is + Markdown). + + """ enabled = True file_extensions = ['static'] extensions = None @@ -110,7 +122,9 @@ def visit_image(self, node): return HTMLTranslator.visit_image(self, node) -class RstReader(Reader): +class RstReader(BaseReader): + """Reader for reStructuredText files""" + enabled = bool(docutils) file_extensions = ['rst'] @@ -166,7 +180,9 @@ def read(self, source_path): return content, metadata -class MarkdownReader(Reader): +class MarkdownReader(BaseReader): + """Reader for Markdown files""" + enabled = bool(Markdown) file_extensions = ['md', 'markdown', 'mkd', 'mdown'] @@ -174,7 +190,6 @@ def __init__(self, *args, **kwargs): super(MarkdownReader, self).__init__(*args, **kwargs) self.extensions = self.settings['MD_EXTENSIONS'] self.extensions.append('meta') - self._md = Markdown(extensions=self.extensions) def _parse_metadata(self, meta): """Return the dict containing document metadata""" @@ -194,6 +209,7 @@ def _parse_metadata(self, meta): def read(self, source_path): """Parse content and metadata of markdown files""" + self._md = Markdown(extensions=self.extensions) with pelican_open(source_path) as text: content = self._md.convert(text) @@ -201,8 +217,9 @@ def read(self, source_path): return content, metadata -class HTMLReader(Reader): +class HTMLReader(BaseReader): """Parses HTML files as input, looking for meta, title, and body tags""" + file_extensions = ['htm', 'html'] enabled = True @@ -312,7 +329,9 @@ def read(self, filename): return parser.body, metadata -class AsciiDocReader(Reader): +class AsciiDocReader(BaseReader): + """Reader for AsciiDoc files""" + enabled = bool(asciidoc) file_extensions = ['asc'] default_options = ["--no-header-footer", "-a newline=\\n"] @@ -344,109 +363,142 @@ def read(self, source_path): return content, metadata -EXTENSIONS = {} +class Readers(object): + """Interface for all readers. + + This class contains a mapping of file extensions / Reader classes, to know + which Reader class must be used to read a file (based on its extension). + This is customizable both with the 'READERS' setting, and with the + 'readers_init' signall for plugins. 
+ + """ + def __init__(self, settings=None): + self.settings = settings or {} + self.readers = {} + self.reader_classes = {} + + for cls in [BaseReader] + BaseReader.__subclasses__(): + for ext in cls.file_extensions: + self.reader_classes[ext] = cls + + if self.settings['READERS']: + self.reader_classes.update(self.settings['READERS']) -for cls in [Reader] + Reader.__subclasses__(): - for ext in cls.file_extensions: - EXTENSIONS[ext] = cls + signals.readers_init.send(self) + for fmt, reader_class in self.reader_classes.items(): + if not reader_class: + continue -def read_file(base_path, path, content_class=Page, fmt=None, - settings=None, context=None, - preread_signal=None, preread_sender=None, - context_signal=None, context_sender=None): - """Return a content object parsed with the given format.""" - path = os.path.abspath(os.path.join(base_path, path)) - source_path = os.path.relpath(path, base_path) - base, ext = os.path.splitext(os.path.basename(path)) - logger.debug('read file {} -> {}'.format( + if not reader_class.enabled: + logger.warning('Missing dependencies for {}'.format(fmt)) + continue + + self.readers[fmt] = reader_class(self.settings) + + settings_key = '%s_EXTENSIONS' % fmt.upper() + + if settings_key in self.settings: + self.readers[fmt].extensions = self.settings[settings_key] + + @property + def extensions(self): + return self.readers.keys() + + def read_file(self, base_path, path, content_class=Page, fmt=None, + context=None, preread_signal=None, preread_sender=None, + context_signal=None, context_sender=None): + """Return a content object parsed with the given format.""" + + path = os.path.abspath(os.path.join(base_path, path)) + source_path = os.path.relpath(path, base_path) + logger.debug('read file {} -> {}'.format( source_path, content_class.__name__)) - if not fmt: - fmt = ext[1:] - if fmt not in EXTENSIONS: - raise TypeError('Pelican does not know how to parse {}'.format(path)) + if not fmt: + _, ext = os.path.splitext(os.path.basename(path)) + fmt = ext[1:] - if preread_signal: - logger.debug('signal {}.send({})'.format( + if fmt not in self.readers: + raise TypeError( + 'Pelican does not know how to parse {}'.format(path)) + + if preread_signal: + logger.debug('signal {}.send({})'.format( preread_signal, preread_sender)) - preread_signal.send(preread_sender) + preread_signal.send(preread_sender) - if settings is None: - settings = {} + reader = self.readers[fmt] - reader_class = EXTENSIONS[fmt] - if not reader_class.enabled: - raise ValueError('Missing dependencies for {}'.format(fmt)) + metadata = default_metadata( + settings=self.settings, process=reader.process_metadata) + metadata.update(path_metadata( + full_path=path, source_path=source_path, + settings=self.settings)) + metadata.update(parse_path_metadata( + source_path=source_path, settings=self.settings, + process=reader.process_metadata)) - reader = reader_class(settings) + content, reader_metadata = reader.read(path) + metadata.update(reader_metadata) - settings_key = '%s_EXTENSIONS' % fmt.upper() + if content: + # find images with empty alt + find_empty_alt(content, path) - if settings and settings_key in settings: - reader.extensions = settings[settings_key] + # eventually filter the content with typogrify if asked so + if content and self.settings['TYPOGRIFY']: + from typogrify.filters import typogrify + content = typogrify(content) + metadata['title'] = typogrify(metadata['title']) - metadata = default_metadata( - settings=settings, process=reader.process_metadata) - 
metadata.update(path_metadata( - full_path=path, source_path=source_path, settings=settings)) - metadata.update(parse_path_metadata( - source_path=source_path, settings=settings, - process=reader.process_metadata)) - content, reader_metadata = reader.read(path) - metadata.update(reader_metadata) - - # create warnings for all images with empty alt (up to a certain number) - # as they are really likely to be accessibility flaws - if content: - # find images with empty alt - imgs = re.compile(r""" - (?: - # src before alt - <img - [^\>]* - src=(['"])(.*)\1 - [^\>]* - alt=(['"])\3 - )|(?: - # alt before src - <img - [^\>]* - alt=(['"])\4 - [^\>]* - src=(['"])(.*)\5 - ) - """, re.X) - matches = re.findall(imgs, content) - # find a correct threshold - nb_warnings = 10 - if len(matches) == nb_warnings + 1: - nb_warnings += 1 # avoid bad looking case - # print one warning per image with empty alt until threshold - for match in matches[:nb_warnings]: - logger.warning('Empty alt attribute for image {} in {}'.format( - os.path.basename(match[1] + match[5]), path)) - # print one warning for the other images with empty alt - if len(matches) > nb_warnings: - logger.warning('{} other images with empty alt attributes'.format( - len(matches) - nb_warnings)) - - # eventually filter the content with typogrify if asked so - if content and settings and settings['TYPOGRIFY']: - from typogrify.filters import typogrify - content = typogrify(content) - metadata['title'] = typogrify(metadata['title']) - - if context_signal: - logger.debug('signal {}.send({}, <metadata>)'.format( + if context_signal: + logger.debug('signal {}.send({}, <metadata>)'.format( context_signal, context_sender)) - context_signal.send(context_sender, metadata=metadata) - return content_class( - content=content, - metadata=metadata, - settings=settings, - source_path=path, - context=context) + context_signal.send(context_sender, metadata=metadata) + + return content_class(content=content, metadata=metadata, + settings=self.settings, source_path=path, + context=context) + + +def find_empty_alt(content, path): + """Find images with empty alt + + Create warnings for all images with empty alt (up to a certain number), + as they are really likely to be accessibility flaws. + + """ + imgs = re.compile(r""" + (?: + # src before alt + <img + [^\>]* + src=(['"])(.*)\1 + [^\>]* + alt=(['"])\3 + )|(?: + # alt before src + <img + [^\>]* + alt=(['"])\4 + [^\>]* + src=(['"])(.*)\5 + ) + """, re.X) + matches = re.findall(imgs, content) + # find a correct threshold + nb_warnings = 10 + if len(matches) == nb_warnings + 1: + nb_warnings += 1 # avoid bad looking case + # print one warning per image with empty alt until threshold + for match in matches[:nb_warnings]: + logger.warning('Empty alt attribute for image {} in {}'.format( + os.path.basename(match[1] + match[5]), path)) + # print one warning for the other images with empty alt + if len(matches) > nb_warnings: + logger.warning('{} other images with empty alt attributes' + .format(len(matches) - nb_warnings)) def default_metadata(settings=None, process=None): @@ -469,7 +521,7 @@ def path_metadata(full_path, source_path, settings=None): metadata['date'] = datetime.datetime.fromtimestamp( os.stat(full_path).st_ctime) metadata.update(settings.get('EXTRA_PATH_METADATA', {}).get( - source_path, {})) + source_path, {})) return metadata @@ -482,7 +534,7 @@ def parse_path_metadata(source_path, settings=None, process=None): ... 'PATH_METADATA': ... '(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*', ... 
} - >>> reader = Reader(settings=settings) + >>> reader = BaseReader(settings=settings) >>> metadata = parse_path_metadata( ... source_path='my-cat/2013-01-01/my-slug.html', ... settings=settings, @@ -498,13 +550,12 @@ def parse_path_metadata(source_path, settings=None, process=None): subdir = os.path.basename(dirname) if settings: checks = [] - for key,data in [('FILENAME_METADATA', base), - ('PATH_METADATA', source_path), - ]: + for key, data in [('FILENAME_METADATA', base), + ('PATH_METADATA', source_path)]: checks.append((settings.get(key, None), data)) if settings.get('USE_FOLDER_AS_CATEGORY', None): checks.insert(0, ('(?P<category>.*)', subdir)) - for regexp,data in checks: + for regexp, data in checks: if regexp and data: match = re.match(regexp, data) if match: diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -33,7 +33,7 @@ 'PAGE_EXCLUDES': (), 'THEME': DEFAULT_THEME, 'OUTPUT_PATH': 'output', - 'MARKUP': ('rst', 'md'), + 'READERS': {}, 'STATIC_PATHS': ['images', ], 'THEME_STATIC_DIR': 'theme', 'THEME_STATIC_PATHS': ['static', ], @@ -112,6 +112,7 @@ 'SLUG_SUBSTITUTIONS': (), } + def read_settings(path=None, override=None): if path: local_settings = get_settings_from_file(path) @@ -120,7 +121,7 @@ def read_settings(path=None, override=None): if p in local_settings and local_settings[p] is not None \ and not isabs(local_settings[p]): absp = os.path.abspath(os.path.normpath(os.path.join( - os.path.dirname(path), local_settings[p]))) + os.path.dirname(path), local_settings[p]))) if p not in ('THEME', 'PLUGIN_PATH') or os.path.exists(absp): local_settings[p] = absp else: @@ -138,7 +139,7 @@ def get_settings_from_module(module=None, default_settings=DEFAULT_CONFIG): context = copy.deepcopy(default_settings) if module is not None: context.update( - (k, v) for k, v in inspect.getmembers(module) if k.isupper()) + (k, v) for k, v in inspect.getmembers(module) if k.isupper()) return context @@ -221,17 +222,18 @@ def configure_settings(settings): settings['FEED_DOMAIN'] = settings['SITEURL'] # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined - feed_keys = ['FEED_ATOM', 'FEED_RSS', - 'FEED_ALL_ATOM', 'FEED_ALL_RSS', - 'CATEGORY_FEED_ATOM', 'CATEGORY_FEED_RSS', - 'TAG_FEED_ATOM', 'TAG_FEED_RSS', - 'TRANSLATION_FEED_ATOM', 'TRANSLATION_FEED_RSS', - ] + feed_keys = [ + 'FEED_ATOM', 'FEED_RSS', + 'FEED_ALL_ATOM', 'FEED_ALL_RSS', + 'CATEGORY_FEED_ATOM', 'CATEGORY_FEED_RSS', + 'TAG_FEED_ATOM', 'TAG_FEED_RSS', + 'TRANSLATION_FEED_ATOM', 'TRANSLATION_FEED_RSS', + ] if any(settings.get(k) for k in feed_keys): if not settings.get('SITEURL'): - logger.warning('Feeds generated without SITEURL set properly may not' - ' be valid') + logger.warning('Feeds generated without SITEURL set properly may' + ' not be valid') if not 'TIMEZONE' in settings: logger.warning( @@ -255,26 +257,26 @@ def configure_settings(settings): # Save people from accidentally setting a string rather than a list path_keys = ( - 'ARTICLE_EXCLUDES', - 'DEFAULT_METADATA', - 'DIRECT_TEMPLATES', - 'EXTRA_TEMPLATES_PATHS', - 'FILES_TO_COPY', - 'IGNORE_FILES', - 'JINJA_EXTENSIONS', - 'MARKUP', - 'PAGINATED_DIRECT_TEMPLATES', - 'PLUGINS', - 'STATIC_PATHS', - 'THEME_STATIC_PATHS',) + 'ARTICLE_EXCLUDES', + 'DEFAULT_METADATA', + 'DIRECT_TEMPLATES', + 'EXTRA_TEMPLATES_PATHS', + 'FILES_TO_COPY', + 'IGNORE_FILES', + 'JINJA_EXTENSIONS', + 'PAGINATED_DIRECT_TEMPLATES', + 'PLUGINS', + 'STATIC_PATHS', + 'THEME_STATIC_PATHS', + ) for PATH_KEY in filter(lambda k: k in 
settings, path_keys): if isinstance(settings[PATH_KEY], six.string_types): - logger.warning("Detected misconfiguration with %s setting (must " - "be a list), falling back to the default" - % PATH_KEY) + logger.warning("Detected misconfiguration with %s setting " + "(must be a list), falling back to the default" + % PATH_KEY) settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY] - for old,new,doc in [ + for old, new, doc in [ ('LESS_GENERATOR', 'the Webassets plugin', None), ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA', 'https://github.com/getpelican/pelican/blob/master/docs/settings.rst#path-metadata'), diff --git a/pelican/signals.py b/pelican/signals.py --- a/pelican/signals.py +++ b/pelican/signals.py @@ -8,6 +8,10 @@ get_generators = signal('get_generators') finalized = signal('pelican_finalized') +# Reader-level signals + +readers_init = signal('readers_init') + # Generator-level signals generator_init = signal('generator_init')
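A hypothetical plugin sketch (not part of this patch) of how the new `readers_init` signal and the `Readers.reader_classes` mapping introduced above could be used to add a reader; the `TxtReader` class, its metadata, and the '.txt' extension are all invented for illustration:

```python
from pelican import signals
from pelican.readers import BaseReader

class TxtReader(BaseReader):
    """Invented example: wrap plain-text sources in a <pre> block."""
    enabled = True
    file_extensions = ['txt']

    def read(self, source_path):
        with open(source_path) as f:
            content = '<pre>{}</pre>'.format(f.read())
        # Run metadata through process_metadata, as the built-in readers do.
        metadata = {'title': 'Plain text', 'date': '2013-08-04'}
        return content, {key: self.process_metadata(key, value)
                         for key, value in metadata.items()}

def add_reader(readers):
    # 'readers' is the Readers instance sending readers_init; the signal is
    # sent before reader instances are built, so this assignment takes effect.
    readers.reader_classes['txt'] = TxtReader

def register():
    # Standard Pelican plugin entry point.
    signals.readers_init.connect(add_reader)
```

Since `Readers.__init__` also scans `BaseReader.__subclasses__()`, merely importing such a subclass before the `Readers` object is built can be enough; connecting to the signal just makes the registration explicit.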
diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -19,9 +19,9 @@ class TestGenerator(unittest.TestCase): def setUp(self): self.settings = get_settings() + self.settings['READERS'] = {'asc': None} self.generator = Generator(self.settings.copy(), self.settings, - CUR_DIR, self.settings['THEME'], None, - self.settings['MARKUP']) + CUR_DIR, self.settings['THEME'], None) def test_include_path(self): filename = os.path.join(CUR_DIR, 'content', 'article.rst') @@ -30,10 +30,6 @@ def test_include_path(self): self.assertTrue(include_path(filename, extensions=('rst',))) self.assertFalse(include_path(filename, extensions=('md',))) - # markup must be a tuple, test that this works also with a list - self.generator.markup = ['rst', 'md'] - self.assertTrue(include_path(filename)) - class TestArticlesGenerator(unittest.TestCase): @@ -42,11 +38,11 @@ def setUpClass(cls): settings = get_settings(filenames={}) settings['DEFAULT_CATEGORY'] = 'Default' settings['DEFAULT_DATE'] = (1970, 1, 1) + settings['READERS'] = {'asc': None} cls.generator = ArticlesGenerator( context=settings.copy(), settings=settings, - path=CONTENT_DIR, theme=settings['THEME'], - output_path=None, markup=settings['MARKUP']) + path=CONTENT_DIR, theme=settings['THEME'], output_path=None) cls.generator.generate_context() cls.articles = [[page.title, page.status, page.category.name, page.template] for page in cls.generator.articles] @@ -55,8 +51,7 @@ def test_generate_feeds(self): settings = get_settings() generator = ArticlesGenerator( context=settings, settings=settings, - path=None, theme=settings['THEME'], - output_path=None, markup=settings['MARKUP']) + path=None, theme=settings['THEME'], output_path=None) writer = MagicMock() generator.generate_feeds(writer) writer.write_feed.assert_called_with([], settings, @@ -64,8 +59,7 @@ def test_generate_feeds(self): generator = ArticlesGenerator( context=settings, settings=get_settings(FEED_ALL_ATOM=None), - path=None, theme=settings['THEME'], - output_path=None, markup=None) + path=None, theme=settings['THEME'], output_path=None) writer = MagicMock() generator.generate_feeds(writer) self.assertFalse(writer.write_feed.called) @@ -74,26 +68,33 @@ def test_generate_context(self): articles_expected = [ ['Article title', 'published', 'Default', 'article'], + ['Article with markdown and summary metadata multi', 'published', + 'Default', 'article'], ['Article with markdown and summary metadata single', 'published', 'Default', 'article'], - ['Article with markdown and summary metadata multi', 'published', + ['Article with markdown containing footnotes', 'published', 'Default', 'article'], ['Article with template', 'published', 'Default', 'custom'], - ['Test md File', 'published', 'test', 'article'], ['Rst with filename metadata', 'published', 'yeah', 'article'], ['Test Markdown extensions', 'published', 'Default', 'article'], + ['Test markdown File', 'published', 'test', 'article'], + ['Test md File', 'published', 'test', 'article'], + ['Test mdown File', 'published', 'test', 'article'], + ['Test mkd File', 'published', 'test', 'article'], ['This is a super article !', 'published', 'Yeah', 'article'], + ['This is a super article !', 'published', 'Yeah', 'article'], + ['This is a super article !', 'published', 'yeah', 'article'], + ['This is a super article !', 'published', 'yeah', 'article'], + ['This is a super article !', 'published', 'yeah', 'article'], + ['This is a super article !', 
'published', 'Default', 'article'], ['This is an article with category !', 'published', 'yeah', - 'article'], + 'article'], ['This is an article without category !', 'published', 'Default', 'article'], ['This is an article without category !', 'published', 'TestCategory', 'article'], - ['This is a super article !', 'published', 'yeah', 'article'], - ['マックOS X 10.8でパイソンとVirtualenvをインストールと設定', - 'published', '指導書', 'article'], - ['Article with markdown containing footnotes', 'published', - 'Default', 'article'] + ['マックOS X 10.8でパイソンとVirtualenvをインストールと設定', 'published', + '指導書', 'article'], ] self.assertEqual(sorted(articles_expected), sorted(self.articles)) @@ -121,11 +122,11 @@ def test_do_not_use_folder_as_category(self): settings['DEFAULT_CATEGORY'] = 'Default' settings['DEFAULT_DATE'] = (1970, 1, 1) settings['USE_FOLDER_AS_CATEGORY'] = False + settings['READERS'] = {'asc': None} settings['filenames'] = {} generator = ArticlesGenerator( context=settings.copy(), settings=settings, - path=CONTENT_DIR, theme=settings['THEME'], - output_path=None, markup=settings['MARKUP']) + path=CONTENT_DIR, theme=settings['THEME'], output_path=None) generator.generate_context() # test for name # categories are grouped by slug; if two categories have the same slug @@ -147,8 +148,7 @@ def test_direct_templates_save_as_default(self): settings = get_settings(filenames={}) generator = ArticlesGenerator( context=settings, settings=settings, - path=None, theme=settings['THEME'], - output_path=None, markup=settings['MARKUP']) + path=None, theme=settings['THEME'], output_path=None) write = MagicMock() generator.generate_direct_templates(write) write.assert_called_with("archives.html", @@ -162,8 +162,7 @@ def test_direct_templates_save_as_modified(self): settings['ARCHIVES_SAVE_AS'] = 'archives/index.html' generator = ArticlesGenerator( context=settings, settings=settings, - path=None, theme=settings['THEME'], - output_path=None, markup=settings['MARKUP']) + path=None, theme=settings['THEME'], output_path=None) write = MagicMock() generator.generate_direct_templates(write) write.assert_called_with("archives/index.html", @@ -178,8 +177,7 @@ def test_direct_templates_save_as_false(self): settings['ARCHIVES_SAVE_AS'] = 'archives/index.html' generator = ArticlesGenerator( context=settings, settings=settings, - path=None, theme=settings['THEME'], - output_path=None, markup=settings['MARKUP']) + path=None, theme=settings['THEME'], output_path=None) write = MagicMock() generator.generate_direct_templates(write) write.assert_called_count == 0 @@ -212,8 +210,7 @@ def test_generate_context(self): generator = PagesGenerator( context=settings.copy(), settings=settings, - path=CUR_DIR, theme=settings['THEME'], - output_path=None, markup=settings['MARKUP']) + path=CUR_DIR, theme=settings['THEME'], output_path=None) generator.generate_context() pages = self.distill_pages(generator.pages) hidden_pages = self.distill_pages(generator.hidden_pages) @@ -252,13 +249,12 @@ def test_generate_output(self): settings = get_settings() settings['STATIC_PATHS'] = ['static'] settings['TEMPLATE_PAGES'] = { - 'template/source.html': 'generated/file.html' - } + 'template/source.html': 'generated/file.html' + } generator = TemplatePagesGenerator( context={'foo': 'bar'}, settings=settings, - path=self.temp_content, theme='', - output_path=self.temp_output, markup=None) + path=self.temp_content, theme='', output_path=self.temp_output) # create a dummy template file template_dir = os.path.join(self.temp_content, 'template') diff --git 
a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -19,8 +19,8 @@ class ReaderTest(unittest.TestCase): def read_file(self, path, **kwargs): # Isolate from future API changes to readers.read_file - return readers.read_file( - base_path=CONTENT_PATH, path=path, settings=get_settings(**kwargs)) + r = readers.Readers(settings=get_settings(**kwargs)) + return r.read_file(base_path=CONTENT_PATH, path=path) class RstReaderTest(ReaderTest): @@ -160,7 +160,7 @@ def test_article_with_footnote(self): ' with some footnotes' '<sup id="fnref:footnote"><a class="footnote-ref" ' 'href="#fn:footnote" rel="footnote">2</a></sup></p>\n' - + '<div class="footnote">\n' '<hr />\n<ol>\n<li id="fn:1">\n' '<p>Numbered footnote&#160;' diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -353,12 +353,13 @@ def tearDown(self): 'French locale needed') def test_french_locale(self): settings = read_settings( - override = {'LOCALE': locale.normalize('fr_FR.UTF-8'), - 'TEMPLATE_PAGES': {'template/source.html': - 'generated/file.html'}}) + override={'LOCALE': locale.normalize('fr_FR.UTF-8'), + 'TEMPLATE_PAGES': {'template/source.html': + 'generated/file.html'}}) - generator = TemplatePagesGenerator({'date': self.date}, settings, - self.temp_content, '', self.temp_output, None) + generator = TemplatePagesGenerator( + {'date': self.date}, settings, + self.temp_content, '', self.temp_output) generator.env.filters.update({'strftime': utils.DateFormatter()}) writer = Writer(self.temp_output, settings=settings) @@ -385,8 +386,9 @@ def test_turkish_locale(self): 'TEMPLATE_PAGES': {'template/source.html': 'generated/file.html'}}) - generator = TemplatePagesGenerator({'date': self.date}, settings, - self.temp_content, '', self.temp_output, None) + generator = TemplatePagesGenerator( + {'date': self.date}, settings, + self.temp_content, '', self.temp_output) generator.env.filters.update({'strftime': utils.DateFormatter()}) writer = Writer(self.temp_output, settings=settings)
Default markup formats

Issue #751. Explain to beginners of Pelican that .markdown is not supported by default and that they will need to add the MARKUP setting to their config file if they want to use it.
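For reference, the workaround this issue asks to document would look roughly like this in a user's pelicanconf.py; this is an illustration based on the pre-patch default of `MARKUP = ('rst', 'md')` and the MarkdownReader extension list visible in the diff above, not text from the issue:

```python
# pelicanconf.py (pre-patch workaround): list every Markdown suffix the
# MarkdownReader already understands, not just the default 'md'.
MARKUP = ('rst', 'md', 'markdown', 'mkd', 'mdown')
```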
I think this is a bad solution; instead of telling the users about this, we should just solve the real issue rather than describing it.

``` diff
- 'MARKUP': ('rst', 'md'),
+ 'MARKUP': tuple(RstReader.file_extensions + MarkdownReader.file_extensions),
```

Once I started doing that, I was thinking: why does the `MARKUP` setting even exist? I think we should have something like:

``` python
READERS = [
    MarkdownReader,
    RstReader
]
```

Then abandon `MARKUP` completely?

@kylef, I agree with you. Partially :). I don't know what's the use of selecting (or disabling) any reader. They should be enabled by default. We can move the `.file_extensions` class variables to settings and let people customize that if necessary. Something like this:

```
RST_FILE_EXT = ['rst']
MD_FILE_EXT = ['md', 'mdown', 'markdown', 'mkd']
HTML_FILE_EXT = ['htm', 'html']
ASCII_FILE_EXT = ['asc']
```

Not the best variable names perhaps, but the current `MD_EXTENSIONS` setting limits the choices in order to avoid confusion.

I think this exists so you can disable certain renderers, such as HTML. Currently, `MARKUP` won't work if you set something Pelican doesn't understand. Reader.file_extensions is used to make the actual match from an extension to a reader. Having a `READERS` setting is far more modular, so I can actually subclass or add a new reader not supported by Pelican. Decoupling the extensions from the actual reader class into a setting makes it less self-contained too.

Wouldn't adding or extending a reader be best done by a plugin?

Lol, this all started because I spent an hour or two trying to figure out why Pelican wasn't adding any content I wrote! Whatever happens, enabling Markdown should enable at least the .md and .markdown extensions (might as well do the other two also). I agree, @kylef, that sounds like the best option we have.

I'm not sure we need this setting anymore. It was here in the first place to filter out content you wouldn't want to put there, but I don't think that's valuable anymore.

Thanks for chiming in, @ametaireau. Folks asked me why the explicit content format definition was required in the first place, and since I didn't have a good answer, I was hoping you would join the discussion. I thought perhaps it was somehow due to performance considerations, but I'm gathering that's not the case. I agree that we should eliminate this source of confusion for folks, who clearly expect that supported formats will be processed by default instead of being silently ignored. That said, couldn't this be most easily solved by changing the default value of MARKUP to all the supported formats? The advantage of this approach is that users can, if they wish, "turn off" some formats and only have certain filetypes processed, which it seems is why this setting exists in the first place.

I like @kylef's solution. It makes it easy to plug in new readers and plug out ones you don't use.
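For comparison, a sketch of how the `READERS` mapping that ends up replacing `MARKUP` in this PR can be used to "plug out" readers; the only concrete usage shown in the PR is the tests' `settings['READERS'] = {'asc': None}`, so treat the rest as an assumption:

```python
# pelicanconf.py (post-patch): every available reader's extensions are
# enabled automatically; READERS only overrides or disables entries.
READERS = {
    'asc': None,   # skip AsciiDoc sources, as the updated tests do
    'html': None,  # skip raw HTML sources
}
```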
2013-08-04T18:33:50Z
[]
[]
getpelican/pelican
1123
getpelican__pelican-1123
[ "1117" ]
bc94c6b6e38ecd293ba81b568fa570857ce65ccb
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -189,8 +189,8 @@ def _update_content(self, content, siteurl): instrasite_link_regex = self.settings['INTRASITE_LINK_REGEX'] regex = r""" - (?P<markup><\s*[^\>]* # match tag with src and href attr - (?:href|src)\s*=) + (?P<markup><\s*[^\>]* # match tag with all url-value attributes + (?:href|src|poster|data|cite|formaction|action)\s*=) (?P<quote>["\']) # require value to be quoted (?P<path>{0}(?P<value>.*?)) # the url value
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -268,6 +268,61 @@ def test_intrasite_link(self): '?utm_whatever=234&highlight=word#section-2">link</a>' ) + def test_intrasite_link_more(self): + # type does not take unicode in PY2 and bytes in PY3, which in + # combination with unicode literals leads to following insane line: + cls_name = '_DummyAsset' if six.PY3 else b'_DummyAsset' + + args = self.page_kwargs.copy() + args['settings'] = get_settings() + args['source_path'] = 'content' + args['context']['filenames'] = { + 'images/poster.jpg': type(cls_name, (object,), {'url': 'images/poster.jpg'}), + 'assets/video.mp4': type(cls_name, (object,), {'url': 'assets/video.mp4'}), + 'images/graph.svg': type(cls_name, (object,), {'url': 'images/graph.svg'}), + 'reference.rst': type(cls_name, (object,), {'url': 'reference.html'}), + } + + # video.poster + args['content'] = ( + 'There is a video with poster ' + '<video controls poster="{filename}/images/poster.jpg">' + '<source src="|filename|/assets/video.mp4" type="video/mp4">' + '</video>' + ) + content = Page(**args).get_content('http://notmyidea.org') + self.assertEqual( + content, + 'There is a video with poster ' + '<video controls poster="http://notmyidea.org/images/poster.jpg">' + '<source src="http://notmyidea.org/assets/video.mp4" type="video/mp4">' + '</video>' + ) + + # object.data + args['content'] = ( + 'There is a svg object ' + '<object data="{filename}/images/graph.svg" type="image/svg+xml"></object>' + ) + content = Page(**args).get_content('http://notmyidea.org') + self.assertEqual( + content, + 'There is a svg object ' + '<object data="http://notmyidea.org/images/graph.svg" type="image/svg+xml"></object>' + ) + + # blockquote.cite + args['content'] = ( + 'There is a blockquote with cite attribute ' + '<blockquote cite="{filename}reference.rst">blah blah</blockquote>' + ) + content = Page(**args).get_content('http://notmyidea.org') + self.assertEqual( + content, + 'There is a blockquote with cite attribute ' + '<blockquote cite="http://notmyidea.org/reference.html">blah blah</blockquote>' + ) + class TestArticle(TestPage): def test_template(self):
Internal content link doesn't work on video.poster. I'm using pelican 3.3.0 with python 2.7.5. The [internal content link](http://docs.getpelican.com/en/3.3.0/getting_started.html#linking-to-internal-content) feature is very useful, because I don't need to determine where attachment files will be placed after site generation. But recently I found that this feature has no effect on the poster property of a video tag. For example, I wrote this in my post.md:

``` html
<video controls poster="{filename}/images/2011/07/poster.jpg">
    <source src="{filename}/assets/2011/07/video.mp4" type="video/mp4">
</video>
```

After generation, I got this HTML code in my page:

``` html
<video controls poster="{filename}/images/2011/07/poster.jpg">
    <source src="/assets/2011/07/video.mp4" type="video/mp4">
</video>
```

As you can see, the `{filename}` is still there; it should be replaced with an empty string in this case.
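To see why the patch above widens the attribute list, here is a small standalone sketch of the matching logic. It is simplified: the literal `{filename}` stands in for the configurable INTRASITE_LINK_REGEX, so this is an illustration rather than the exact code in pelican/contents.py:

``` python
import re

# Simplified sketch of the widened attribute regex from the patch above; the
# literal "{filename}" stands in for the configurable INTRASITE_LINK_REGEX.
regex = r"""
    (?P<markup><\s*[^\>]*                                  # a tag carrying a url-valued attribute
    (?:href|src|poster|data|cite|formaction|action)\s*=)   # widened list: no longer just href/src
    (?P<quote>["\'])                                       # the value must be quoted
    (?P<path>\{filename\}(?P<value>.*?))                   # the url value
    (?P=quote)                                             # closing quote
"""
hrefs = re.compile(regex, re.X)

sample = '<video controls poster="{filename}/images/2011/07/poster.jpg">'
match = hrefs.search(sample)
print(match.group('value'))  # /images/2011/07/poster.jpg -> poster is now rewritten too
```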
The internal content linking feature was meant for... linking. ;^) So indeed, the use case you described is outside the scope of what was originally envisioned. That said, I can certainly understand what you're trying to do here and why a more general-purpose function would benefit alternative use cases (e.g., poster frames). Contributions to that effect would be welcome. Is this something you would like to work on? Thanks @justinmayer for the explanation. I'm not sure whether I could contribute to this; I'm just a newbie to Pelican :) I'd like to have a try, if I have time.
2013-10-16T09:11:23Z
[]
[]
getpelican/pelican
1201
getpelican__pelican-1201
[ "1107" ]
58e817cb0fa1b271201bd23680d2c60a8cdfb692
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -114,9 +114,10 @@ def _handle_deprecation(self): structure = re.sub('^/', '', structure) for setting in ('ARTICLE_URL', 'ARTICLE_LANG_URL', 'PAGE_URL', - 'PAGE_LANG_URL', 'ARTICLE_SAVE_AS', - 'ARTICLE_LANG_SAVE_AS', 'PAGE_SAVE_AS', - 'PAGE_LANG_SAVE_AS'): + 'PAGE_LANG_URL', 'DRAFT_URL', 'DRAFT_LANG_URL', + 'ARTICLE_SAVE_AS', 'ARTICLE_LANG_SAVE_AS', + 'DRAFT_SAVE_AS', 'DRAFT_LANG_SAVE_AS', + 'PAGE_SAVE_AS', 'PAGE_LANG_SAVE_AS'): self.settings[setting] = os.path.join(structure, self.settings[setting]) logger.warning("%s = '%s'" % (setting, self.settings[setting])) @@ -174,8 +175,11 @@ def run(self): pages_generator = next(g for g in generators if isinstance(g, PagesGenerator)) - print('Done: Processed {} articles and {} pages in {:.2f} seconds.'.format( + print('Done: Processed {} article(s), {} draft(s) and {} page(s) in ' \ + '{:.2f} seconds.'.format( len(articles_generator.articles) + len(articles_generator.translations), + len(articles_generator.drafts) + \ + len(articles_generator.drafts_translations), len(pages_generator.pages) + len(pages_generator.translations), time.time() - start_time)) diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -328,6 +328,11 @@ class Article(Page): default_template = 'article' +class Draft(Page): + mandatory_properties = ('title', 'category') + default_template = 'article' + + class Quote(Page): base_properties = ('author', 'date') diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -18,7 +18,7 @@ from jinja2 import (Environment, FileSystemLoader, PrefixLoader, ChoiceLoader, BaseLoader, TemplateNotFound) -from pelican.contents import Article, Page, Static, is_valid_content +from pelican.contents import Article, Draft, Page, Static, is_valid_content from pelican.readers import Readers from pelican.utils import copy, process_translations, mkdir_p, DateFormatter from pelican import signals @@ -190,7 +190,8 @@ def __init__(self, *args, **kwargs): self.categories = defaultdict(list) self.related_posts = [] self.authors = defaultdict(list) - self.drafts = [] + self.drafts = [] # only drafts in default language + self.drafts_translations = [] super(ArticlesGenerator, self).__init__(*args, **kwargs) signals.article_generator_init.send(self) @@ -376,11 +377,11 @@ def generate_authors(self, write): def generate_drafts(self, write): """Generate drafts pages.""" - for article in self.drafts: - write(os.path.join('drafts', '%s.html' % article.slug), - self.get_template(article.template), self.context, - article=article, category=article.category, - all_articles=self.articles) + for draft in chain(self.drafts_translations, self.drafts): + write(draft.save_as, self.get_template(draft.template), + self.context, article=draft, category=draft.category, + override_output=hasattr(draft, 'override_save_as'), + all_articles=self.articles) def generate_pages(self, writer): """Generate the pages on the disk""" @@ -403,6 +404,7 @@ def generate_context(self): """Add the articles into the shared context""" all_articles = [] + all_drafts = [] for f in self.get_files( self.settings['ARTICLE_DIR'], exclude=self.settings['ARTICLE_EXCLUDES']): @@ -426,13 +428,22 @@ def generate_context(self): if article.status.lower() == "published": all_articles.append(article) elif article.status.lower() == "draft": - self.drafts.append(article) + draft = 
self.readers.read_file( + base_path=self.path, path=f, content_class=Draft, + context=self.context, + preread_signal=signals.article_generator_preread, + preread_sender=self, + context_signal=signals.article_generator_context, + context_sender=self) + all_drafts.append(draft) else: logger.warning("Unknown status %s for file %s, skipping it." % (repr(article.status), repr(f))) self.articles, self.translations = process_translations(all_articles) + self.drafts, self.drafts_translations = \ + process_translations(all_drafts) signals.article_generator_pretaxonomy.send(self) diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -59,6 +59,10 @@ 'ARTICLE_SAVE_AS': '{slug}.html', 'ARTICLE_LANG_URL': '{slug}-{lang}.html', 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html', + 'DRAFT_URL': 'drafts/{slug}.html', + 'DRAFT_SAVE_AS': os.path.join('drafts', '{slug}.html'), + 'DRAFT_LANG_URL': 'drafts/{slug}-{lang}.html', + 'DRAFT_LANG_SAVE_AS': os.path.join('drafts', '{slug}-{lang}.html'), 'PAGE_URL': 'pages/{slug}.html', 'PAGE_SAVE_AS': os.path.join('pages', '{slug}.html'), 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',
diff --git a/pelican/tests/output/custom/drafts/a-draft-article.html b/pelican/tests/output/custom/drafts/a-draft-article.html --- a/pelican/tests/output/custom/drafts/a-draft-article.html +++ b/pelican/tests/output/custom/drafts/a-draft-article.html @@ -32,7 +32,7 @@ <h1><a href="../">Alexis' log </a></h1> <article> <header> <h1 class="entry-title"> - <a href="../a-draft-article.html" rel="bookmark" + <a href="../drafts/a-draft-article.html" rel="bookmark" title="Permalink to A draft article">A draft article</a></h1> </header> @@ -97,4 +97,4 @@ <h2>social</h2> }()); </script> </body> -</html> \ No newline at end of file +</html>
Draft with translations causes critical build error. If there are files with the same slug in draft status, for example:

test_en.md

```
Title: Test in English
Date: 2013-10-4
Slug: test
Lang: en
Status: draft
```

test_ja.md

```
Title: 日本語テスト
Date: 2013-10-4
Slug: test
Lang: ja
Status: draft
```

this causes a build error:

```
CRITICAL: File /path/to/blog/output/drafts/test.html is to be overwritten
```

The pelican version is 3.3.0.
Thank you for getting in touch. The current behavior is to save all drafts in the same location, so using the same slug for multiple posts will indeed trigger this error. In order to avoid this, perhaps Pelican could use the `ARTICLE_LANG_SAVE_AS` setting for drafts, much as it already does for the eventual production-ready posts. @litmisty: Might you be willing to assist with implementing a fix for this issue? Thank you for commenting. I'll see if I can fix it. Thanks for the contribution, @litmisty. It seems Travis is reporting test failures, due to what appears to be changes in the functional test output. Perhaps you can run the tests on your development environment via `pip install -r dev_requirements.txt` and `python -m unittest discover` and then determine whether the changes in the functional test output are expected/warranted? @kylef pointed out that `article.save_as` might not be the right solution here. For example, the corresponding setting in my config file is:

```
ARTICLE_SAVE_AS = "{category}/{slug}/index.html"
```

That would create the draft in `/drafts/mycategory/title-of-my-post/index.html`, and I'm not sure I need or want the category sub-folder included in the drafts folder. An alternate solution might be to detect whether `article.lang` is different from `DEFAULT_LANG`, and if so, save the draft as `article.slug` + `_` + `article.lang` + `.html` (e.g., `drafts/my-post_jp.html`). Thoughts? @litmisty: There seem to be some unrelated commits in this pull request. Perhaps you could [rebase on current master and squash your commits](https://github.com/getpelican/pelican/wiki/Git-Tips#squashing-commits) into a single commit? That will help keep the version history clean and readable. Thanks!
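For reference, the approach the patch above settles on is a set of dedicated draft settings with language-aware variants. A sketch of the defaults visible in the diff:

``` python
import os

# Sketch of the DRAFT_* defaults added by the patch above: translated drafts
# get a -{lang} suffix, so two drafts sharing a slug no longer overwrite each
# other in the drafts/ output directory.
DRAFT_URL = 'drafts/{slug}.html'
DRAFT_SAVE_AS = os.path.join('drafts', '{slug}.html')
DRAFT_LANG_URL = 'drafts/{slug}-{lang}.html'
DRAFT_LANG_SAVE_AS = os.path.join('drafts', '{slug}-{lang}.html')
```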
2013-12-26T18:45:28Z
[]
[]
getpelican/pelican
1322
getpelican__pelican-1322
[ "1344" ]
33e9ce1ddf7a2b383fc5495d7a957770079b7329
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -63,9 +63,9 @@ def init_path(self): def init_plugins(self): self.plugins = [] - logger.debug('Temporarily adding PLUGIN_PATH to system path') + logger.debug('Temporarily adding PLUGIN_PATHS to system path') _sys_path = sys.path[:] - for pluginpath in self.settings['PLUGIN_PATH']: + for pluginpath in self.settings['PLUGIN_PATHS']: sys.path.insert(0, pluginpath) for plugin in self.settings['PLUGINS']: # if it's a string, then import it diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -110,29 +110,30 @@ def _include_path(self, path, extensions=None): return True return False - def get_files(self, path, exclude=[], extensions=None): + def get_files(self, paths, exclude=[], extensions=None): """Return a list of files to use, based on rules - :param path: the path to search (relative to self.path) + :param paths: the list pf paths to search (relative to self.path) :param exclude: the list of path to exclude :param extensions: the list of allowed extensions (if False, all extensions are allowed) """ files = [] - root = os.path.join(self.path, path) - - if os.path.isdir(root): - for dirpath, dirs, temp_files in os.walk(root, followlinks=True): - for e in exclude: - if e in dirs: - dirs.remove(e) - reldir = os.path.relpath(dirpath, self.path) - for f in temp_files: - fp = os.path.join(reldir, f) - if self._include_path(fp, extensions): - files.append(fp) - elif os.path.exists(root) and self._include_path(path, extensions): - files.append(path) # can't walk non-directories + for path in paths: + root = os.path.join(self.path, path) + + if os.path.isdir(root): + for dirpath, dirs, temp_files in os.walk(root, followlinks=True): + for e in exclude: + if e in dirs: + dirs.remove(e) + reldir = os.path.relpath(dirpath, self.path) + for f in temp_files: + fp = os.path.join(reldir, f) + if self._include_path(fp, extensions): + files.append(fp) + elif os.path.exists(root) and self._include_path(path, extensions): + files.append(path) # can't walk non-directories return files def add_source_path(self, content): @@ -462,7 +463,7 @@ def generate_context(self): all_articles = [] all_drafts = [] for f in self.get_files( - self.settings['ARTICLE_DIR'], + self.settings['ARTICLE_PATHS'], exclude=self.settings['ARTICLE_EXCLUDES']): article = self.get_cached_data(f, None) if article is None: @@ -586,7 +587,7 @@ def generate_context(self): all_pages = [] hidden_pages = [] for f in self.get_files( - self.settings['PAGE_DIR'], + self.settings['PAGE_PATHS'], exclude=self.settings['PAGE_EXCLUDES']): page = self.get_cached_data(f, None) if page is None: @@ -660,20 +661,17 @@ def _copy_paths(self, paths, source, destination, output_path, def generate_context(self): self.staticfiles = [] - - # walk static paths - for static_path in self.settings['STATIC_PATHS']: - for f in self.get_files( - static_path, extensions=False): - static = self.readers.read_file( - base_path=self.path, path=f, content_class=Static, - fmt='static', context=self.context, - preread_signal=signals.static_generator_preread, - preread_sender=self, - context_signal=signals.static_generator_context, - context_sender=self) - self.staticfiles.append(static) - self.add_source_path(static) + for f in self.get_files(self.settings['STATIC_PATHS'], + extensions=False): + static = self.readers.read_file( + base_path=self.path, path=f, content_class=Static, + fmt='static', 
context=self.context, + preread_signal=signals.static_generator_preread, + preread_sender=self, + context_signal=signals.static_generator_context, + context_sender=self) + self.staticfiles.append(static) + self.add_source_path(static) self._update_context(('staticfiles',)) signals.static_generator_finalized.send(self) diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -29,10 +29,10 @@ 'themes', 'notmyidea') DEFAULT_CONFIG = { 'PATH': os.curdir, - 'ARTICLE_DIR': '', - 'ARTICLE_EXCLUDES': ('pages',), - 'PAGE_DIR': 'pages', - 'PAGE_EXCLUDES': (), + 'ARTICLE_PATHS': [''], + 'ARTICLE_EXCLUDES': [], + 'PAGE_PATHS': ['pages'], + 'PAGE_EXCLUDES': [], 'THEME': DEFAULT_THEME, 'OUTPUT_PATH': 'output', 'READERS': {}, @@ -114,7 +114,7 @@ 'ARTICLE_PERMALINK_STRUCTURE': '', 'TYPOGRIFY': False, 'SUMMARY_MAX_LENGTH': 50, - 'PLUGIN_PATH': [], + 'PLUGIN_PATHS': [], 'PLUGINS': [], 'PYGMENTS_RST_OPTIONS': {}, 'TEMPLATE_PAGES': {}, @@ -147,13 +147,17 @@ def read_settings(path=None, override=None): if p not in ('THEME') or os.path.exists(absp): local_settings[p] = absp - if isinstance(local_settings['PLUGIN_PATH'], six.string_types): - logger.warning("Defining %s setting as string has been deprecated (should be a list)" % 'PLUGIN_PATH') - local_settings['PLUGIN_PATH'] = [local_settings['PLUGIN_PATH']] - else: - if 'PLUGIN_PATH' in local_settings and local_settings['PLUGIN_PATH'] is not None: - local_settings['PLUGIN_PATH'] = [os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(path), pluginpath))) - if not isabs(pluginpath) else pluginpath for pluginpath in local_settings['PLUGIN_PATH']] + if 'PLUGIN_PATH' in local_settings: + logger.warning('PLUGIN_PATH setting has been replaced by ' + 'PLUGIN_PATHS, moving it to the new setting name.') + local_settings['PLUGIN_PATHS'] = local_settings['PLUGIN_PATH'] + del local_settings['PLUGIN_PATH'] + if isinstance(local_settings['PLUGIN_PATHS'], six.string_types): + logger.warning("Defining %s setting as string has been deprecated (should be a list)" % 'PLUGIN_PATHS') + local_settings['PLUGIN_PATHS'] = [local_settings['PLUGIN_PATHS']] + elif local_settings['PLUGIN_PATHS'] is not None: + local_settings['PLUGIN_PATHS'] = [os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(path), pluginpath))) + if not isabs(pluginpath) else pluginpath for pluginpath in local_settings['PLUGIN_PATHS']] else: local_settings = copy.deepcopy(DEFAULT_CONFIG) @@ -311,6 +315,16 @@ def configure_settings(settings): key=lambda r: r[0], ) + # move {ARTICLE,PAGE}_DIR -> {ARTICLE,PAGE}_PATHS + for key in ['ARTICLE', 'PAGE']: + old_key = key + '_DIR' + new_key = key + '_PATHS' + if old_key in settings: + logger.warning('Deprecated setting {}, moving it to {} list'.format( + old_key, new_key)) + settings[new_key] = [settings[old_key]] # also make a list + del settings[old_key] + # Save people from accidentally setting a string rather than a list path_keys = ( 'ARTICLE_EXCLUDES', @@ -324,13 +338,27 @@ def configure_settings(settings): 'PLUGINS', 'STATIC_PATHS', 'THEME_STATIC_PATHS', + 'ARTICLE_PATHS', + 'PAGE_PATHS', ) for PATH_KEY in filter(lambda k: k in settings, path_keys): - if isinstance(settings[PATH_KEY], six.string_types): - logger.warning("Detected misconfiguration with %s setting " - "(must be a list), falling back to the default" - % PATH_KEY) - settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY] + if isinstance(settings[PATH_KEY], six.string_types): + logger.warning("Detected misconfiguration with %s setting " + "(must be 
a list), falling back to the default" + % PATH_KEY) + settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY] + + # Add {PAGE,ARTICLE}_PATHS to {ARTICLE,PAGE}_EXCLUDES + mutually_exclusive = ('ARTICLE', 'PAGE') + for type_1, type_2 in [mutually_exclusive, mutually_exclusive[::-1]]: + try: + includes = settings[type_1 + '_PATHS'] + excludes = settings[type_2 + '_EXCLUDES'] + for path in includes: + if path not in excludes: + excludes.append(path) + except KeyError: + continue # setting not specified, nothing to do for old, new, doc in [ ('LESS_GENERATOR', 'the Webassets plugin', None),
diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -372,8 +372,8 @@ def distill_pages(self, pages): def test_generate_context(self): settings = get_settings(filenames={}) - settings['PAGE_DIR'] = 'TestPages' # relative to CUR_DIR settings['CACHE_PATH'] = self.temp_cache + settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR settings['DEFAULT_DATE'] = (1970, 1, 1) generator = PagesGenerator( diff --git a/pelican/tests/test_settings.py b/pelican/tests/test_settings.py --- a/pelican/tests/test_settings.py +++ b/pelican/tests/test_settings.py @@ -43,7 +43,10 @@ def test_read_empty_settings(self): # Providing no file should return the default values. settings = read_settings(None) expected = copy.deepcopy(DEFAULT_CONFIG) - expected['FEED_DOMAIN'] = '' # Added by configure settings + # Added by configure settings + expected['FEED_DOMAIN'] = '' + expected['ARTICLE_EXCLUDES'] = ['pages'] + expected['PAGE_EXCLUDES'] = [''] self.maxDiff = None self.assertDictEqual(settings, expected)
Rename PLUGIN_PATH to PLUGIN_PATHS. PR #1276 addressed #981 by changing the `PLUGIN_PATH` setting value from a string to a list. As @smartass101 pointed out, perhaps we should change the name of this setting to `PLUGIN_PATHS` for the sake of consistency and clarity. The previous `PLUGIN_PATH` setting should still be honored, with a deprecation warning, so that existing Pelican users don't have any disruption when upgrading to the next release.
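The backward-compatibility behavior described here boils down to a small shim. An illustrative sketch, condensed from the settings.py changes in the diff above rather than Pelican's literal code (the helper name is made up for the example):

``` python
import logging

logger = logging.getLogger(__name__)

def migrate_plugin_path(settings):
    """Illustrative condensation of the deprecation shim in the diff above:
    honor the old PLUGIN_PATH key, but move its value to PLUGIN_PATHS."""
    if 'PLUGIN_PATH' in settings:
        logger.warning('PLUGIN_PATH setting has been replaced by PLUGIN_PATHS, '
                       'moving it to the new setting name.')
        settings['PLUGIN_PATHS'] = settings.pop('PLUGIN_PATH')
    # a bare string is still accepted, but gets wrapped into a list
    if isinstance(settings.get('PLUGIN_PATHS'), str):
        settings['PLUGIN_PATHS'] = [settings['PLUGIN_PATHS']]
    return settings

print(migrate_plugin_path({'PLUGIN_PATH': 'plugins'}))
# -> {'PLUGIN_PATHS': ['plugins']}
```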
2014-04-21T09:38:38Z
[]
[]
getpelican/pelican
1354
getpelican__pelican-1354
[ "1325", "1325" ]
6008f7e2ed2621f99224b437341cf4737c87e9a3
diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -584,7 +584,7 @@ def find_empty_alt(content, path): # src before alt <img [^\>]* - src=(['"])(.*)\1 + src=(['"])(.*?)\1 [^\>]* alt=(['"])\3 )|(?: @@ -593,7 +593,7 @@ def find_empty_alt(content, path): [^\>]* alt=(['"])\4 [^\>]* - src=(['"])(.*)\5 + src=(['"])(.*?)\5 ) """, re.X) for match in re.findall(imgs, content):
diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -9,6 +9,13 @@ from pelican.tests.support import get_settings, unittest from pelican.utils import SafeDatetime +try: + from unittest.mock import patch +except ImportError: + try: + from mock import patch + except ImportError: + patch = False CUR_DIR = os.path.dirname(__file__) CONTENT_PATH = os.path.join(CUR_DIR, 'content') @@ -81,6 +88,22 @@ def test_readfile_unknown_extension(self): with self.assertRaises(TypeError): self.read_file(path='article_with_metadata.unknownextension') + @unittest.skipUnless(patch, 'Needs Mock module') + def test_find_empty_alt(self): + with patch('pelican.readers.logger') as log_mock: + content = ['<img alt="" src="test-image.png" width="300px" />', + '<img src="test-image.png" width="300px" alt="" />'] + + for tag in content: + readers.find_empty_alt(tag, '/test/path') + log_mock.warning.assert_called_with( + u'Empty alt attribute for image %s in %s', + u'test-image.png', + u'/test/path', + extra={'limit_msg': + 'Other images have empty alt attributes'} + ) + class RstReaderTest(ReaderTest):
Empty alt attribute for image warning includes HTML. When using reStructuredText image tags with width and height without an alt tag, the warning will include the raw HTML. See the result below.

``` rst
.. image:: /images/versioning-with-xcode.png
    :width: 671px
    :height: 563px
    :align: center
```

### Actual result

```
WARNING: Empty alt attribute for image versioning-with-xcode.png" style="width: 671px; height: 563px; in /Users/kylef/Projects/kylef/kylefuller.co.uk/content/posts/versioning-with-xcode-and-git.rst
```

### Expected result

```
WARNING: Empty alt attribute for image versioning-with-xcode.png in /Users/kylef/Projects/kylef/kylefuller.co.uk/content/posts/versioning-with-xcode-and-git.rst
```
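The one-character fix in the patch above (making `.*` non-greedy) is easiest to see with a tiny standalone example. The pattern is simplified, not the full one from pelican/readers.py:

``` python
import re

tag = ('<img src="versioning-with-xcode.png" '
       'style="width: 671px; height: 563px;" alt="" />')

greedy = re.search(r'src=(["\'])(.*)\1[^\>]*alt=(["\'])\3', tag)
lazy = re.search(r'src=(["\'])(.*?)\1[^\>]*alt=(["\'])\3', tag)

print(greedy.group(2))  # swallows everything up to the last quote before alt=""
print(lazy.group(2))    # just the file name, as the warning intends
```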
@kylef: Does the fix provided by @storeyio satisfactorily address the issue you reported here? Awesome, thanks @storeyio.
2014-05-19T22:04:33Z
[]
[]
getpelican/pelican
1376
getpelican__pelican-1376
[ "1355" ]
ef967056778a6610fe7b61c349acec0990073dd7
diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -17,11 +17,6 @@ from markdown import Markdown except ImportError: Markdown = False # NOQA -try: - from asciidocapi import AsciiDocAPI - asciidoc = True -except ImportError: - asciidoc = False try: from html import escape except ImportError: @@ -349,40 +344,6 @@ def read(self, filename): return parser.body, metadata -class AsciiDocReader(BaseReader): - """Reader for AsciiDoc files""" - - enabled = bool(asciidoc) - file_extensions = ['asc', 'adoc', 'asciidoc'] - default_options = ["--no-header-footer", "-a newline=\\n"] - - def read(self, source_path): - """Parse content and metadata of asciidoc files""" - from cStringIO import StringIO - with pelican_open(source_path) as source: - text = StringIO(source) - content = StringIO() - ad = AsciiDocAPI() - - options = self.settings['ASCIIDOC_OPTIONS'] - if isinstance(options, (str, unicode)): - options = [m.strip() for m in options.split(',')] - options = self.default_options + options - for o in options: - ad.options(*o.split()) - - ad.execute(text, content, backend="html4") - content = content.getvalue() - - metadata = {} - for name, value in ad.asciidoc.document.attributes.items(): - name = name.lower() - metadata[name] = self.process_metadata(name, value) - if 'doctitle' in metadata: - metadata['title'] = metadata['doctitle'] - return content, metadata - - class Readers(FileStampDataCacher): """Interface for all readers. diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -98,7 +98,6 @@ 'PELICAN_CLASS': 'pelican.Pelican', 'DEFAULT_DATE_FORMAT': '%a %d %B %Y', 'DATE_FORMATS': {}, - 'ASCIIDOC_OPTIONS': [], 'MD_EXTENSIONS': ['codehilite(css_class=highlight)', 'extra'], 'JINJA_EXTENSIONS': [], 'JINJA_FILTERS': {},
diff --git a/pelican/tests/content/article_with_asc_extension.asc b/pelican/tests/content/article_with_asc_extension.asc deleted file mode 100644 --- a/pelican/tests/content/article_with_asc_extension.asc +++ /dev/null @@ -1,12 +0,0 @@ -Test AsciiDoc File Header -========================= -:Author: Author O. Article -:Email: <[email protected]> -:Date: 2011-09-15 09:05 -:Category: Blog -:Tags: Linux, Python, Pelican - -Used for pelican test ---------------------- - -The quick brown fox jumped over the lazy dog's back. diff --git a/pelican/tests/content/article_with_asc_options.asc b/pelican/tests/content/article_with_asc_options.asc deleted file mode 100644 --- a/pelican/tests/content/article_with_asc_options.asc +++ /dev/null @@ -1,9 +0,0 @@ -Test AsciiDoc File Header -========================= - -Used for pelican test ---------------------- - -version {revision} - -The quick brown fox jumped over the lazy dog's back. diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -333,42 +333,6 @@ def test_article_with_filename_metadata(self): self.assertEqual(value, page.metadata[key], key) -class AdReaderTest(ReaderTest): - - @unittest.skipUnless(readers.asciidoc, "asciidoc isn't installed") - def test_article_with_asc_extension(self): - # Ensure the asc extension is being processed by the correct reader - page = self.read_file( - path='article_with_asc_extension.asc') - expected = ('<hr>\n<h2><a name="_used_for_pelican_test">' - '</a>Used for pelican test</h2>\n' - '<p>The quick brown fox jumped over' - ' the lazy dog&#8217;s back.</p>\n') - self.assertEqual(page.content, expected) - expected = { - 'category': 'Blog', - 'author': 'Author O. Article', - 'title': 'Test AsciiDoc File Header', - 'date': datetime.datetime(2011, 9, 15, 9, 5), - 'tags': ['Linux', 'Python', 'Pelican'], - } - - for key, value in expected.items(): - self.assertEqual(value, page.metadata[key], key) - - @unittest.skipUnless(readers.asciidoc, "asciidoc isn't installed") - def test_article_with_asc_options(self): - # test to ensure the ASCIIDOC_OPTIONS is being used - reader = readers.AsciiDocReader( - dict(ASCIIDOC_OPTIONS=["-a revision=1.0.42"])) - content, metadata = reader.read(_path('article_with_asc_options.asc')) - expected = ('<hr>\n<h2><a name="_used_for_pelican_test"></a>Used for' - ' pelican test</h2>\n<p>version 1.0.42</p>\n' - '<p>The quick brown fox jumped over the lazy' - ' dog&#8217;s back.</p>\n') - self.assertEqual(content, expected) - - class HTMLReaderTest(ReaderTest): def test_article_with_comments(self): page = self.read_file(path='article_with_comments.html')
Split asciidoc into a plugin. I don't think it belongs in the core, since it depends on non-Python dependencies and it's not often used. This is exactly what plugins are for.
I agree. PS: I'm willing to work on this, if it's decided. I believe we have consensus on this topic. @avaris: Please feel free to get started. Thank you for volunteering to help out with this! I'll submit a PR that removes AsciiDocReader from core soon, but how shall we deal with AsciiDoc in the documentation? Remove it completely, or leave a single note (probably in the `Installing Pelican/Optional Packages` section) pointing to the plugins repo?
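For anyone who still needs AsciiDoc after it leaves core, the reader itself condenses to roughly the sketch below. It is derived from the class deleted in the diff above; the Python 2 cStringIO import is swapped for io.StringIO here, and how a plugin would register the class with Pelican is deliberately left out:

``` python
from pelican.readers import BaseReader
from pelican.utils import pelican_open

try:
    from asciidocapi import AsciiDocAPI
    asciidoc = True
except ImportError:
    asciidoc = False


class AsciiDocReader(BaseReader):
    """Condensed sketch of the reader class removed from core in the diff above."""

    enabled = bool(asciidoc)
    file_extensions = ['asc', 'adoc', 'asciidoc']

    def read(self, source_path):
        from io import StringIO  # the removed code used cStringIO on Python 2
        with pelican_open(source_path) as source:
            text, content = StringIO(source), StringIO()
            ad = AsciiDocAPI()
            ad.options('--no-header-footer')
            ad.execute(text, content, backend='html4')
        metadata = {name.lower(): self.process_metadata(name.lower(), value)
                    for name, value in ad.asciidoc.document.attributes.items()}
        if 'doctitle' in metadata:
            metadata['title'] = metadata['doctitle']
        return content.getvalue(), metadata
```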
2014-06-14T06:30:59Z
[]
[]
getpelican/pelican
1381
getpelican__pelican-1381
[ "1198", "729", "1198" ]
2432a224009fc347b96fbc4a36b7a6080967a4dc
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -11,13 +11,12 @@ import re import sys -from datetime import datetime from pelican import signals from pelican.settings import DEFAULT_CONFIG from pelican.utils import (slugify, truncate_html_words, memoized, strftime, python_2_unicode_compatible, deprecated_attribute, - path_to_url) + path_to_url, SafeDatetime) # Import these so that they're avalaible when you import from pelican.contents. from pelican.urlwrappers import (URLWrapper, Author, Category, Tag) # NOQA @@ -127,7 +126,7 @@ def __init__(self, content, metadata=None, settings=None, if not hasattr(self, 'status'): self.status = settings['DEFAULT_STATUS'] if not settings['WITH_FUTURE_DATES']: - if hasattr(self, 'date') and self.date > datetime.now(): + if hasattr(self, 'date') and self.date > SafeDatetime.now(): self.status = 'draft' # store the summary metadata if it is set @@ -161,7 +160,7 @@ def url_format(self): 'path': path_to_url(path), 'slug': getattr(self, 'slug', ''), 'lang': getattr(self, 'lang', 'en'), - 'date': getattr(self, 'date', datetime.now()), + 'date': getattr(self, 'date', SafeDatetime.now()), 'author': slugify( getattr(self, 'author', ''), slug_substitutions diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals, print_function import os +import six import math import random import logging @@ -348,31 +349,22 @@ def _generate_period_archives(dates, key, save_as_fmt): # format string syntax can be used for specifying the # period archive dates date = archive[0].date - # Under python 2, with non-ascii locales, u"{:%b}".format(date) might raise UnicodeDecodeError - # because u"{:%b}".format(date) will call date.__format__(u"%b"), which will return a byte string - # and not a unicode string. 
- # eg: - # locale.setlocale(locale.LC_ALL, 'ja_JP.utf8') - # date.__format__(u"%b") == '12\xe6\x9c\x88' # True - try: - save_as = save_as_fmt.format(date=date) - except UnicodeDecodeError: - # Python2 only: - # Let date.__format__() work with byte strings instead of characters since it fails to work with characters - bytes_save_as_fmt = save_as_fmt.encode('utf8') - bytes_save_as = bytes_save_as_fmt.format(date=date) - save_as = unicode(bytes_save_as,'utf8') + save_as = save_as_fmt.format(date=date) context = self.context.copy() if key == period_date_key['year']: context["period"] = (_period,) - elif key == period_date_key['month']: - context["period"] = (_period[0], - calendar.month_name[_period[1]]) else: - context["period"] = (_period[0], - calendar.month_name[_period[1]], - _period[2]) + month_name = calendar.month_name[_period[1]] + if not six.PY3: + month_name = month_name.decode('utf-8') + if key == period_date_key['month']: + context["period"] = (_period[0], + month_name) + else: + context["period"] = (_period[0], + month_name, + _period[2]) write(save_as, template, context, dates=archive, blog=True) diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals, print_function -import datetime import logging import os import re @@ -28,7 +27,7 @@ from pelican import signals from pelican.contents import Page, Category, Tag, Author -from pelican.utils import get_date, pelican_open, FileStampDataCacher +from pelican.utils import get_date, pelican_open, FileStampDataCacher, SafeDatetime METADATA_PROCESSORS = { @@ -494,7 +493,7 @@ def default_metadata(settings=None, process=None): value = process('category', value) metadata['category'] = value if settings.get('DEFAULT_DATE', None) and settings['DEFAULT_DATE'] != 'fs': - metadata['date'] = datetime.datetime(*settings['DEFAULT_DATE']) + metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE']) return metadata @@ -502,7 +501,7 @@ def path_metadata(full_path, source_path, settings=None): metadata = {} if settings: if settings.get('DEFAULT_DATE', None) == 'fs': - metadata['date'] = datetime.datetime.fromtimestamp( + metadata['date'] = SafeDatetime.fromtimestamp( os.stat(full_path).st_ctime) metadata.update(settings.get('EXTRA_PATH_METADATA', {}).get( source_path, {})) @@ -525,7 +524,7 @@ def parse_path_metadata(source_path, settings=None, process=None): ... 
process=reader.process_metadata) >>> pprint.pprint(metadata) # doctest: +ELLIPSIS {'category': <pelican.urlwrappers.Category object at ...>, - 'date': datetime.datetime(2013, 1, 1, 0, 0), + 'date': SafeDatetime(2013, 1, 1, 0, 0), 'slug': 'my-slug'} """ metadata = {} diff --git a/pelican/tools/pelican_import.py b/pelican/tools/pelican_import.py --- a/pelican/tools/pelican_import.py +++ b/pelican/tools/pelican_import.py @@ -21,7 +21,7 @@ from six.moves.urllib.parse import urlparse from six.moves.urllib.request import urlretrieve -from pelican.utils import slugify +from pelican.utils import slugify, SafeDatetime from pelican.log import init logger = logging.getLogger(__name__) @@ -303,7 +303,7 @@ def dc2fields(file): def posterous2fields(api_token, email, password): """Imports posterous posts""" import base64 - from datetime import datetime, timedelta + from datetime import timedelta try: # py3k import import json @@ -340,7 +340,7 @@ def get_posterous_posts(api_token, email, password, page = 1): slug = slugify(post.get('title')) tags = [tag.get('name') for tag in post.get('tags')] raw_date = post.get('display_date') - date_object = datetime.strptime(raw_date[:-6], "%Y/%m/%d %H:%M:%S") + date_object = SafeDatetime.strptime(raw_date[:-6], "%Y/%m/%d %H:%M:%S") offset = int(raw_date[-5:]) delta = timedelta(hours = offset / 100) date_object -= delta diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -14,6 +14,7 @@ import traceback import pickle import hashlib +import datetime from collections import Hashable from contextlib import contextmanager @@ -56,7 +57,10 @@ def strftime(date, date_format): for candidate in candidates: # test for valid C89 directives only if candidate[1] in 'aAbBcdfHIjmMpSUwWxXyYzZ%': - formatted = date.strftime(candidate) + if isinstance(date, SafeDatetime): + formatted = date.strftime(candidate, safe=False) + else: + formatted = date.strftime(candidate) # convert Py2 result to unicode if not six.PY3 and enc is not None: formatted = formatted.decode(enc) @@ -68,6 +72,17 @@ def strftime(date, date_format): return template % tuple(formatted_candidates) +class SafeDatetime(datetime.datetime): + '''Subclass of datetime that works with utf-8 format strings on PY2''' + + def strftime(self, fmt, safe=True): + '''Uses our custom strftime if supposed to be *safe*''' + if safe: + return strftime(self, fmt) + else: + return super(SafeDatetime, self).strftime(fmt) + + class DateFormatter(object): '''A date formatter object used as a jinja filter @@ -79,12 +94,18 @@ def __init__(self): self.locale = locale.setlocale(locale.LC_TIME) def __call__(self, date, date_format): - old_locale = locale.setlocale(locale.LC_TIME) + old_lc_time = locale.setlocale(locale.LC_TIME) + old_lc_ctype = locale.setlocale(locale.LC_CTYPE) + locale.setlocale(locale.LC_TIME, self.locale) + # on OSX, encoding from LC_CTYPE determines the unicode output in PY3 + # make sure it's same as LC_TIME + locale.setlocale(locale.LC_CTYPE, self.locale) formatted = strftime(date, date_format) - locale.setlocale(locale.LC_TIME, old_locale) + locale.setlocale(locale.LC_TIME, old_lc_time) + locale.setlocale(locale.LC_CTYPE, old_lc_ctype) return formatted @@ -183,8 +204,10 @@ def get_date(string): If no format matches the given date, raise a ValueError. 
""" string = re.sub(' +', ' ', string) + default = SafeDatetime.now().replace(hour=0, minute=0, + second=0, microsecond=0) try: - return dateutil.parser.parse(string) + return dateutil.parser.parse(string, default=default) except (TypeError, ValueError): raise ValueError('{0!r} is not a valid date'.format(string)) diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -151,12 +151,7 @@ def write_file(self, name, template, context, relative_urls=False, def _write_file(template, localcontext, output_path, name, override): """Render the template write the file.""" - old_locale = locale.setlocale(locale.LC_ALL) - locale.setlocale(locale.LC_ALL, str('C')) - try: - output = template.render(localcontext) - finally: - locale.setlocale(locale.LC_ALL, old_locale) + output = template.render(localcontext) path = os.path.join(output_path, name) try: os.makedirs(os.path.dirname(path)) diff --git a/samples/pelican.conf_FR.py b/samples/pelican.conf_FR.py new file mode 100644 --- /dev/null +++ b/samples/pelican.conf_FR.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +AUTHOR = 'Alexis Métaireau' +SITENAME = "Alexis' log" +SITEURL = 'http://blog.notmyidea.org' +TIMEZONE = "Europe/Paris" + +# can be useful in development, but set to False when you're ready to publish +RELATIVE_URLS = True + +GITHUB_URL = 'http://github.com/ametaireau/' +DISQUS_SITENAME = "blog-notmyidea" +PDF_GENERATOR = False +REVERSE_CATEGORY_ORDER = True +LOCALE = "fr_FR.UTF-8" +DEFAULT_PAGINATION = 4 +DEFAULT_DATE = (2012, 3, 2, 14, 1, 1) +DEFAULT_DATE_FORMAT = '%d %B %Y' + +ARTICLE_URL = 'posts/{date:%Y}/{date:%B}/{date:%d}/{slug}/' +ARTICLE_SAVE_AS = ARTICLE_URL + 'index.html' + +FEED_ALL_RSS = 'feeds/all.rss.xml' +CATEGORY_FEED_RSS = 'feeds/%s.rss.xml' + +LINKS = (('Biologeek', 'http://biologeek.org'), + ('Filyb', "http://filyb.info/"), + ('Libert-fr', "http://www.libert-fr.com"), + ('N1k0', "http://prendreuncafe.com/blog/"), + ('Tarek Ziadé', "http://ziade.org/blog"), + ('Zubin Mithra', "http://zubin71.wordpress.com/"),) + +SOCIAL = (('twitter', 'http://twitter.com/ametaireau'), + ('lastfm', 'http://lastfm.com/user/akounet'), + ('github', 'http://github.com/ametaireau'),) + +# global metadata to all the contents +DEFAULT_METADATA = (('yeah', 'it is'),) + +# path-specific metadata +EXTRA_PATH_METADATA = { + 'extra/robots.txt': {'path': 'robots.txt'}, + } + +# static paths will be copied without parsing their contents +STATIC_PATHS = [ + 'pictures', + 'extra/robots.txt', + ] + +# custom page generated with a jinja2 template +TEMPLATE_PAGES = {'pages/jinja2_template.html': 'jinja2_template.html'} + +# code blocks with line numbers +PYGMENTS_RST_OPTIONS = {'linenos': 'table'} + +# foobar will not be used, because it's not in caps. All configuration keys +# have to be in caps +foobar = "barbaz"
diff --git a/pelican/tests/output/custom_locale/archives.html b/pelican/tests/output/custom_locale/archives.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/archives.html @@ -0,0 +1,100 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log</title> + <link rel="stylesheet" href="./theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="./">Alexis' log </a></h1> + <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> + <li><a href="./override/">Override url/save_as</a></li> + <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="./category/yeah.html">yeah</a></li> + <li><a href="./category/misc.html">misc</a></li> + <li><a href="./category/cat1.html">cat1</a></li> + <li><a href="./category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> +<h1>Archives for Alexis' log</h1> + +<dl> + <dt>30 novembre 2012</dt> + <dd><a href="./posts/2012/novembre/30/filename_metadata-example/">FILENAME_METADATA example</a></dd> + <dt>29 février 2012</dt> + <dd><a href="./posts/2012/février/29/second-article/">Second article</a></dd> + <dt>20 avril 2011</dt> + <dd><a href="./posts/2011/avril/20/a-markdown-powered-article/">A markdown powered article</a></dd> + <dt>17 février 2011</dt> + <dd><a href="./posts/2011/février/17/article-1/">Article 1</a></dd> + <dt>17 février 2011</dt> + <dd><a href="./posts/2011/février/17/article-2/">Article 2</a></dd> + <dt>17 février 2011</dt> + <dd><a href="./posts/2011/février/17/article-3/">Article 3</a></dd> + <dt>02 décembre 2010</dt> + <dd><a href="./posts/2010/décembre/02/this-is-a-super-article/">This is a super article !</a></dd> + <dt>20 octobre 2010</dt> + <dd><a href="./posts/2010/octobre/20/oh-yeah/">Oh yeah !</a></dd> + <dt>15 octobre 2010</dt> + <dd><a href="./posts/2010/octobre/15/unbelievable/">Unbelievable !</a></dd> + <dt>14 mars 2010</dt> + <dd><a href="./tag/baz.html">The baz tag</a></dd> +</dl> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a 
href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/author/alexis-metaireau.html b/pelican/tests/output/custom_locale/author/alexis-metaireau.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/author/alexis-metaireau.html @@ -0,0 +1,173 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - Alexis Métaireau</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <aside id="featured" class="body"> + <article> + <h1 class="entry-title"><a href="../posts/2012/novembre/30/filename_metadata-example/">FILENAME_METADATA example</a></h1> +<footer class="post-info"> + <abbr class="published" title="2012-11-30T00:00:00"> + Published: 30 novembre 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. 
</p> + +</footer><!-- /.post-info --><p>Some cool stuff!</p> +<p>There are <a href="../posts/2012/novembre/30/filename_metadata-example/#disqus_thread">comments</a>.</p> </article> + </aside><!-- /#featured --> + <section id="content" class="body"> + <h1>Other articles</h1> + <hr /> + <ol id="posts-list" class="hfeed"> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2012/février/29/second-article/" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2012-02-29T00:00:00"> + Published: 29 février 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. </p> +<p>tags: <a href="../tag/foo.html">foo</a> <a href="../tag/bar.html">bar</a> <a href="../tag/baz.html">baz</a> </p>Translations: + <a href="../second-article-fr.html">fr</a> + +</footer><!-- /.post-info --> <p>This is some article, in english</p> + + <a class="readmore" href="../posts/2012/février/29/second-article/">read more</a> +<p>There are <a href="../posts/2012/février/29/second-article/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2011/avril/20/a-markdown-powered-article/" rel="bookmark" + title="Permalink to A markdown powered article">A markdown powered article</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-04-20T00:00:00"> + Published: 20 avril 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/cat1.html">cat1</a>. </p> + +</footer><!-- /.post-info --> <p>You're mutually oblivious.</p> +<p><a href="../posts/2010/octobre/15/unbelievable/">a root-relative link to unbelievable</a> +<a href="../posts/2010/octobre/15/unbelievable/">a file-relative link to unbelievable</a></p> + <a class="readmore" href="../posts/2011/avril/20/a-markdown-powered-article/">read more</a> +<p>There are <a href="../posts/2011/avril/20/a-markdown-powered-article/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2011/février/17/article-1/" rel="bookmark" + title="Permalink to Article 1">Article 1</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/cat1.html">cat1</a>. 
</p> + +</footer><!-- /.post-info --> <p>Article 1</p> + + <a class="readmore" href="../posts/2011/février/17/article-1/">read more</a> +<p>There are <a href="../posts/2011/février/17/article-1/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + </ol><!-- /#posts-list --> +<p class="paginator"> + Page 1 / 3 + <a href="../author/alexis-metaireau2.html">&raquo;</a> +</p> + </section><!-- /#content --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/author/alexis-metaireau2.html b/pelican/tests/output/custom_locale/author/alexis-metaireau2.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/author/alexis-metaireau2.html @@ -0,0 +1,187 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - Alexis Métaireau</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a 
href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <section id="content" class="body"> + <ol id="posts-list" class="hfeed" start="3"> + <li><article class="hentry"> + <header> + <h1><a href="../posts/2011/février/17/article-2/" rel="bookmark" + title="Permalink to Article 2">Article 2</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/cat1.html">cat1</a>. </p> + +</footer><!-- /.post-info --> <p>Article 2</p> + + <a class="readmore" href="../posts/2011/février/17/article-2/">read more</a> +<p>There are <a href="../posts/2011/février/17/article-2/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2011/février/17/article-3/" rel="bookmark" + title="Permalink to Article 3">Article 3</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/cat1.html">cat1</a>. </p> + +</footer><!-- /.post-info --> <p>Article 3</p> + + <a class="readmore" href="../posts/2011/février/17/article-3/">read more</a> +<p>There are <a href="../posts/2011/février/17/article-3/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2010/décembre/02/this-is-a-super-article/" rel="bookmark" + title="Permalink to This is a super article !">This is a super article !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-12-02T10:14:00"> + Published: 02 décembre 2010 + </abbr> + <br /> + <abbr class="modified" title="2013-11-17T23:29:00"> + Updated: 17 novembre 2013 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/yeah.html">yeah</a>. 
</p> +<p>tags: <a href="../tag/foo.html">foo</a> <a href="../tag/bar.html">bar</a> <a href="../tag/foobar.html">foobar</a> </p> +</footer><!-- /.post-info --> <p class="first last">Multi-line metadata should be supported +as well as <strong>inline markup</strong>.</p> + + <a class="readmore" href="../posts/2010/décembre/02/this-is-a-super-article/">read more</a> +<p>There are <a href="../posts/2010/décembre/02/this-is-a-super-article/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2010/octobre/20/oh-yeah/" rel="bookmark" + title="Permalink to Oh yeah !">Oh yeah !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-10-20T10:14:00"> + Published: 20 octobre 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/bar.html">bar</a>. </p> +<p>tags: <a href="../tag/oh.html">oh</a> <a href="../tag/bar.html">bar</a> <a href="../tag/yeah.html">yeah</a> </p>Translations: + <a href="../oh-yeah-fr.html">fr</a> + +</footer><!-- /.post-info --> <div class="section" id="why-not"> +<h2>Why not ?</h2> +<p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! +YEAH !</p> +<img alt="alternate text" src="../pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +</div> + + <a class="readmore" href="../posts/2010/octobre/20/oh-yeah/">read more</a> +<p>There are <a href="../posts/2010/octobre/20/oh-yeah/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + </ol><!-- /#posts-list --> +<p class="paginator"> + <a href="../author/alexis-metaireau.html">&laquo;</a> + Page 2 / 3 + <a href="../author/alexis-metaireau3.html">&raquo;</a> +</p> + </section><!-- /#content --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/author/alexis-metaireau3.html b/pelican/tests/output/custom_locale/author/alexis-metaireau3.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/author/alexis-metaireau3.html @@ -0,0 +1,138 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - Alexis Métaireau</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <section id="content" class="body"> + <ol id="posts-list" class="hfeed" start="3"> + <li><article class="hentry"> + <header> + <h1><a href="../posts/2010/octobre/15/unbelievable/" rel="bookmark" + title="Permalink to Unbelievable !">Unbelievable !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-10-15T20:30:00"> + Published: 15 octobre 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. </p> + +</footer><!-- /.post-info --> <p>Or completely awesome. 
Depends the needs.</p> +<p><a class="reference external" href="../posts/2011/avril/20/a-markdown-powered-article/">a root-relative link to markdown-article</a> +<a class="reference external" href="../posts/2011/avril/20/a-markdown-powered-article/">a file-relative link to markdown-article</a></p> +<div class="section" id="testing-sourcecode-directive"> +<h2>Testing sourcecode directive</h2> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table></div> +<div class="section" id="testing-another-case"> +<h2>Testing another case</h2> +<p>This will now have a line number in 'custom' since it's the default in +pelican.conf, it ...</p></div> + <a class="readmore" href="../posts/2010/octobre/15/unbelievable/">read more</a> +<p>There are <a href="../posts/2010/octobre/15/unbelievable/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../tag/baz.html" rel="bookmark" + title="Permalink to The baz tag">The baz tag</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-03-14T00:00:00"> + Published: 14 mars 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. 
</p> + +</footer><!-- /.post-info --> <p>This article overrides the listening of the articles under the <em>baz</em> tag.</p> + + <a class="readmore" href="../tag/baz.html">read more</a> +<p>There are <a href="../tag/baz.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + </ol><!-- /#posts-list --> +<p class="paginator"> + <a href="../author/alexis-metaireau2.html">&laquo;</a> + Page 3 / 3 +</p> + </section><!-- /#content --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/authors.html b/pelican/tests/output/custom_locale/authors.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/authors.html @@ -0,0 +1,82 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - Authors</title> + <link rel="stylesheet" href="./theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="./">Alexis' log </a></h1> + <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> + <li><a href="./override/">Override 
url/save_as</a></li> + <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="./category/yeah.html">yeah</a></li> + <li><a href="./category/misc.html">misc</a></li> + <li><a href="./category/cat1.html">cat1</a></li> + <li><a href="./category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + +<section id="content" class="body"> + <h1>Authors on Alexis' log</h1> + <ul> + <li><a href="./author/alexis-metaireau.html">Alexis Métaireau</a> (10)</li> + </ul> +</section> + + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/categories.html b/pelican/tests/output/custom_locale/categories.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/categories.html @@ -0,0 +1,80 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log</title> + <link rel="stylesheet" href="./theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="./">Alexis' log </a></h1> + <nav><ul> + <li><a href="./tag/oh.html">Oh Oh 
Oh</a></li> + <li><a href="./override/">Override url/save_as</a></li> + <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="./category/yeah.html">yeah</a></li> + <li><a href="./category/misc.html">misc</a></li> + <li><a href="./category/cat1.html">cat1</a></li> + <li><a href="./category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<ul> + <li><a href="./category/yeah.html">yeah</a></li> + <li><a href="./category/misc.html">misc</a></li> + <li><a href="./category/cat1.html">cat1</a></li> + <li><a href="./category/bar.html">bar</a></li> +</ul> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/category/bar.html b/pelican/tests/output/custom_locale/category/bar.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/category/bar.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - bar</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li class="active"><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <aside id="featured" class="body"> + <article> + <h1 class="entry-title"><a href="../posts/2010/octobre/20/oh-yeah/">Oh yeah !</a></h1> +<footer class="post-info"> + <abbr class="published" title="2010-10-20T10:14:00"> + Published: 20 octobre 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/bar.html">bar</a>. </p> +<p>tags: <a href="../tag/oh.html">oh</a> <a href="../tag/bar.html">bar</a> <a href="../tag/yeah.html">yeah</a> </p>Translations: + <a href="../oh-yeah-fr.html">fr</a> + +</footer><!-- /.post-info --><div class="section" id="why-not"> +<h2>Why not ?</h2> +<p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
+YEAH !</p> +<img alt="alternate text" src="../pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +</div> +<p>There are <a href="../posts/2010/octobre/20/oh-yeah/#disqus_thread">comments</a>.</p> </article> +<p class="paginator"> + Page 1 / 1 +</p> + </aside><!-- /#featured --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/category/cat1.html b/pelican/tests/output/custom_locale/category/cat1.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/category/cat1.html @@ -0,0 +1,170 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - cat1</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a 
href="../category/misc.html">misc</a></li> + <li class="active"><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <aside id="featured" class="body"> + <article> + <h1 class="entry-title"><a href="../posts/2011/avril/20/a-markdown-powered-article/">A markdown powered article</a></h1> +<footer class="post-info"> + <abbr class="published" title="2011-04-20T00:00:00"> + Published: 20 avril 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/cat1.html">cat1</a>. </p> + +</footer><!-- /.post-info --><p>You're mutually oblivious.</p> +<p><a href="../posts/2010/octobre/15/unbelievable/">a root-relative link to unbelievable</a> +<a href="../posts/2010/octobre/15/unbelievable/">a file-relative link to unbelievable</a></p><p>There are <a href="../posts/2011/avril/20/a-markdown-powered-article/#disqus_thread">comments</a>.</p> </article> + </aside><!-- /#featured --> + <section id="content" class="body"> + <h1>Other articles</h1> + <hr /> + <ol id="posts-list" class="hfeed"> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2011/février/17/article-1/" rel="bookmark" + title="Permalink to Article 1">Article 1</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/cat1.html">cat1</a>. </p> + +</footer><!-- /.post-info --> <p>Article 1</p> + + <a class="readmore" href="../posts/2011/février/17/article-1/">read more</a> +<p>There are <a href="../posts/2011/février/17/article-1/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2011/février/17/article-2/" rel="bookmark" + title="Permalink to Article 2">Article 2</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/cat1.html">cat1</a>. </p> + +</footer><!-- /.post-info --> <p>Article 2</p> + + <a class="readmore" href="../posts/2011/février/17/article-2/">read more</a> +<p>There are <a href="../posts/2011/février/17/article-2/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2011/février/17/article-3/" rel="bookmark" + title="Permalink to Article 3">Article 3</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/cat1.html">cat1</a>. 
</p> + +</footer><!-- /.post-info --> <p>Article 3</p> + + <a class="readmore" href="../posts/2011/février/17/article-3/">read more</a> +<p>There are <a href="../posts/2011/février/17/article-3/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + </ol><!-- /#posts-list --> +<p class="paginator"> + Page 1 / 1 +</p> + </section><!-- /#content --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/category/misc.html b/pelican/tests/output/custom_locale/category/misc.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/category/misc.html @@ -0,0 +1,181 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - misc</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test 
page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li class="active"><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <aside id="featured" class="body"> + <article> + <h1 class="entry-title"><a href="../posts/2012/novembre/30/filename_metadata-example/">FILENAME_METADATA example</a></h1> +<footer class="post-info"> + <abbr class="published" title="2012-11-30T00:00:00"> + Published: 30 novembre 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. </p> + +</footer><!-- /.post-info --><p>Some cool stuff!</p> +<p>There are <a href="../posts/2012/novembre/30/filename_metadata-example/#disqus_thread">comments</a>.</p> </article> + </aside><!-- /#featured --> + <section id="content" class="body"> + <h1>Other articles</h1> + <hr /> + <ol id="posts-list" class="hfeed"> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2012/février/29/second-article/" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2012-02-29T00:00:00"> + Published: 29 février 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. </p> +<p>tags: <a href="../tag/foo.html">foo</a> <a href="../tag/bar.html">bar</a> <a href="../tag/baz.html">baz</a> </p>Translations: + <a href="../second-article-fr.html">fr</a> + +</footer><!-- /.post-info --> <p>This is some article, in english</p> + + <a class="readmore" href="../posts/2012/février/29/second-article/">read more</a> +<p>There are <a href="../posts/2012/février/29/second-article/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2010/octobre/15/unbelievable/" rel="bookmark" + title="Permalink to Unbelievable !">Unbelievable !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-10-15T20:30:00"> + Published: 15 octobre 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. </p> + +</footer><!-- /.post-info --> <p>Or completely awesome. 
Depends the needs.</p> +<p><a class="reference external" href="../posts/2011/avril/20/a-markdown-powered-article/">a root-relative link to markdown-article</a> +<a class="reference external" href="../posts/2011/avril/20/a-markdown-powered-article/">a file-relative link to markdown-article</a></p> +<div class="section" id="testing-sourcecode-directive"> +<h2>Testing sourcecode directive</h2> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table></div> +<div class="section" id="testing-another-case"> +<h2>Testing another case</h2> +<p>This will now have a line number in 'custom' since it's the default in +pelican.conf, it ...</p></div> + <a class="readmore" href="../posts/2010/octobre/15/unbelievable/">read more</a> +<p>There are <a href="../posts/2010/octobre/15/unbelievable/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../tag/baz.html" rel="bookmark" + title="Permalink to The baz tag">The baz tag</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-03-14T00:00:00"> + Published: 14 mars 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. 
</p> + +</footer><!-- /.post-info --> <p>This article overrides the listening of the articles under the <em>baz</em> tag.</p> + + <a class="readmore" href="../tag/baz.html">read more</a> +<p>There are <a href="../tag/baz.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + </ol><!-- /#posts-list --> +<p class="paginator"> + Page 1 / 1 +</p> + </section><!-- /#content --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/category/yeah.html b/pelican/tests/output/custom_locale/category/yeah.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/category/yeah.html @@ -0,0 +1,109 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - yeah</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a 
href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li class="active"><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <aside id="featured" class="body"> + <article> + <h1 class="entry-title"><a href="../posts/2010/décembre/02/this-is-a-super-article/">This is a super article !</a></h1> +<footer class="post-info"> + <abbr class="published" title="2010-12-02T10:14:00"> + Published: 02 décembre 2010 + </abbr> + <br /> + <abbr class="modified" title="2013-11-17T23:29:00"> + Updated: 17 novembre 2013 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/yeah.html">yeah</a>. </p> +<p>tags: <a href="../tag/foo.html">foo</a> <a href="../tag/bar.html">bar</a> <a href="../tag/foobar.html">foobar</a> </p> +</footer><!-- /.post-info --><p>Some content here !</p> +<div class="section" id="this-is-a-simple-title"> +<h2>This is a simple title</h2> +<p>And here comes the cool <a class="reference external" href="http://books.couchdb.org/relax/design-documents/views">stuff</a>.</p> +<img alt="alternate text" src="../pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="../pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /> +<pre class="literal-block"> +&gt;&gt;&gt; from ipdb import set_trace +&gt;&gt;&gt; set_trace() +</pre> +<p>→ And now try with some utf8 hell: ééé</p> +</div> +<p>There are <a href="../posts/2010/décembre/02/this-is-a-super-article/#disqus_thread">comments</a>.</p> </article> +<p class="paginator"> + Page 1 / 1 +</p> + </aside><!-- /#featured --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/drafts/a-draft-article.html b/pelican/tests/output/custom_locale/drafts/a-draft-article.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/drafts/a-draft-article.html @@ -0,0 +1,100 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>A draft article</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li class="active"><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../drafts/a-draft-article.html" rel="bookmark" + title="Permalink to A draft article">A draft article</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2012-03-02T14:01:01"> + Published: 02 mars 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. 
</p> + +</footer><!-- /.post-info --> <p>This is a draft article, it should live under the /drafts/ folder and not be +listed anywhere else.</p> + + </div><!-- /.entry-content --> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml @@ -0,0 +1,61 @@ +<?xml version="1.0" encoding="utf-8"?> +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/November/30/filename_metadata-example/" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/November/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/February/29/second-article/" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/February/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category 
term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/" rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/April/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +&lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-1/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-2/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-3/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/December/02/this-is-a-super-article/" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/December/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;div class="section" id="this-is-a-simple-title"&gt; +&lt;h2&gt;This is a simple title&lt;/h2&gt; +&lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;pre class="literal-block"&gt; +&amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace +&amp;gt;&amp;gt;&amp;gt; set_trace() +&lt;/pre&gt; +&lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; +&lt;/div&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/October/20/oh-yeah/" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/October/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; +&lt;h2&gt;Why not ?&lt;/h2&gt; +&lt;p&gt;After all, why not ? 
It's pretty simple to do it, and it will allow me to write my blogposts in rst ! +YEAH !&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;/div&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/October/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +&lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; +&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;/div&gt; +&lt;div class="section" id="testing-another-case"&gt; +&lt;h2&gt;Testing another case&lt;/h2&gt; +&lt;p&gt;This will now have a line number in 'custom' since it's the default in +pelican.conf, it will have nothing in default.&lt;/p&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing more sourcecode directives&lt;/h2&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span id="foo-8"&gt;&lt;a name="foo-8"&gt;&lt;/a&gt;&lt;span class="lineno 
special"&gt; 8&lt;/span&gt; &lt;span class="testingk"&gt;def&lt;/span&gt; &lt;span class="testingnf"&gt;run&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-9"&gt;&lt;a name="foo-9"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;assert_has_content&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-10"&gt;&lt;a name="foo-10"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;10&lt;/span&gt; &lt;span class="testingk"&gt;try&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-11"&gt;&lt;a name="foo-11"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;get_lexer_by_name&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;arguments&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingmi"&gt;0&lt;/span&gt;&lt;span class="testingp"&gt;])&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-12"&gt;&lt;a name="foo-12"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;12&lt;/span&gt; &lt;span class="testingk"&gt;except&lt;/span&gt; &lt;span class="testingne"&gt;ValueError&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-13"&gt;&lt;a name="foo-13"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingc"&gt;# no lexer found - use the text one instead of an exception&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-14"&gt;&lt;a name="foo-14"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;14&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;TextLexer&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-15"&gt;&lt;a name="foo-15"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-16"&gt;&lt;a name="foo-16"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;16&lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt; &lt;span class="testingow"&gt;and&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-17"&gt;&lt;a name="foo-17"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingow"&gt;not&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span 
class="testings"&gt;&amp;#39;inline&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-18"&gt;&lt;a name="foo-18"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;18&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-19"&gt;&lt;a name="foo-19"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-20"&gt;&lt;a name="foo-20"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;20&lt;/span&gt; &lt;span class="testingk"&gt;for&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;nowrap&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;nobackground&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;anchorlinenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-21"&gt;&lt;a name="foo-21"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-22"&gt;&lt;a name="foo-22"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;22&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;flag&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingbp"&gt;True&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-23"&gt;&lt;a name="foo-23"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-24"&gt;&lt;a name="foo-24"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;24&lt;/span&gt; &lt;span class="testingc"&gt;# noclasses should already default to False, but just in case...&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-25"&gt;&lt;a name="foo-25"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;formatter&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;HtmlFormatter&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingn"&gt;noclasses&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testingbp"&gt;False&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingo"&gt;**&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-26"&gt;&lt;a name="foo-26"&gt;&lt;/a&gt;&lt;span class="lineno 
special"&gt;26&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;highlight&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingse"&gt;\n&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;join&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;content&lt;/span&gt;&lt;span class="testingp"&gt;),&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;formatter&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-27"&gt;&lt;a name="foo-27"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;return&lt;/span&gt; &lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;nodes&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;raw&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;format&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;html&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)]&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-even-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing even more sourcecode directives&lt;/h2&gt; +&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-overriding-config-defaults"&gt; +&lt;h2&gt;Testing overriding config defaults&lt;/h2&gt; +&lt;p&gt;Even if the default is line numbers, we can override it here&lt;/p&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +</summary></entry><entry><title>The baz tag</title><link 
href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.rss.xml b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.rss.xml new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.rss.xml @@ -0,0 +1,61 @@ +<?xml version="1.0" encoding="utf-8"?> +<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/alexis-metaireau.rss.xml" rel="self"></atom:link><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/November/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-11-30:posts/2012/November/30/filename_metadata-example/</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/February/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:posts/2012/February/29/second-article/</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +&lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2011-04-20:posts/2011/April/20/a-markdown-powered-article/</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/posts/2011/February/17/article-1/</link><description>&lt;p&gt;Article 1&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-1/</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/posts/2011/February/17/article-2/</link><description>&lt;p&gt;Article 2&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-2/</guid></item><item><title>Article 
3</title><link>http://blog.notmyidea.org/posts/2011/February/17/article-3/</link><description>&lt;p&gt;Article 3&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-3/</guid></item><item><title>This is a super article !</title><link>http://blog.notmyidea.org/posts/2010/December/02/this-is-a-super-article/</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +&lt;div class="section" id="this-is-a-simple-title"&gt; +&lt;h2&gt;This is a simple title&lt;/h2&gt; +&lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;pre class="literal-block"&gt; +&amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace +&amp;gt;&amp;gt;&amp;gt; set_trace() +&lt;/pre&gt; +&lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; +&lt;/div&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 17 Nov 2013 23:29:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-12-02:posts/2010/December/02/this-is-a-super-article/</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/posts/2010/October/20/oh-yeah/</link><description>&lt;div class="section" id="why-not"&gt; +&lt;h2&gt;Why not ?&lt;/h2&gt; +&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! +YEAH !&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;/div&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:posts/2010/October/20/oh-yeah/</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/October/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; +&lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; +&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;/div&gt; +&lt;div class="section" id="testing-another-case"&gt; +&lt;h2&gt;Testing another case&lt;/h2&gt; +&lt;p&gt;This will now have a line number in 'custom' since it's the default in +pelican.conf, it will have nothing in default.&lt;/p&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing more sourcecode directives&lt;/h2&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span id="foo-8"&gt;&lt;a name="foo-8"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt; 8&lt;/span&gt; &lt;span class="testingk"&gt;def&lt;/span&gt; &lt;span class="testingnf"&gt;run&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-9"&gt;&lt;a name="foo-9"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;assert_has_content&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-10"&gt;&lt;a name="foo-10"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;10&lt;/span&gt; &lt;span 
class="testingk"&gt;try&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-11"&gt;&lt;a name="foo-11"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;get_lexer_by_name&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;arguments&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingmi"&gt;0&lt;/span&gt;&lt;span class="testingp"&gt;])&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-12"&gt;&lt;a name="foo-12"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;12&lt;/span&gt; &lt;span class="testingk"&gt;except&lt;/span&gt; &lt;span class="testingne"&gt;ValueError&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-13"&gt;&lt;a name="foo-13"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingc"&gt;# no lexer found - use the text one instead of an exception&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-14"&gt;&lt;a name="foo-14"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;14&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;TextLexer&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-15"&gt;&lt;a name="foo-15"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-16"&gt;&lt;a name="foo-16"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;16&lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt; &lt;span class="testingow"&gt;and&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-17"&gt;&lt;a name="foo-17"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingow"&gt;not&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;inline&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-18"&gt;&lt;a name="foo-18"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;18&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-19"&gt;&lt;a name="foo-19"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; 
&lt;br&gt;&lt;/span&gt;&lt;span id="foo-20"&gt;&lt;a name="foo-20"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;20&lt;/span&gt; &lt;span class="testingk"&gt;for&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;nowrap&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;nobackground&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;anchorlinenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-21"&gt;&lt;a name="foo-21"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-22"&gt;&lt;a name="foo-22"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;22&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;flag&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingbp"&gt;True&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-23"&gt;&lt;a name="foo-23"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-24"&gt;&lt;a name="foo-24"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;24&lt;/span&gt; &lt;span class="testingc"&gt;# noclasses should already default to False, but just in case...&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-25"&gt;&lt;a name="foo-25"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;formatter&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;HtmlFormatter&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingn"&gt;noclasses&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testingbp"&gt;False&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingo"&gt;**&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-26"&gt;&lt;a name="foo-26"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;26&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;highlight&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingse"&gt;\n&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;join&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;content&lt;/span&gt;&lt;span class="testingp"&gt;),&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; 
&lt;span class="testingn"&gt;formatter&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-27"&gt;&lt;a name="foo-27"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;return&lt;/span&gt; &lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;nodes&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;raw&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;format&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;html&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)]&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-even-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing even more sourcecode directives&lt;/h2&gt; +&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-overriding-config-defaults"&gt; +&lt;h2&gt;Testing overriding config defaults&lt;/h2&gt; +&lt;p&gt;Even if the default is line numbers, we can override it here&lt;/p&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:posts/2010/October/15/unbelievable/</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all-en.atom.xml b/pelican/tests/output/custom_locale/feeds/all-en.atom.xml 
new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/all-en.atom.xml @@ -0,0 +1,61 @@ +<?xml version="1.0" encoding="utf-8"?> +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-en.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/November/30/filename_metadata-example/" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/November/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/February/29/second-article/" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/February/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/" rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/April/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +&lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-1/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-2/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-3/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/December/02/this-is-a-super-article/" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/December/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;div 
class="section" id="this-is-a-simple-title"&gt; +&lt;h2&gt;This is a simple title&lt;/h2&gt; +&lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;pre class="literal-block"&gt; +&amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace +&amp;gt;&amp;gt;&amp;gt; set_trace() +&lt;/pre&gt; +&lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; +&lt;/div&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/October/20/oh-yeah/" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/October/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; +&lt;h2&gt;Why not ?&lt;/h2&gt; +&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! +YEAH !&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;/div&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/October/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; +&lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; +&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;/div&gt; +&lt;div class="section" id="testing-another-case"&gt; +&lt;h2&gt;Testing another case&lt;/h2&gt; +&lt;p&gt;This will now have a line number in 'custom' since it's the default in +pelican.conf, it will have nothing in default.&lt;/p&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing more sourcecode directives&lt;/h2&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span id="foo-8"&gt;&lt;a name="foo-8"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt; 8&lt;/span&gt; &lt;span class="testingk"&gt;def&lt;/span&gt; &lt;span class="testingnf"&gt;run&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-9"&gt;&lt;a name="foo-9"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;assert_has_content&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-10"&gt;&lt;a name="foo-10"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;10&lt;/span&gt; &lt;span 
class="testingk"&gt;try&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-11"&gt;&lt;a name="foo-11"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;get_lexer_by_name&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;arguments&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingmi"&gt;0&lt;/span&gt;&lt;span class="testingp"&gt;])&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-12"&gt;&lt;a name="foo-12"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;12&lt;/span&gt; &lt;span class="testingk"&gt;except&lt;/span&gt; &lt;span class="testingne"&gt;ValueError&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-13"&gt;&lt;a name="foo-13"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingc"&gt;# no lexer found - use the text one instead of an exception&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-14"&gt;&lt;a name="foo-14"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;14&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;TextLexer&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-15"&gt;&lt;a name="foo-15"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-16"&gt;&lt;a name="foo-16"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;16&lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt; &lt;span class="testingow"&gt;and&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-17"&gt;&lt;a name="foo-17"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingow"&gt;not&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;inline&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-18"&gt;&lt;a name="foo-18"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;18&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-19"&gt;&lt;a name="foo-19"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; 
&lt;br&gt;&lt;/span&gt;&lt;span id="foo-20"&gt;&lt;a name="foo-20"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;20&lt;/span&gt; &lt;span class="testingk"&gt;for&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;nowrap&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;nobackground&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;anchorlinenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-21"&gt;&lt;a name="foo-21"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-22"&gt;&lt;a name="foo-22"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;22&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;flag&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingbp"&gt;True&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-23"&gt;&lt;a name="foo-23"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-24"&gt;&lt;a name="foo-24"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;24&lt;/span&gt; &lt;span class="testingc"&gt;# noclasses should already default to False, but just in case...&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-25"&gt;&lt;a name="foo-25"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;formatter&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;HtmlFormatter&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingn"&gt;noclasses&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testingbp"&gt;False&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingo"&gt;**&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-26"&gt;&lt;a name="foo-26"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;26&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;highlight&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingse"&gt;\n&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;join&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;content&lt;/span&gt;&lt;span class="testingp"&gt;),&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; 
&lt;span class="testingn"&gt;formatter&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-27"&gt;&lt;a name="foo-27"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;return&lt;/span&gt; &lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;nodes&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;raw&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;format&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;html&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)]&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-even-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing even more sourcecode directives&lt;/h2&gt; +&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-overriding-config-defaults"&gt; +&lt;h2&gt;Testing overriding config defaults&lt;/h2&gt; +&lt;p&gt;Even if the default is line numbers, we can override it here&lt;/p&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml b/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml @@ -0,0 +1,4 @@ +<?xml version="1.0" encoding="utf-8"?> +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link 
href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-03-02T14:01:01+01:00</updated><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</summary></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all.atom.xml b/pelican/tests/output/custom_locale/feeds/all.atom.xml new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/all.atom.xml @@ -0,0 +1,63 @@ +<?xml version="1.0" encoding="utf-8"?> +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/November/30/filename_metadata-example/" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/November/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/February/29/second-article/" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/February/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/" 
rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/April/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +&lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-1/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-2/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-3/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/December/02/this-is-a-super-article/" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/December/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;div class="section" id="this-is-a-simple-title"&gt; +&lt;h2&gt;This is a simple title&lt;/h2&gt; +&lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;pre class="literal-block"&gt; +&amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace +&amp;gt;&amp;gt;&amp;gt; set_trace() +&lt;/pre&gt; +&lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; +&lt;/div&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/October/20/oh-yeah/" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/October/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; +&lt;h2&gt;Why not ?&lt;/h2&gt; +&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
+YEAH !&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;/div&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/October/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +&lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; +&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;/div&gt; +&lt;div class="section" id="testing-another-case"&gt; +&lt;h2&gt;Testing another case&lt;/h2&gt; +&lt;p&gt;This will now have a line number in 'custom' since it's the default in +pelican.conf, it will have nothing in default.&lt;/p&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing more sourcecode directives&lt;/h2&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span id="foo-8"&gt;&lt;a name="foo-8"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt; 8&lt;/span&gt; &lt;span class="testingk"&gt;def&lt;/span&gt; &lt;span 
class="testingnf"&gt;run&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-9"&gt;&lt;a name="foo-9"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;assert_has_content&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-10"&gt;&lt;a name="foo-10"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;10&lt;/span&gt; &lt;span class="testingk"&gt;try&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-11"&gt;&lt;a name="foo-11"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;get_lexer_by_name&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;arguments&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingmi"&gt;0&lt;/span&gt;&lt;span class="testingp"&gt;])&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-12"&gt;&lt;a name="foo-12"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;12&lt;/span&gt; &lt;span class="testingk"&gt;except&lt;/span&gt; &lt;span class="testingne"&gt;ValueError&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-13"&gt;&lt;a name="foo-13"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingc"&gt;# no lexer found - use the text one instead of an exception&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-14"&gt;&lt;a name="foo-14"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;14&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;TextLexer&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-15"&gt;&lt;a name="foo-15"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-16"&gt;&lt;a name="foo-16"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;16&lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt; &lt;span class="testingow"&gt;and&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-17"&gt;&lt;a name="foo-17"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingow"&gt;not&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;inline&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span 
id="foo-18"&gt;&lt;a name="foo-18"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;18&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-19"&gt;&lt;a name="foo-19"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-20"&gt;&lt;a name="foo-20"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;20&lt;/span&gt; &lt;span class="testingk"&gt;for&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;nowrap&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;nobackground&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;anchorlinenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-21"&gt;&lt;a name="foo-21"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-22"&gt;&lt;a name="foo-22"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;22&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;flag&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingbp"&gt;True&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-23"&gt;&lt;a name="foo-23"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-24"&gt;&lt;a name="foo-24"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;24&lt;/span&gt; &lt;span class="testingc"&gt;# noclasses should already default to False, but just in case...&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-25"&gt;&lt;a name="foo-25"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;formatter&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;HtmlFormatter&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingn"&gt;noclasses&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testingbp"&gt;False&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingo"&gt;**&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-26"&gt;&lt;a name="foo-26"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;26&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span 
class="testingn"&gt;highlight&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingse"&gt;\n&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;join&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;content&lt;/span&gt;&lt;span class="testingp"&gt;),&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;formatter&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-27"&gt;&lt;a name="foo-27"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;return&lt;/span&gt; &lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;nodes&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;raw&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;format&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;html&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)]&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-even-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing even more sourcecode directives&lt;/h2&gt; +&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-overriding-config-defaults"&gt; +&lt;h2&gt;Testing overriding config defaults&lt;/h2&gt; +&lt;p&gt;Even if the default is line numbers, we can override it here&lt;/p&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all.rss.xml b/pelican/tests/output/custom_locale/feeds/all.rss.xml new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/all.rss.xml @@ -0,0 +1,63 @@ +<?xml version="1.0" encoding="utf-8"?> +<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/all.rss.xml" rel="self"></atom:link><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/November/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-11-30:posts/2012/November/30/filename_metadata-example/</guid></item><item><title>Trop bien !</title><link>http://blog.notmyidea.org/oh-yeah-fr.html</link><description>&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 02 Mar 2012 14:01:01 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/February/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:posts/2012/February/29/second-article/</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Deuxième article</title><link>http://blog.notmyidea.org/second-article-fr.html</link><description>&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +&lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2011-04-20:posts/2011/April/20/a-markdown-powered-article/</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/posts/2011/February/17/article-1/</link><description>&lt;p&gt;Article 1&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis 
Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-1/</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/posts/2011/February/17/article-2/</link><description>&lt;p&gt;Article 2&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-2/</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/posts/2011/February/17/article-3/</link><description>&lt;p&gt;Article 3&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-3/</guid></item><item><title>This is a super article !</title><link>http://blog.notmyidea.org/posts/2010/December/02/this-is-a-super-article/</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +&lt;div class="section" id="this-is-a-simple-title"&gt; +&lt;h2&gt;This is a simple title&lt;/h2&gt; +&lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;pre class="literal-block"&gt; +&amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace +&amp;gt;&amp;gt;&amp;gt; set_trace() +&lt;/pre&gt; +&lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; +&lt;/div&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 17 Nov 2013 23:29:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-12-02:posts/2010/December/02/this-is-a-super-article/</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/posts/2010/October/20/oh-yeah/</link><description>&lt;div class="section" id="why-not"&gt; +&lt;h2&gt;Why not ?&lt;/h2&gt; +&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! +YEAH !&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;/div&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:posts/2010/October/20/oh-yeah/</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/October/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; +&lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; +&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;/div&gt; +&lt;div class="section" id="testing-another-case"&gt; +&lt;h2&gt;Testing another case&lt;/h2&gt; +&lt;p&gt;This will now have a line number in 'custom' since it's the default in +pelican.conf, it will have nothing in default.&lt;/p&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing more sourcecode directives&lt;/h2&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span id="foo-8"&gt;&lt;a name="foo-8"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt; 8&lt;/span&gt; &lt;span class="testingk"&gt;def&lt;/span&gt; &lt;span class="testingnf"&gt;run&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-9"&gt;&lt;a name="foo-9"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;assert_has_content&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-10"&gt;&lt;a name="foo-10"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;10&lt;/span&gt; &lt;span 
class="testingk"&gt;try&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-11"&gt;&lt;a name="foo-11"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;get_lexer_by_name&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;arguments&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingmi"&gt;0&lt;/span&gt;&lt;span class="testingp"&gt;])&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-12"&gt;&lt;a name="foo-12"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;12&lt;/span&gt; &lt;span class="testingk"&gt;except&lt;/span&gt; &lt;span class="testingne"&gt;ValueError&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-13"&gt;&lt;a name="foo-13"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingc"&gt;# no lexer found - use the text one instead of an exception&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-14"&gt;&lt;a name="foo-14"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;14&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;TextLexer&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-15"&gt;&lt;a name="foo-15"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-16"&gt;&lt;a name="foo-16"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;16&lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt; &lt;span class="testingow"&gt;and&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-17"&gt;&lt;a name="foo-17"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingow"&gt;not&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;inline&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-18"&gt;&lt;a name="foo-18"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;18&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-19"&gt;&lt;a name="foo-19"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; 
&lt;br&gt;&lt;/span&gt;&lt;span id="foo-20"&gt;&lt;a name="foo-20"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;20&lt;/span&gt; &lt;span class="testingk"&gt;for&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;nowrap&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;nobackground&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;anchorlinenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-21"&gt;&lt;a name="foo-21"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-22"&gt;&lt;a name="foo-22"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;22&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;flag&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingbp"&gt;True&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-23"&gt;&lt;a name="foo-23"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-24"&gt;&lt;a name="foo-24"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;24&lt;/span&gt; &lt;span class="testingc"&gt;# noclasses should already default to False, but just in case...&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-25"&gt;&lt;a name="foo-25"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;formatter&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;HtmlFormatter&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingn"&gt;noclasses&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testingbp"&gt;False&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingo"&gt;**&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-26"&gt;&lt;a name="foo-26"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;26&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;highlight&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingse"&gt;\n&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;join&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;content&lt;/span&gt;&lt;span class="testingp"&gt;),&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; 
&lt;span class="testingn"&gt;formatter&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-27"&gt;&lt;a name="foo-27"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;return&lt;/span&gt; &lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;nodes&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;raw&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;format&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;html&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)]&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-even-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing even more sourcecode directives&lt;/h2&gt; +&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-overriding-config-defaults"&gt; +&lt;h2&gt;Testing overriding config defaults&lt;/h2&gt; +&lt;p&gt;Even if the default is line numbers, we can override it here&lt;/p&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:posts/2010/October/15/unbelievable/</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/bar.atom.xml b/pelican/tests/output/custom_locale/feeds/bar.atom.xml new 
file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/bar.atom.xml @@ -0,0 +1,8 @@ +<?xml version="1.0" encoding="utf-8"?> +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/October/20/oh-yeah/" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/October/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; +&lt;h2&gt;Why not ?&lt;/h2&gt; +&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! +YEAH !&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;/div&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/bar.rss.xml b/pelican/tests/output/custom_locale/feeds/bar.rss.xml new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/bar.rss.xml @@ -0,0 +1,8 @@ +<?xml version="1.0" encoding="utf-8"?> +<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/bar.rss.xml" rel="self"></atom:link><lastBuildDate>Wed, 20 Oct 2010 10:14:00 +0200</lastBuildDate><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/posts/2010/October/20/oh-yeah/</link><description>&lt;div class="section" id="why-not"&gt; +&lt;h2&gt;Why not ?&lt;/h2&gt; +&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
+YEAH !&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;/div&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:posts/2010/October/20/oh-yeah/</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/cat1.atom.xml b/pelican/tests/output/custom_locale/feeds/cat1.atom.xml new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/cat1.atom.xml @@ -0,0 +1,7 @@ +<?xml version="1.0" encoding="utf-8"?> +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/" rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/April/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +&lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-1/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-2/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/February/17/article-3/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/cat1.rss.xml b/pelican/tests/output/custom_locale/feeds/cat1.rss.xml new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/cat1.rss.xml @@ -0,0 +1,7 @@ +<?xml version="1.0" encoding="utf-8"?> +<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/cat1.rss.xml" rel="self"></atom:link><lastBuildDate>Wed, 20 Apr 2011 00:00:00 +0200</lastBuildDate><item><title>A markdown powered 
article</title><link>http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +&lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2011-04-20:posts/2011/April/20/a-markdown-powered-article/</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/posts/2011/February/17/article-1/</link><description>&lt;p&gt;Article 1&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-1/</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/posts/2011/February/17/article-2/</link><description>&lt;p&gt;Article 2&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-2/</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/posts/2011/February/17/article-3/</link><description>&lt;p&gt;Article 3&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/February/17/article-3/</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/misc.atom.xml b/pelican/tests/output/custom_locale/feeds/misc.atom.xml new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/misc.atom.xml @@ -0,0 +1,38 @@ +<?xml version="1.0" encoding="utf-8"?> +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/November/30/filename_metadata-example/" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/November/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/February/29/second-article/" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/February/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/October/15/unbelievable/" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/October/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +&lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; +&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;/div&gt; +&lt;div class="section" id="testing-another-case"&gt; +&lt;h2&gt;Testing another case&lt;/h2&gt; +&lt;p&gt;This will now have a line number in 'custom' since it's the default in +pelican.conf, it will have nothing in default.&lt;/p&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing more sourcecode directives&lt;/h2&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span id="foo-8"&gt;&lt;a name="foo-8"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt; 8&lt;/span&gt; &lt;span class="testingk"&gt;def&lt;/span&gt; &lt;span class="testingnf"&gt;run&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-9"&gt;&lt;a name="foo-9"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;assert_has_content&lt;/span&gt;&lt;span 
class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-10"&gt;&lt;a name="foo-10"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;10&lt;/span&gt; &lt;span class="testingk"&gt;try&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-11"&gt;&lt;a name="foo-11"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;get_lexer_by_name&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;arguments&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingmi"&gt;0&lt;/span&gt;&lt;span class="testingp"&gt;])&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-12"&gt;&lt;a name="foo-12"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;12&lt;/span&gt; &lt;span class="testingk"&gt;except&lt;/span&gt; &lt;span class="testingne"&gt;ValueError&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-13"&gt;&lt;a name="foo-13"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingc"&gt;# no lexer found - use the text one instead of an exception&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-14"&gt;&lt;a name="foo-14"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;14&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;TextLexer&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-15"&gt;&lt;a name="foo-15"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-16"&gt;&lt;a name="foo-16"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;16&lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt; &lt;span class="testingow"&gt;and&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-17"&gt;&lt;a name="foo-17"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingow"&gt;not&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;inline&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-18"&gt;&lt;a name="foo-18"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;18&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span 
class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-19"&gt;&lt;a name="foo-19"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-20"&gt;&lt;a name="foo-20"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;20&lt;/span&gt; &lt;span class="testingk"&gt;for&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;nowrap&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;nobackground&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;anchorlinenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-21"&gt;&lt;a name="foo-21"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-22"&gt;&lt;a name="foo-22"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;22&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;flag&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingbp"&gt;True&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-23"&gt;&lt;a name="foo-23"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-24"&gt;&lt;a name="foo-24"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;24&lt;/span&gt; &lt;span class="testingc"&gt;# noclasses should already default to False, but just in case...&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-25"&gt;&lt;a name="foo-25"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;formatter&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;HtmlFormatter&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingn"&gt;noclasses&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testingbp"&gt;False&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingo"&gt;**&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-26"&gt;&lt;a name="foo-26"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;26&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;highlight&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingse"&gt;\n&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;join&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span 
class="testingn"&gt;content&lt;/span&gt;&lt;span class="testingp"&gt;),&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;formatter&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-27"&gt;&lt;a name="foo-27"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;return&lt;/span&gt; &lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;nodes&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;raw&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;format&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;html&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)]&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-even-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing even more sourcecode directives&lt;/h2&gt; +&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-overriding-config-defaults"&gt; +&lt;h2&gt;Testing overriding config defaults&lt;/h2&gt; +&lt;p&gt;Even if the default is line numbers, we can override it here&lt;/p&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/misc.rss.xml b/pelican/tests/output/custom_locale/feeds/misc.rss.xml new file mode 100644 --- /dev/null +++ 
b/pelican/tests/output/custom_locale/feeds/misc.rss.xml @@ -0,0 +1,38 @@ +<?xml version="1.0" encoding="utf-8"?> +<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/misc.rss.xml" rel="self"></atom:link><lastBuildDate>Fri, 30 Nov 2012 00:00:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/November/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-11-30:posts/2012/November/30/filename_metadata-example/</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/February/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:posts/2012/February/29/second-article/</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/October/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +&lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; +&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/April/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; +&lt;div class="section" id="testing-sourcecode-directive"&gt; +&lt;h2&gt;Testing sourcecode directive&lt;/h2&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;/div&gt; +&lt;div class="section" id="testing-another-case"&gt; +&lt;h2&gt;Testing another case&lt;/h2&gt; +&lt;p&gt;This will now have a line number in 'custom' since it's the default in +pelican.conf, it will have nothing in default.&lt;/p&gt; +&lt;table class="highlighttable"&gt;&lt;tr&gt;&lt;td class="linenos"&gt;&lt;div class="linenodiv"&gt;&lt;pre&gt;1&lt;/pre&gt;&lt;/div&gt;&lt;/td&gt;&lt;td class="code"&gt;&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span 
class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing more sourcecode directives&lt;/h2&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span id="foo-8"&gt;&lt;a name="foo-8"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt; 8&lt;/span&gt; &lt;span class="testingk"&gt;def&lt;/span&gt; &lt;span class="testingnf"&gt;run&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-9"&gt;&lt;a name="foo-9"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;assert_has_content&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-10"&gt;&lt;a name="foo-10"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;10&lt;/span&gt; &lt;span class="testingk"&gt;try&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-11"&gt;&lt;a name="foo-11"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;get_lexer_by_name&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;arguments&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingmi"&gt;0&lt;/span&gt;&lt;span class="testingp"&gt;])&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-12"&gt;&lt;a name="foo-12"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;12&lt;/span&gt; &lt;span class="testingk"&gt;except&lt;/span&gt; &lt;span class="testingne"&gt;ValueError&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-13"&gt;&lt;a name="foo-13"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingc"&gt;# no lexer found - use the text one instead of an exception&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-14"&gt;&lt;a name="foo-14"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;14&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;TextLexer&lt;/span&gt;&lt;span class="testingp"&gt;()&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-15"&gt;&lt;a name="foo-15"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-16"&gt;&lt;a name="foo-16"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;16&lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span 
class="testingn"&gt;options&lt;/span&gt; &lt;span class="testingow"&gt;and&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-17"&gt;&lt;a name="foo-17"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingow"&gt;not&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;inline&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-18"&gt;&lt;a name="foo-18"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;18&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;linenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;table&amp;#39;&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-19"&gt;&lt;a name="foo-19"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-20"&gt;&lt;a name="foo-20"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;20&lt;/span&gt; &lt;span class="testingk"&gt;for&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;nowrap&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;nobackground&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testings"&gt;&amp;#39;anchorlinenos&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;):&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-21"&gt;&lt;a name="foo-21"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;if&lt;/span&gt; &lt;span class="testingn"&gt;flag&lt;/span&gt; &lt;span class="testingow"&gt;in&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;:&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-22"&gt;&lt;a name="foo-22"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;22&lt;/span&gt; &lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;flag&lt;/span&gt;&lt;span class="testingp"&gt;]&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingbp"&gt;True&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-23"&gt;&lt;a name="foo-23"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;br&gt;&lt;/span&gt;&lt;span id="foo-24"&gt;&lt;a name="foo-24"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;24&lt;/span&gt; &lt;span class="testingc"&gt;# noclasses should already default to False, but just in case...&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-25"&gt;&lt;a name="foo-25"&gt;&lt;/a&gt;&lt;span 
class="lineno"&gt; &lt;/span&gt; &lt;span class="testingn"&gt;formatter&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;HtmlFormatter&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingn"&gt;noclasses&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testingbp"&gt;False&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingo"&gt;**&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;options&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-26"&gt;&lt;a name="foo-26"&gt;&lt;/a&gt;&lt;span class="lineno special"&gt;26&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt; &lt;span class="testingo"&gt;=&lt;/span&gt; &lt;span class="testingn"&gt;highlight&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingse"&gt;\n&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;join&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testingbp"&gt;self&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;content&lt;/span&gt;&lt;span class="testingp"&gt;),&lt;/span&gt; &lt;span class="testingn"&gt;lexer&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;formatter&lt;/span&gt;&lt;span class="testingp"&gt;)&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;span id="foo-27"&gt;&lt;a name="foo-27"&gt;&lt;/a&gt;&lt;span class="lineno"&gt; &lt;/span&gt; &lt;span class="testingk"&gt;return&lt;/span&gt; &lt;span class="testingp"&gt;[&lt;/span&gt;&lt;span class="testingn"&gt;nodes&lt;/span&gt;&lt;span class="testingo"&gt;.&lt;/span&gt;&lt;span class="testingn"&gt;raw&lt;/span&gt;&lt;span class="testingp"&gt;(&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;parsed&lt;/span&gt;&lt;span class="testingp"&gt;,&lt;/span&gt; &lt;span class="testingn"&gt;format&lt;/span&gt;&lt;span class="testingo"&gt;=&lt;/span&gt;&lt;span class="testings"&gt;&amp;#39;html&amp;#39;&lt;/span&gt;&lt;span class="testingp"&gt;)]&lt;/span&gt;&lt;br&gt;&lt;/span&gt;&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-even-more-sourcecode-directives"&gt; +&lt;h2&gt;Testing even more sourcecode directives&lt;/h2&gt; +&lt;span class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +&lt;div class="section" id="testing-overriding-config-defaults"&gt; +&lt;h2&gt;Testing overriding config defaults&lt;/h2&gt; +&lt;p&gt;Even if the default is line numbers, we can override it here&lt;/p&gt; +&lt;div class="highlight"&gt;&lt;pre&gt;&lt;span 
class="n"&gt;formatter&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt; &lt;span class="ow"&gt;and&lt;/span&gt; &lt;span class="n"&gt;VARIANTS&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="bp"&gt;self&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;options&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;keys&lt;/span&gt;&lt;span class="p"&gt;()[&lt;/span&gt;&lt;span class="mi"&gt;0&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; +&lt;/pre&gt;&lt;/div&gt; +&lt;p&gt;Lovely.&lt;/p&gt; +&lt;/div&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:posts/2010/October/15/unbelievable/</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/yeah.atom.xml b/pelican/tests/output/custom_locale/feeds/yeah.atom.xml new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/yeah.atom.xml @@ -0,0 +1,14 @@ +<?xml version="1.0" encoding="utf-8"?> +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/yeah.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/December/02/this-is-a-super-article/" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/December/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;div class="section" id="this-is-a-simple-title"&gt; +&lt;h2&gt;This is a simple title&lt;/h2&gt; +&lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;pre class="literal-block"&gt; +&amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace +&amp;gt;&amp;gt;&amp;gt; set_trace() +&lt;/pre&gt; +&lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; +&lt;/div&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/yeah.rss.xml b/pelican/tests/output/custom_locale/feeds/yeah.rss.xml new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/feeds/yeah.rss.xml @@ -0,0 +1,14 @@ +<?xml version="1.0" encoding="utf-8"?> +<rss version="2.0" 
xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/yeah.rss.xml" rel="self"></atom:link><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>This is a super article !</title><link>http://blog.notmyidea.org/posts/2010/December/02/this-is-a-super-article/</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +&lt;div class="section" id="this-is-a-simple-title"&gt; +&lt;h2&gt;This is a simple title&lt;/h2&gt; +&lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /&gt; +&lt;pre class="literal-block"&gt; +&amp;gt;&amp;gt;&amp;gt; from ipdb import set_trace +&amp;gt;&amp;gt;&amp;gt; set_trace() +&lt;/pre&gt; +&lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; +&lt;/div&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 17 Nov 2013 23:29:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-12-02:posts/2010/December/02/this-is-a-super-article/</guid><category>foo</category><category>bar</category><category>foobar</category></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/index.html b/pelican/tests/output/custom_locale/index.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/index.html @@ -0,0 +1,173 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log</title> + <link rel="stylesheet" href="./theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="./">Alexis' log </a></h1> + <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> + <li><a href="./override/">Override url/save_as</a></li> + <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="./category/yeah.html">yeah</a></li> + <li><a href="./category/misc.html">misc</a></li> + <li><a href="./category/cat1.html">cat1</a></li> + <li><a href="./category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <aside id="featured" class="body"> + <article> + <h1 class="entry-title"><a href="./posts/2012/novembre/30/filename_metadata-example/">FILENAME_METADATA example</a></h1> +<footer class="post-info"> + <abbr class="published" title="2012-11-30T00:00:00"> + Published: 30 novembre 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a 
href="./category/misc.html">misc</a>. </p> + +</footer><!-- /.post-info --><p>Some cool stuff!</p> +<p>There are <a href="./posts/2012/novembre/30/filename_metadata-example/#disqus_thread">comments</a>.</p> </article> + </aside><!-- /#featured --> + <section id="content" class="body"> + <h1>Other articles</h1> + <hr /> + <ol id="posts-list" class="hfeed"> + + <li><article class="hentry"> + <header> + <h1><a href="./posts/2012/février/29/second-article/" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2012-02-29T00:00:00"> + Published: 29 février 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/misc.html">misc</a>. </p> +<p>tags: <a href="./tag/foo.html">foo</a> <a href="./tag/bar.html">bar</a> <a href="./tag/baz.html">baz</a> </p>Translations: + <a href="./second-article-fr.html">fr</a> + +</footer><!-- /.post-info --> <p>This is some article, in english</p> + + <a class="readmore" href="./posts/2012/février/29/second-article/">read more</a> +<p>There are <a href="./posts/2012/février/29/second-article/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="./posts/2011/avril/20/a-markdown-powered-article/" rel="bookmark" + title="Permalink to A markdown powered article">A markdown powered article</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-04-20T00:00:00"> + Published: 20 avril 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/cat1.html">cat1</a>. </p> + +</footer><!-- /.post-info --> <p>You're mutually oblivious.</p> +<p><a href="./posts/2010/octobre/15/unbelievable/">a root-relative link to unbelievable</a> +<a href="./posts/2010/octobre/15/unbelievable/">a file-relative link to unbelievable</a></p> + <a class="readmore" href="./posts/2011/avril/20/a-markdown-powered-article/">read more</a> +<p>There are <a href="./posts/2011/avril/20/a-markdown-powered-article/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="./posts/2011/février/17/article-1/" rel="bookmark" + title="Permalink to Article 1">Article 1</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/cat1.html">cat1</a>. 
</p> + +</footer><!-- /.post-info --> <p>Article 1</p> + + <a class="readmore" href="./posts/2011/février/17/article-1/">read more</a> +<p>There are <a href="./posts/2011/février/17/article-1/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + </ol><!-- /#posts-list --> +<p class="paginator"> + Page 1 / 3 + <a href="./index2.html">&raquo;</a> +</p> + </section><!-- /#content --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/index2.html b/pelican/tests/output/custom_locale/index2.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/index2.html @@ -0,0 +1,187 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log</title> + <link rel="stylesheet" href="./theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="./">Alexis' log </a></h1> + <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> + <li><a href="./override/">Override url/save_as</a></li> + <li><a href="./pages/this-is-a-test-page.html">This is a 
test page</a></li> + <li><a href="./category/yeah.html">yeah</a></li> + <li><a href="./category/misc.html">misc</a></li> + <li><a href="./category/cat1.html">cat1</a></li> + <li><a href="./category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <section id="content" class="body"> + <ol id="posts-list" class="hfeed" start="3"> + <li><article class="hentry"> + <header> + <h1><a href="./posts/2011/février/17/article-2/" rel="bookmark" + title="Permalink to Article 2">Article 2</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/cat1.html">cat1</a>. </p> + +</footer><!-- /.post-info --> <p>Article 2</p> + + <a class="readmore" href="./posts/2011/février/17/article-2/">read more</a> +<p>There are <a href="./posts/2011/février/17/article-2/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="./posts/2011/février/17/article-3/" rel="bookmark" + title="Permalink to Article 3">Article 3</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/cat1.html">cat1</a>. </p> + +</footer><!-- /.post-info --> <p>Article 3</p> + + <a class="readmore" href="./posts/2011/février/17/article-3/">read more</a> +<p>There are <a href="./posts/2011/février/17/article-3/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="./posts/2010/décembre/02/this-is-a-super-article/" rel="bookmark" + title="Permalink to This is a super article !">This is a super article !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-12-02T10:14:00"> + Published: 02 décembre 2010 + </abbr> + <br /> + <abbr class="modified" title="2013-11-17T23:29:00"> + Updated: 17 novembre 2013 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/yeah.html">yeah</a>. 
</p> +<p>tags: <a href="./tag/foo.html">foo</a> <a href="./tag/bar.html">bar</a> <a href="./tag/foobar.html">foobar</a> </p> +</footer><!-- /.post-info --> <p class="first last">Multi-line metadata should be supported +as well as <strong>inline markup</strong>.</p> + + <a class="readmore" href="./posts/2010/décembre/02/this-is-a-super-article/">read more</a> +<p>There are <a href="./posts/2010/décembre/02/this-is-a-super-article/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="./posts/2010/octobre/20/oh-yeah/" rel="bookmark" + title="Permalink to Oh yeah !">Oh yeah !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-10-20T10:14:00"> + Published: 20 octobre 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/bar.html">bar</a>. </p> +<p>tags: <a href="./tag/oh.html">oh</a> <a href="./tag/bar.html">bar</a> <a href="./tag/yeah.html">yeah</a> </p>Translations: + <a href="./oh-yeah-fr.html">fr</a> + +</footer><!-- /.post-info --> <div class="section" id="why-not"> +<h2>Why not ?</h2> +<p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! +YEAH !</p> +<img alt="alternate text" src="./pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +</div> + + <a class="readmore" href="./posts/2010/octobre/20/oh-yeah/">read more</a> +<p>There are <a href="./posts/2010/octobre/20/oh-yeah/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + </ol><!-- /#posts-list --> +<p class="paginator"> + <a href="./index.html">&laquo;</a> + Page 2 / 3 + <a href="./index3.html">&raquo;</a> +</p> + </section><!-- /#content --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/index3.html b/pelican/tests/output/custom_locale/index3.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/index3.html @@ -0,0 +1,138 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log</title> + <link rel="stylesheet" href="./theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="./">Alexis' log </a></h1> + <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> + <li><a href="./override/">Override url/save_as</a></li> + <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="./category/yeah.html">yeah</a></li> + <li><a href="./category/misc.html">misc</a></li> + <li><a href="./category/cat1.html">cat1</a></li> + <li><a href="./category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <section id="content" class="body"> + <ol id="posts-list" class="hfeed" start="3"> + <li><article class="hentry"> + <header> + <h1><a href="./posts/2010/octobre/15/unbelievable/" rel="bookmark" + title="Permalink to Unbelievable !">Unbelievable !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-10-15T20:30:00"> + Published: 15 octobre 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/misc.html">misc</a>. </p> + +</footer><!-- /.post-info --> <p>Or completely awesome. 
Depends the needs.</p> +<p><a class="reference external" href="./posts/2011/avril/20/a-markdown-powered-article/">a root-relative link to markdown-article</a> +<a class="reference external" href="./posts/2011/avril/20/a-markdown-powered-article/">a file-relative link to markdown-article</a></p> +<div class="section" id="testing-sourcecode-directive"> +<h2>Testing sourcecode directive</h2> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table></div> +<div class="section" id="testing-another-case"> +<h2>Testing another case</h2> +<p>This will now have a line number in 'custom' since it's the default in +pelican.conf, it ...</p></div> + <a class="readmore" href="./posts/2010/octobre/15/unbelievable/">read more</a> +<p>There are <a href="./posts/2010/octobre/15/unbelievable/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="./tag/baz.html" rel="bookmark" + title="Permalink to The baz tag">The baz tag</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-03-14T00:00:00"> + Published: 14 mars 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/misc.html">misc</a>. 
</p> + +</footer><!-- /.post-info --> <p>This article overrides the listening of the articles under the <em>baz</em> tag.</p> + + <a class="readmore" href="./tag/baz.html">read more</a> +<p>There are <a href="./tag/baz.html#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + </ol><!-- /#posts-list --> +<p class="paginator"> + <a href="./index2.html">&laquo;</a> + Page 3 / 3 +</p> + </section><!-- /#content --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/jinja2_template.html b/pelican/tests/output/custom_locale/jinja2_template.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/jinja2_template.html @@ -0,0 +1,77 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log</title> + <link rel="stylesheet" href="./theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="./">Alexis' log </a></h1> + <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> + <li><a href="./override/">Override url/save_as</a></li> + 
<li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="./category/yeah.html">yeah</a></li> + <li><a href="./category/misc.html">misc</a></li> + <li><a href="./category/cat1.html">cat1</a></li> + <li><a href="./category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + +Some text + + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/oh-yeah-fr.html b/pelican/tests/output/custom_locale/oh-yeah-fr.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/oh-yeah-fr.html @@ -0,0 +1,116 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Trop bien !</title> + <link rel="stylesheet" href="./theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="./">Alexis' log </a></h1> + <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> + <li><a href="./override/">Override url/save_as</a></li> + <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a 
href="./category/yeah.html">yeah</a></li> + <li class="active"><a href="./category/misc.html">misc</a></li> + <li><a href="./category/cat1.html">cat1</a></li> + <li><a href="./category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="./oh-yeah-fr.html" rel="bookmark" + title="Permalink to Trop bien !">Trop bien !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2012-03-02T14:01:01"> + Published: 02 mars 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/misc.html">misc</a>. </p> +Translations: + <a href="./posts/2010/octobre/20/oh-yeah/">en</a> + +</footer><!-- /.post-info --> <p>Et voila du contenu en français</p> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'oh-yeah-fr.html'; + var disqus_url = './oh-yeah-fr.html'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/override/index.html b/pelican/tests/output/custom_locale/override/index.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/override/index.html @@ -0,0 +1,81 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Override url/save_as</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li class="active"><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <h1 class="entry-title">Override url/save_as</h1> + + <p>Test page which overrides save_as and url so that this page will be generated +at a custom location.</p> + +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered 
by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/pages/this-is-a-test-hidden-page.html b/pelican/tests/output/custom_locale/pages/this-is-a-test-hidden-page.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/pages/this-is-a-test-hidden-page.html @@ -0,0 +1,81 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>This is a test hidden page</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <h1 class="entry-title">This is a test hidden page</h1> + + <p>This is great for things like error(404) pages +Anyone can see this page but it's not linked to anywhere!</p> + +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a 
href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/pages/this-is-a-test-page.html b/pelican/tests/output/custom_locale/pages/this-is-a-test-page.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/pages/this-is-a-test-page.html @@ -0,0 +1,81 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>This is a test page</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li class="active"><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <h1 class="entry-title">This is a test page</h1> + + <p>Just an image.</p> +<img alt="alternate text" src="../pictures/Fat_Cat.jpg" style="width: 600px; height: 450px;" /> + +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" 
rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/pictures/Fat_Cat.jpg b/pelican/tests/output/custom_locale/pictures/Fat_Cat.jpg new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/pictures/Fat_Cat.jpg differ diff --git a/pelican/tests/output/custom_locale/pictures/Sushi.jpg b/pelican/tests/output/custom_locale/pictures/Sushi.jpg new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/pictures/Sushi.jpg differ diff --git a/pelican/tests/output/custom_locale/pictures/Sushi_Macro.jpg b/pelican/tests/output/custom_locale/pictures/Sushi_Macro.jpg new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/pictures/Sushi_Macro.jpg differ diff --git "a/pelican/tests/output/custom_locale/posts/2010/d\303\251cembre/02/this-is-a-super-article/index.html" "b/pelican/tests/output/custom_locale/posts/2010/d\303\251cembre/02/this-is-a-super-article/index.html" new file mode 100644 --- /dev/null +++ "b/pelican/tests/output/custom_locale/posts/2010/d\303\251cembre/02/this-is-a-super-article/index.html" @@ -0,0 +1,129 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>This is a super article !</title> + <link rel="stylesheet" href="../../../../../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../../../../../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../../../../../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../../../../../override/">Override url/save_as</a></li> + <li><a href="../../../../../pages/this-is-a-test-page.html">This is a test page</a></li> + <li class="active"><a href="../../../../../category/yeah.html">yeah</a></li> + <li><a href="../../../../../category/misc.html">misc</a></li> + <li><a 
href="../../../../../category/cat1.html">cat1</a></li> + <li><a href="../../../../../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../../../../../posts/2010/décembre/02/this-is-a-super-article/" rel="bookmark" + title="Permalink to This is a super article !">This is a super article !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-12-02T10:14:00"> + Published: 02 décembre 2010 + </abbr> + <br /> + <abbr class="modified" title="2013-11-17T23:29:00"> + Updated: 17 novembre 2013 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../../../../../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../../../../../category/yeah.html">yeah</a>. </p> +<p>tags: <a href="../../../../../tag/foo.html">foo</a> <a href="../../../../../tag/bar.html">bar</a> <a href="../../../../../tag/foobar.html">foobar</a> </p> +</footer><!-- /.post-info --> <p>Some content here !</p> +<div class="section" id="this-is-a-simple-title"> +<h2>This is a simple title</h2> +<p>And here comes the cool <a class="reference external" href="http://books.couchdb.org/relax/design-documents/views">stuff</a>.</p> +<img alt="alternate text" src="../../../../../pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="../../../../../pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /> +<pre class="literal-block"> +&gt;&gt;&gt; from ipdb import set_trace +&gt;&gt;&gt; set_trace() +</pre> +<p>→ And now try with some utf8 hell: ééé</p> +</div> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'posts/2010/décembre/02/this-is-a-super-article/'; + var disqus_url = '../../../../../posts/2010/décembre/02/this-is-a-super-article/'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" 
class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/posts/2010/octobre/15/unbelievable/index.html b/pelican/tests/output/custom_locale/posts/2010/octobre/15/unbelievable/index.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/posts/2010/octobre/15/unbelievable/index.html @@ -0,0 +1,146 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Unbelievable !</title> + <link rel="stylesheet" href="../../../../../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../../../../../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../../../../../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../../../../../override/">Override url/save_as</a></li> + <li><a href="../../../../../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../../../../../category/yeah.html">yeah</a></li> + <li class="active"><a href="../../../../../category/misc.html">misc</a></li> + <li><a href="../../../../../category/cat1.html">cat1</a></li> + <li><a href="../../../../../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../../../../../posts/2010/octobre/15/unbelievable/" rel="bookmark" + title="Permalink to Unbelievable !">Unbelievable !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-10-15T20:30:00"> + Published: 15 octobre 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../../../../../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../../../../../category/misc.html">misc</a>. </p> + +</footer><!-- /.post-info --> <p>Or completely awesome. 
Depends the needs.</p> +<p><a class="reference external" href="../../../../../posts/2011/avril/20/a-markdown-powered-article/">a root-relative link to markdown-article</a> +<a class="reference external" href="../../../../../posts/2011/avril/20/a-markdown-powered-article/">a file-relative link to markdown-article</a></p> +<div class="section" id="testing-sourcecode-directive"> +<h2>Testing sourcecode directive</h2> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table></div> +<div class="section" id="testing-another-case"> +<h2>Testing another case</h2> +<p>This will now have a line number in 'custom' since it's the default in +pelican.conf, it will have nothing in default.</p> +<table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +</td></tr></table><p>Lovely.</p> +</div> +<div class="section" id="testing-more-sourcecode-directives"> +<h2>Testing more sourcecode directives</h2> +<div class="highlight"><pre><span id="foo-8"><a name="foo-8"></a><span class="lineno special"> 8</span> <span class="testingk">def</span> <span class="testingnf">run</span><span class="testingp">(</span><span class="testingbp">self</span><span class="testingp">):</span><br></span><span id="foo-9"><a name="foo-9"></a><span class="lineno"> </span> <span class="testingbp">self</span><span class="testingo">.</span><span class="testingn">assert_has_content</span><span class="testingp">()</span><br></span><span id="foo-10"><a name="foo-10"></a><span class="lineno special">10</span> <span class="testingk">try</span><span class="testingp">:</span><br></span><span id="foo-11"><a name="foo-11"></a><span class="lineno"> </span> <span class="testingn">lexer</span> <span class="testingo">=</span> <span class="testingn">get_lexer_by_name</span><span class="testingp">(</span><span class="testingbp">self</span><span class="testingo">.</span><span class="testingn">arguments</span><span class="testingp">[</span><span class="testingmi">0</span><span class="testingp">])</span><br></span><span id="foo-12"><a name="foo-12"></a><span class="lineno special">12</span> <span class="testingk">except</span> <span class="testingne">ValueError</span><span class="testingp">:</span><br></span><span id="foo-13"><a name="foo-13"></a><span class="lineno"> </span> <span class="testingc"># no lexer found - use the text one instead of an exception</span><br></span><span id="foo-14"><a name="foo-14"></a><span class="lineno special">14</span> <span class="testingn">lexer</span> 
<span class="testingo">=</span> <span class="testingn">TextLexer</span><span class="testingp">()</span><br></span><span id="foo-15"><a name="foo-15"></a><span class="lineno"> </span> <br></span><span id="foo-16"><a name="foo-16"></a><span class="lineno special">16</span> <span class="testingk">if</span> <span class="testingp">(</span><span class="testings">&#39;linenos&#39;</span> <span class="testingow">in</span> <span class="testingbp">self</span><span class="testingo">.</span><span class="testingn">options</span> <span class="testingow">and</span><br></span><span id="foo-17"><a name="foo-17"></a><span class="lineno"> </span> <span class="testingbp">self</span><span class="testingo">.</span><span class="testingn">options</span><span class="testingp">[</span><span class="testings">&#39;linenos&#39;</span><span class="testingp">]</span> <span class="testingow">not</span> <span class="testingow">in</span> <span class="testingp">(</span><span class="testings">&#39;table&#39;</span><span class="testingp">,</span> <span class="testings">&#39;inline&#39;</span><span class="testingp">)):</span><br></span><span id="foo-18"><a name="foo-18"></a><span class="lineno special">18</span> <span class="testingbp">self</span><span class="testingo">.</span><span class="testingn">options</span><span class="testingp">[</span><span class="testings">&#39;linenos&#39;</span><span class="testingp">]</span> <span class="testingo">=</span> <span class="testings">&#39;table&#39;</span><br></span><span id="foo-19"><a name="foo-19"></a><span class="lineno"> </span> <br></span><span id="foo-20"><a name="foo-20"></a><span class="lineno special">20</span> <span class="testingk">for</span> <span class="testingn">flag</span> <span class="testingow">in</span> <span class="testingp">(</span><span class="testings">&#39;nowrap&#39;</span><span class="testingp">,</span> <span class="testings">&#39;nobackground&#39;</span><span class="testingp">,</span> <span class="testings">&#39;anchorlinenos&#39;</span><span class="testingp">):</span><br></span><span id="foo-21"><a name="foo-21"></a><span class="lineno"> </span> <span class="testingk">if</span> <span class="testingn">flag</span> <span class="testingow">in</span> <span class="testingbp">self</span><span class="testingo">.</span><span class="testingn">options</span><span class="testingp">:</span><br></span><span id="foo-22"><a name="foo-22"></a><span class="lineno special">22</span> <span class="testingbp">self</span><span class="testingo">.</span><span class="testingn">options</span><span class="testingp">[</span><span class="testingn">flag</span><span class="testingp">]</span> <span class="testingo">=</span> <span class="testingbp">True</span><br></span><span id="foo-23"><a name="foo-23"></a><span class="lineno"> </span> <br></span><span id="foo-24"><a name="foo-24"></a><span class="lineno special">24</span> <span class="testingc"># noclasses should already default to False, but just in case...</span><br></span><span id="foo-25"><a name="foo-25"></a><span class="lineno"> </span> <span class="testingn">formatter</span> <span class="testingo">=</span> <span class="testingn">HtmlFormatter</span><span class="testingp">(</span><span class="testingn">noclasses</span><span class="testingo">=</span><span class="testingbp">False</span><span class="testingp">,</span> <span class="testingo">**</span><span class="testingbp">self</span><span class="testingo">.</span><span class="testingn">options</span><span class="testingp">)</span><br></span><span id="foo-26"><a name="foo-26"></a><span 
class="lineno special">26</span> <span class="testingn">parsed</span> <span class="testingo">=</span> <span class="testingn">highlight</span><span class="testingp">(</span><span class="testings">&#39;</span><span class="testingse">\n</span><span class="testings">&#39;</span><span class="testingo">.</span><span class="testingn">join</span><span class="testingp">(</span><span class="testingbp">self</span><span class="testingo">.</span><span class="testingn">content</span><span class="testingp">),</span> <span class="testingn">lexer</span><span class="testingp">,</span> <span class="testingn">formatter</span><span class="testingp">)</span><br></span><span id="foo-27"><a name="foo-27"></a><span class="lineno"> </span> <span class="testingk">return</span> <span class="testingp">[</span><span class="testingn">nodes</span><span class="testingo">.</span><span class="testingn">raw</span><span class="testingp">(</span><span class="testings">&#39;&#39;</span><span class="testingp">,</span> <span class="testingn">parsed</span><span class="testingp">,</span> <span class="testingn">format</span><span class="testingo">=</span><span class="testings">&#39;html&#39;</span><span class="testingp">)]</span><br></span></pre></div> +<p>Lovely.</p> +</div> +<div class="section" id="testing-even-more-sourcecode-directives"> +<h2>Testing even more sourcecode directives</h2> +<span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +<p>Lovely.</p> +</div> +<div class="section" id="testing-overriding-config-defaults"> +<h2>Testing overriding config defaults</h2> +<p>Even if the default is line numbers, we can override it here</p> +<div class="highlight"><pre><span class="n">formatter</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">options</span> <span class="ow">and</span> <span class="n">VARIANTS</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="o">.</span><span class="n">keys</span><span class="p">()[</span><span class="mi">0</span><span class="p">]]</span> +</pre></div> +<p>Lovely.</p> +</div> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'posts/2010/octobre/15/unbelievable/'; + var disqus_url = '../../../../../posts/2010/octobre/15/unbelievable/'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + 
<li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/posts/2010/octobre/20/oh-yeah/index.html b/pelican/tests/output/custom_locale/posts/2010/octobre/20/oh-yeah/index.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/posts/2010/octobre/20/oh-yeah/index.html @@ -0,0 +1,121 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Oh yeah !</title> + <link rel="stylesheet" href="../../../../../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../../../../../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../../../../../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../../../../../override/">Override url/save_as</a></li> + <li><a href="../../../../../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../../../../../category/yeah.html">yeah</a></li> + <li><a href="../../../../../category/misc.html">misc</a></li> + <li><a href="../../../../../category/cat1.html">cat1</a></li> + <li class="active"><a href="../../../../../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../../../../../posts/2010/octobre/20/oh-yeah/" rel="bookmark" + title="Permalink to Oh yeah !">Oh yeah !</a></h1> + </header> + + <div 
class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-10-20T10:14:00"> + Published: 20 octobre 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../../../../../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../../../../../category/bar.html">bar</a>. </p> +<p>tags: <a href="../../../../../tag/oh.html">oh</a> <a href="../../../../../tag/bar.html">bar</a> <a href="../../../../../tag/yeah.html">yeah</a> </p>Translations: + <a href="../../../../../oh-yeah-fr.html">fr</a> + +</footer><!-- /.post-info --> <div class="section" id="why-not"> +<h2>Why not ?</h2> +<p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! +YEAH !</p> +<img alt="alternate text" src="../../../../../pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +</div> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'posts/2010/octobre/20/oh-yeah/'; + var disqus_url = '../../../../../posts/2010/octobre/20/oh-yeah/'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/posts/2011/avril/20/a-markdown-powered-article/index.html b/pelican/tests/output/custom_locale/posts/2011/avril/20/a-markdown-powered-article/index.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/posts/2011/avril/20/a-markdown-powered-article/index.html @@ -0,0 +1,115 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>A markdown powered article</title> + <link rel="stylesheet" href="../../../../../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../../../../../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../../../../../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../../../../../override/">Override url/save_as</a></li> + <li><a href="../../../../../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../../../../../category/yeah.html">yeah</a></li> + <li><a href="../../../../../category/misc.html">misc</a></li> + <li class="active"><a href="../../../../../category/cat1.html">cat1</a></li> + <li><a href="../../../../../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../../../../../posts/2011/avril/20/a-markdown-powered-article/" rel="bookmark" + title="Permalink to A markdown powered article">A markdown powered article</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-04-20T00:00:00"> + Published: 20 avril 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../../../../../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../../../../../category/cat1.html">cat1</a>. 
</p> + +</footer><!-- /.post-info --> <p>You're mutually oblivious.</p> +<p><a href="../../../../../posts/2010/octobre/15/unbelievable/">a root-relative link to unbelievable</a> +<a href="../../../../../posts/2010/octobre/15/unbelievable/">a file-relative link to unbelievable</a></p> + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'posts/2011/avril/20/a-markdown-powered-article/'; + var disqus_url = '../../../../../posts/2011/avril/20/a-markdown-powered-article/'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git "a/pelican/tests/output/custom_locale/posts/2011/f\303\251vrier/17/article-1/index.html" "b/pelican/tests/output/custom_locale/posts/2011/f\303\251vrier/17/article-1/index.html" new file mode 100644 --- /dev/null +++ "b/pelican/tests/output/custom_locale/posts/2011/f\303\251vrier/17/article-1/index.html" @@ -0,0 +1,114 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Article 1</title> + <link rel="stylesheet" href="../../../../../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../../../../../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../../../../../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../../../../../override/">Override url/save_as</a></li> + <li><a href="../../../../../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../../../../../category/yeah.html">yeah</a></li> + <li><a href="../../../../../category/misc.html">misc</a></li> + <li class="active"><a href="../../../../../category/cat1.html">cat1</a></li> + <li><a href="../../../../../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../../../../../posts/2011/février/17/article-1/" rel="bookmark" + title="Permalink to Article 1">Article 1</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../../../../../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../../../../../category/cat1.html">cat1</a>. 
</p> + +</footer><!-- /.post-info --> <p>Article 1</p> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'posts/2011/février/17/article-1/'; + var disqus_url = '../../../../../posts/2011/février/17/article-1/'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git "a/pelican/tests/output/custom_locale/posts/2011/f\303\251vrier/17/article-2/index.html" "b/pelican/tests/output/custom_locale/posts/2011/f\303\251vrier/17/article-2/index.html" new file mode 100644 --- /dev/null +++ "b/pelican/tests/output/custom_locale/posts/2011/f\303\251vrier/17/article-2/index.html" @@ -0,0 +1,114 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Article 2</title> + <link rel="stylesheet" href="../../../../../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../../../../../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../../../../../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../../../../../override/">Override url/save_as</a></li> + <li><a href="../../../../../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../../../../../category/yeah.html">yeah</a></li> + <li><a href="../../../../../category/misc.html">misc</a></li> + <li class="active"><a href="../../../../../category/cat1.html">cat1</a></li> + <li><a href="../../../../../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../../../../../posts/2011/février/17/article-2/" rel="bookmark" + title="Permalink to Article 2">Article 2</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../../../../../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../../../../../category/cat1.html">cat1</a>. 
</p> + +</footer><!-- /.post-info --> <p>Article 2</p> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'posts/2011/février/17/article-2/'; + var disqus_url = '../../../../../posts/2011/février/17/article-2/'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git "a/pelican/tests/output/custom_locale/posts/2011/f\303\251vrier/17/article-3/index.html" "b/pelican/tests/output/custom_locale/posts/2011/f\303\251vrier/17/article-3/index.html" new file mode 100644 --- /dev/null +++ "b/pelican/tests/output/custom_locale/posts/2011/f\303\251vrier/17/article-3/index.html" @@ -0,0 +1,114 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Article 3</title> + <link rel="stylesheet" href="../../../../../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../../../../../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../../../../../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../../../../../override/">Override url/save_as</a></li> + <li><a href="../../../../../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../../../../../category/yeah.html">yeah</a></li> + <li><a href="../../../../../category/misc.html">misc</a></li> + <li class="active"><a href="../../../../../category/cat1.html">cat1</a></li> + <li><a href="../../../../../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../../../../../posts/2011/février/17/article-3/" rel="bookmark" + title="Permalink to Article 3">Article 3</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2011-02-17T00:00:00"> + Published: 17 février 2011 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../../../../../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../../../../../category/cat1.html">cat1</a>. 
</p> + +</footer><!-- /.post-info --> <p>Article 3</p> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'posts/2011/février/17/article-3/'; + var disqus_url = '../../../../../posts/2011/février/17/article-3/'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git "a/pelican/tests/output/custom_locale/posts/2012/f\303\251vrier/29/second-article/index.html" "b/pelican/tests/output/custom_locale/posts/2012/f\303\251vrier/29/second-article/index.html" new file mode 100644 --- /dev/null +++ "b/pelican/tests/output/custom_locale/posts/2012/f\303\251vrier/29/second-article/index.html" @@ -0,0 +1,116 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Second article</title> + <link rel="stylesheet" href="../../../../../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../../../../../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../../../../../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../../../../../override/">Override url/save_as</a></li> + <li><a href="../../../../../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../../../../../category/yeah.html">yeah</a></li> + <li class="active"><a href="../../../../../category/misc.html">misc</a></li> + <li><a href="../../../../../category/cat1.html">cat1</a></li> + <li><a href="../../../../../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../../../../../posts/2012/février/29/second-article/" rel="bookmark" + title="Permalink to Second article">Second article</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2012-02-29T00:00:00"> + Published: 29 février 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../../../../../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../../../../../category/misc.html">misc</a>. 
</p> +<p>tags: <a href="../../../../../tag/foo.html">foo</a> <a href="../../../../../tag/bar.html">bar</a> <a href="../../../../../tag/baz.html">baz</a> </p>Translations: + <a href="../../../../../second-article-fr.html">fr</a> + +</footer><!-- /.post-info --> <p>This is some article, in english</p> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'posts/2012/février/29/second-article/'; + var disqus_url = '../../../../../posts/2012/février/29/second-article/'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/posts/2012/novembre/30/filename_metadata-example/index.html b/pelican/tests/output/custom_locale/posts/2012/novembre/30/filename_metadata-example/index.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/posts/2012/novembre/30/filename_metadata-example/index.html @@ -0,0 +1,114 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>FILENAME_METADATA example</title> + <link rel="stylesheet" href="../../../../../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../../../../../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../../../../../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../../../../../override/">Override url/save_as</a></li> + <li><a href="../../../../../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../../../../../category/yeah.html">yeah</a></li> + <li class="active"><a href="../../../../../category/misc.html">misc</a></li> + <li><a href="../../../../../category/cat1.html">cat1</a></li> + <li><a href="../../../../../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../../../../../posts/2012/novembre/30/filename_metadata-example/" rel="bookmark" + title="Permalink to FILENAME_METADATA example">FILENAME_METADATA example</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2012-11-30T00:00:00"> + Published: 30 novembre 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../../../../../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../../../../../category/misc.html">misc</a>. 
</p> + +</footer><!-- /.post-info --> <p>Some cool stuff!</p> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'posts/2012/novembre/30/filename_metadata-example/'; + var disqus_url = '../../../../../posts/2012/novembre/30/filename_metadata-example/'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/robots.txt b/pelican/tests/output/custom_locale/robots.txt new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/robots.txt @@ -0,0 +1,2 @@ +User-agent: * +Disallow: /pictures diff --git a/pelican/tests/output/custom_locale/second-article-fr.html b/pelican/tests/output/custom_locale/second-article-fr.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/second-article-fr.html @@ -0,0 +1,116 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Deuxième article</title> + <link rel="stylesheet" href="./theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="./">Alexis' log </a></h1> + <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> + <li><a href="./override/">Override url/save_as</a></li> + <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="./category/yeah.html">yeah</a></li> + <li class="active"><a href="./category/misc.html">misc</a></li> + <li><a href="./category/cat1.html">cat1</a></li> + <li><a href="./category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="./second-article-fr.html" rel="bookmark" + title="Permalink to Deuxième article">Deuxième article</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2012-02-29T00:00:00"> + Published: 29 février 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="./author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="./category/misc.html">misc</a>. 
</p> +<p>tags: <a href="./tag/foo.html">foo</a> <a href="./tag/bar.html">bar</a> <a href="./tag/baz.html">baz</a> </p>Translations: + <a href="./posts/2012/février/29/second-article/">en</a> + +</footer><!-- /.post-info --> <p>Ceci est un article, en français.</p> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'second-article-fr.html'; + var disqus_url = './second-article-fr.html'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/tag/bar.html b/pelican/tests/output/custom_locale/tag/bar.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/tag/bar.html @@ -0,0 +1,160 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - bar</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <aside id="featured" class="body"> + <article> + <h1 class="entry-title"><a href="../posts/2012/février/29/second-article/">Second article</a></h1> +<footer class="post-info"> + <abbr class="published" title="2012-02-29T00:00:00"> + Published: 29 février 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. 
</p> +<p>tags: <a href="../tag/foo.html">foo</a> <a href="../tag/bar.html">bar</a> <a href="../tag/baz.html">baz</a> </p>Translations: + <a href="../second-article-fr.html">fr</a> + +</footer><!-- /.post-info --><p>This is some article, in english</p> +<p>There are <a href="../posts/2012/février/29/second-article/#disqus_thread">comments</a>.</p> </article> + </aside><!-- /#featured --> + <section id="content" class="body"> + <h1>Other articles</h1> + <hr /> + <ol id="posts-list" class="hfeed"> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2010/décembre/02/this-is-a-super-article/" rel="bookmark" + title="Permalink to This is a super article !">This is a super article !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-12-02T10:14:00"> + Published: 02 décembre 2010 + </abbr> + <br /> + <abbr class="modified" title="2013-11-17T23:29:00"> + Updated: 17 novembre 2013 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/yeah.html">yeah</a>. </p> +<p>tags: <a href="../tag/foo.html">foo</a> <a href="../tag/bar.html">bar</a> <a href="../tag/foobar.html">foobar</a> </p> +</footer><!-- /.post-info --> <p class="first last">Multi-line metadata should be supported +as well as <strong>inline markup</strong>.</p> + + <a class="readmore" href="../posts/2010/décembre/02/this-is-a-super-article/">read more</a> +<p>There are <a href="../posts/2010/décembre/02/this-is-a-super-article/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2010/octobre/20/oh-yeah/" rel="bookmark" + title="Permalink to Oh yeah !">Oh yeah !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-10-20T10:14:00"> + Published: 20 octobre 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/bar.html">bar</a>. </p> +<p>tags: <a href="../tag/oh.html">oh</a> <a href="../tag/bar.html">bar</a> <a href="../tag/yeah.html">yeah</a> </p>Translations: + <a href="../oh-yeah-fr.html">fr</a> + +</footer><!-- /.post-info --> <div class="section" id="why-not"> +<h2>Why not ?</h2> +<p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
+YEAH !</p> +<img alt="alternate text" src="../pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +</div> + + <a class="readmore" href="../posts/2010/octobre/20/oh-yeah/">read more</a> +<p>There are <a href="../posts/2010/octobre/20/oh-yeah/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + </ol><!-- /#posts-list --> +<p class="paginator"> + Page 1 / 1 +</p> + </section><!-- /#content --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/tag/baz.html b/pelican/tests/output/custom_locale/tag/baz.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/tag/baz.html @@ -0,0 +1,114 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>The baz tag</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a 
href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li class="active"><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="../tag/baz.html" rel="bookmark" + title="Permalink to The baz tag">The baz tag</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-03-14T00:00:00"> + Published: 14 mars 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. </p> + +</footer><!-- /.post-info --> <p>This article overrides the listening of the articles under the <em>baz</em> tag.</p> + + </div><!-- /.entry-content --> + <div class="comments"> + <h2>Comments !</h2> + <div id="disqus_thread"></div> + <script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + var disqus_identifier = 'tag/baz.html'; + var disqus_url = '../tag/baz.html'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//blog-notmyidea.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + </script> + <noscript>Please enable JavaScript to view the comments.</noscript> + </div> + + </article> +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/tag/foo.html b/pelican/tests/output/custom_locale/tag/foo.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/tag/foo.html @@ -0,0 +1,130 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - foo</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <aside id="featured" class="body"> + <article> + <h1 class="entry-title"><a href="../posts/2012/février/29/second-article/">Second article</a></h1> +<footer class="post-info"> + <abbr class="published" title="2012-02-29T00:00:00"> + Published: 29 février 2012 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/misc.html">misc</a>. 
</p> +<p>tags: <a href="../tag/foo.html">foo</a> <a href="../tag/bar.html">bar</a> <a href="../tag/baz.html">baz</a> </p>Translations: + <a href="../second-article-fr.html">fr</a> + +</footer><!-- /.post-info --><p>This is some article, in english</p> +<p>There are <a href="../posts/2012/février/29/second-article/#disqus_thread">comments</a>.</p> </article> + </aside><!-- /#featured --> + <section id="content" class="body"> + <h1>Other articles</h1> + <hr /> + <ol id="posts-list" class="hfeed"> + + <li><article class="hentry"> + <header> + <h1><a href="../posts/2010/décembre/02/this-is-a-super-article/" rel="bookmark" + title="Permalink to This is a super article !">This is a super article !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-12-02T10:14:00"> + Published: 02 décembre 2010 + </abbr> + <br /> + <abbr class="modified" title="2013-11-17T23:29:00"> + Updated: 17 novembre 2013 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/yeah.html">yeah</a>. </p> +<p>tags: <a href="../tag/foo.html">foo</a> <a href="../tag/bar.html">bar</a> <a href="../tag/foobar.html">foobar</a> </p> +</footer><!-- /.post-info --> <p class="first last">Multi-line metadata should be supported +as well as <strong>inline markup</strong>.</p> + + <a class="readmore" href="../posts/2010/décembre/02/this-is-a-super-article/">read more</a> +<p>There are <a href="../posts/2010/décembre/02/this-is-a-super-article/#disqus_thread">comments</a>.</p> </div><!-- /.entry-content --> + </article></li> + </ol><!-- /#posts-list --> +<p class="paginator"> + Page 1 / 1 +</p> + </section><!-- /#content --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/tag/foobar.html b/pelican/tests/output/custom_locale/tag/foobar.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/tag/foobar.html @@ -0,0 +1,109 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - foobar</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <aside id="featured" class="body"> + <article> + <h1 class="entry-title"><a href="../posts/2010/décembre/02/this-is-a-super-article/">This is a super article !</a></h1> +<footer class="post-info"> + <abbr class="published" title="2010-12-02T10:14:00"> + Published: 02 décembre 2010 + </abbr> + <br /> + <abbr class="modified" title="2013-11-17T23:29:00"> + Updated: 17 novembre 2013 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/yeah.html">yeah</a>. 
</p> +<p>tags: <a href="../tag/foo.html">foo</a> <a href="../tag/bar.html">bar</a> <a href="../tag/foobar.html">foobar</a> </p> +</footer><!-- /.post-info --><p>Some content here !</p> +<div class="section" id="this-is-a-simple-title"> +<h2>This is a simple title</h2> +<p>And here comes the cool <a class="reference external" href="http://books.couchdb.org/relax/design-documents/views">stuff</a>.</p> +<img alt="alternate text" src="../pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="../pictures/Sushi_Macro.jpg" style="width: 600px; height: 450px;" /> +<pre class="literal-block"> +&gt;&gt;&gt; from ipdb import set_trace +&gt;&gt;&gt; set_trace() +</pre> +<p>→ And now try with some utf8 hell: ééé</p> +</div> +<p>There are <a href="../posts/2010/décembre/02/this-is-a-super-article/#disqus_thread">comments</a>.</p> </article> +<p class="paginator"> + Page 1 / 1 +</p> + </aside><!-- /#featured --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/tag/oh.html b/pelican/tests/output/custom_locale/tag/oh.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/tag/oh.html @@ -0,0 +1,80 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Oh Oh Oh</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li class="active"><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <h1 class="entry-title">Oh Oh Oh</h1> + + <p>This page overrides the listening of the articles under the <em>oh</em> tag.</p> + +</section> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great 
advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/tag/yeah.html b/pelican/tests/output/custom_locale/tag/yeah.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/tag/yeah.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - yeah</title> + <link rel="stylesheet" href="../theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="../">Alexis' log </a></h1> + <nav><ul> + <li><a href="../tag/oh.html">Oh Oh Oh</a></li> + <li><a href="../override/">Override url/save_as</a></li> + <li><a href="../pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="../category/yeah.html">yeah</a></li> + <li><a href="../category/misc.html">misc</a></li> + <li><a href="../category/cat1.html">cat1</a></li> + <li><a href="../category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + + <aside id="featured" class="body"> + <article> + <h1 class="entry-title"><a href="../posts/2010/octobre/20/oh-yeah/">Oh yeah !</a></h1> +<footer class="post-info"> + <abbr class="published" title="2010-10-20T10:14:00"> + Published: 20 octobre 2010 + </abbr> + + <address class="vcard author"> + By <a class="url fn" href="../author/alexis-metaireau.html">Alexis Métaireau</a> + </address> +<p>In <a href="../category/bar.html">bar</a>. </p> +<p>tags: <a href="../tag/oh.html">oh</a> <a href="../tag/bar.html">bar</a> <a href="../tag/yeah.html">yeah</a> </p>Translations: + <a href="../oh-yeah-fr.html">fr</a> + +</footer><!-- /.post-info --><div class="section" id="why-not"> +<h2>Why not ?</h2> +<p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
+YEAH !</p> +<img alt="alternate text" src="../pictures/Sushi.jpg" style="width: 600px; height: 450px;" /> +</div> +<p>There are <a href="../posts/2010/octobre/20/oh-yeah/#disqus_thread">comments</a>.</p> </article> +<p class="paginator"> + Page 1 / 1 +</p> + </aside><!-- /#featured --> + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/tags.html b/pelican/tests/output/custom_locale/tags.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/tags.html @@ -0,0 +1,87 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <title>Alexis' log - Tags</title> + <link rel="stylesheet" href="./theme/css/main.css" /> + <link href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="Alexis' log Atom Feed" /> + <link href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate" title="Alexis' log RSS Feed" /> + + <!--[if IE]> + <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> +</head> + +<body id="index" class="home"> +<a href="http://github.com/ametaireau/"> +<img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub" /> +</a> + <header id="banner" class="body"> + <h1><a href="./">Alexis' log </a></h1> + <nav><ul> + <li><a href="./tag/oh.html">Oh Oh Oh</a></li> + <li><a href="./override/">Override url/save_as</a></li> + <li><a href="./pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="./category/yeah.html">yeah</a></li> + <li><a href="./category/misc.html">misc</a></li> + 
<li><a href="./category/cat1.html">cat1</a></li> + <li><a href="./category/bar.html">bar</a></li> + </ul></nav> + </header><!-- /#banner --> + +<section id="content" class="body"> + <h1>Tags for Alexis' log</h1> + <ul> + <li><a href="./tag/bar.html">bar</a> (3)</li> + <li><a href="./tag/baz.html">baz</a> (1)</li> + <li><a href="./tag/foo.html">foo</a> (2)</li> + <li><a href="./tag/foobar.html">foobar</a> (1)</li> + <li><a href="./tag/oh.html">oh</a> (1)</li> + <li><a href="./tag/yeah.html">yeah</a> (1)</li> + </ul> +</section> + + <section id="extras" class="body"> + <div class="blogroll"> + <h2>blogroll</h2> + <ul> + <li><a href="http://biologeek.org">Biologeek</a></li> + <li><a href="http://filyb.info/">Filyb</a></li> + <li><a href="http://www.libert-fr.com">Libert-fr</a></li> + <li><a href="http://prendreuncafe.com/blog/">N1k0</a></li> + <li><a href="http://ziade.org/blog">Tarek Ziadé</a></li> + <li><a href="http://zubin71.wordpress.com/">Zubin Mithra</a></li> + </ul> + </div><!-- /.blogroll --> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="http://blog.notmyidea.org/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + <li><a href="http://blog.notmyidea.org/feeds/all.rss.xml" type="application/rss+xml" rel="alternate">rss feed</a></li> + + <li><a href="http://twitter.com/ametaireau">twitter</a></li> + <li><a href="http://lastfm.com/user/akounet">lastfm</a></li> + <li><a href="http://github.com/ametaireau">github</a></li> + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="http://getpelican.com/">Pelican</a>, which takes great advantage of <a href="http://python.org">Python</a>. + </address><!-- /#about --> + + <p>The theme is by <a href="http://coding.smashingmagazine.com/2009/08/04/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +<script type="text/javascript"> + var disqus_shortname = 'blog-notmyidea'; + (function () { + var s = document.createElement('script'); s.async = true; + s.type = 'text/javascript'; + s.src = '//' + disqus_shortname + '.disqus.com/count.js'; + (document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s); + }()); +</script> +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/theme/css/main.css b/pelican/tests/output/custom_locale/theme/css/main.css new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/theme/css/main.css @@ -0,0 +1,451 @@ +/* + Name: Smashing HTML5 + Date: July 2009 + Description: Sample layout for HTML5 and CSS3 goodness. 
+ Version: 1.0 + License: MIT <http://opensource.org/licenses/MIT> + Licensed by: Smashing Media GmbH <http://www.smashingmagazine.com/> + Original author: Enrique Ramírez <http://enrique-ramirez.com/> +*/ + +/* Imports */ +@import url("reset.css"); +@import url("pygment.css"); +@import url("typogrify.css"); +@import url(//fonts.googleapis.com/css?family=Yanone+Kaffeesatz&subset=latin); + +/***** Global *****/ +/* Body */ +body { + background: #F5F4EF; + color: #000305; + font-size: 87.5%; /* Base font size: 14px */ + font-family: 'Trebuchet MS', Trebuchet, 'Lucida Sans Unicode', 'Lucida Grande', 'Lucida Sans', Arial, sans-serif; + line-height: 1.429; + margin: 0; + padding: 0; + text-align: left; +} + +/* Headings */ +h1 {font-size: 2em } +h2 {font-size: 1.571em} /* 22px */ +h3 {font-size: 1.429em} /* 20px */ +h4 {font-size: 1.286em} /* 18px */ +h5 {font-size: 1.143em} /* 16px */ +h6 {font-size: 1em} /* 14px */ + +h1, h2, h3, h4, h5, h6 { + font-weight: 400; + line-height: 1.1; + margin-bottom: .8em; + font-family: 'Yanone Kaffeesatz', arial, serif; +} + +h3, h4, h5, h6 { margin-top: .8em; } + +hr { border: 2px solid #EEEEEE; } + +/* Anchors */ +a {outline: 0;} +a img {border: 0px; text-decoration: none;} +a:link, a:visited { + color: #C74350; + padding: 0 1px; + text-decoration: underline; +} +a:hover, a:active { + background-color: #C74350; + color: #fff; + text-decoration: none; + text-shadow: 1px 1px 1px #333; +} + +h1 a:hover { + background-color: inherit +} + +/* Paragraphs */ +div.line-block, +p { margin-top: 1em; + margin-bottom: 1em;} + +strong, b {font-weight: bold;} +em, i {font-style: italic;} + +/* Lists */ +ul { + list-style: outside disc; + margin: 0em 0 0 1.5em; +} + +ol { + list-style: outside decimal; + margin: 0em 0 0 1.5em; +} + +li { margin-top: 0.5em;} + +.post-info { + float:right; + margin:10px; + padding:5px; +} + +.post-info p{ + margin-top: 1px; + margin-bottom: 1px; +} + +.readmore { float: right } + +dl {margin: 0 0 1.5em 0;} +dt {font-weight: bold;} +dd {margin-left: 1.5em;} + +pre{background-color: rgb(238, 238, 238); padding: 10px; margin: 10px; overflow: auto;} + +/* Quotes */ +blockquote { + margin: 20px; + font-style: italic; +} +cite {} + +q {} + +div.note { + float: right; + margin: 5px; + font-size: 85%; + max-width: 300px; +} + +/* Tables */ +table {margin: .5em auto 1.5em auto; width: 98%;} + + /* Thead */ + thead th {padding: .5em .4em; text-align: left;} + thead td {} + + /* Tbody */ + tbody td {padding: .5em .4em;} + tbody th {} + + tbody .alt td {} + tbody .alt th {} + + /* Tfoot */ + tfoot th {} + tfoot td {} + +/* HTML5 tags */ +header, section, footer, +aside, nav, article, figure { + display: block; +} + +/***** Layout *****/ +.body {clear: both; margin: 0 auto; width: 800px;} +img.right, figure.right {float: right; margin: 0 0 2em 2em;} +img.left, figure.left {float: left; margin: 0 2em 2em 0;} + +/* + Header +*****************/ +#banner { + margin: 0 auto; + padding: 2.5em 0 0 0; +} + + /* Banner */ + #banner h1 {font-size: 3.571em; line-height: 0;} + #banner h1 a:link, #banner h1 a:visited { + color: #000305; + display: block; + font-weight: bold; + margin: 0 0 .6em .2em; + text-decoration: none; + } + #banner h1 a:hover, #banner h1 a:active { + background: none; + color: #C74350; + text-shadow: none; + } + + #banner h1 strong {font-size: 0.36em; font-weight: normal;} + + /* Main Nav */ + #banner nav { + background: #000305; + font-size: 1.143em; + height: 40px; + line-height: 30px; + margin: 0 auto 2em auto; + padding: 0; + text-align: 
center; + width: 800px; + + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + } + + #banner nav ul {list-style: none; margin: 0 auto; width: 800px;} + #banner nav li {float: left; display: inline; margin: 0;} + + #banner nav a:link, #banner nav a:visited { + color: #fff; + display: inline-block; + height: 30px; + padding: 5px 1.5em; + text-decoration: none; + } + #banner nav a:hover, #banner nav a:active, + #banner nav .active a:link, #banner nav .active a:visited { + background: #C74451; + color: #fff; + text-shadow: none !important; + } + + #banner nav li:first-child a { + border-top-left-radius: 5px; + -moz-border-radius-topleft: 5px; + -webkit-border-top-left-radius: 5px; + + border-bottom-left-radius: 5px; + -moz-border-radius-bottomleft: 5px; + -webkit-border-bottom-left-radius: 5px; + } + +/* + Featured +*****************/ +#featured { + background: #fff; + margin-bottom: 2em; + overflow: hidden; + padding: 20px; + width: 760px; + + border-radius: 10px; + -moz-border-radius: 10px; + -webkit-border-radius: 10px; +} + +#featured figure { + border: 2px solid #eee; + float: right; + margin: 0.786em 2em 0 5em; + width: 248px; +} +#featured figure img {display: block; float: right;} + +#featured h2 {color: #C74451; font-size: 1.714em; margin-bottom: 0.333em;} +#featured h3 {font-size: 1.429em; margin-bottom: .5em;} + +#featured h3 a:link, #featured h3 a:visited {color: #000305; text-decoration: none;} +#featured h3 a:hover, #featured h3 a:active {color: #fff;} + +/* + Body +*****************/ +#content { + background: #fff; + margin-bottom: 2em; + overflow: hidden; + padding: 20px 20px; + width: 760px; + + border-radius: 10px; + -moz-border-radius: 10px; + -webkit-border-radius: 10px; +} + +/* + Extras +*****************/ +#extras {margin: 0 auto 3em auto; overflow: hidden;} + +#extras ul {list-style: none; margin: 0;} +#extras li {border-bottom: 1px solid #fff;} +#extras h2 { + color: #C74350; + font-size: 1.429em; + margin-bottom: .25em; + padding: 0 3px; +} + +#extras a:link, #extras a:visited { + color: #444; + display: block; + border-bottom: 1px solid #F4E3E3; + text-decoration: none; + padding: .3em .25em; +} + +#extras a:hover, #extras a:active {color: #fff;} + + /* Blogroll */ + #extras .blogroll { + float: left; + width: 615px; + } + + #extras .blogroll li {float: left; margin: 0 20px 0 0; width: 185px;} + + /* Social */ + #extras .social { + float: right; + width: 175px; + } + + #extras div[class='social'] a { + background-repeat: no-repeat; + background-position: 3px 6px; + padding-left: 25px; + } + + /* Icons */ + .social a[href*='about.me'] {background-image: url('../images/icons/aboutme.png');} + .social a[href*='bitbucket.org'] {background-image: url('../images/icons/bitbucket.png');} + .social a[href*='delicious.com'] {background-image: url('../images/icons/delicious.png');} + .social a[href*='digg.com'] {background-image: url('../images/icons/digg.png');} + .social a[href*='facebook.com'] {background-image: url('../images/icons/facebook.png');} + .social a[href*='gitorious.org'] {background-image: url('../images/icons/gitorious.png');} + .social a[href*='github.com'], + .social a[href*='git.io'] { + background-image: url('../images/icons/github.png'); + background-size: 16px 16px; + } + .social a[href*='gittip.com'] {background-image: url('../images/icons/gittip.png');} + .social a[href*='plus.google.com'] {background-image: url('../images/icons/google-plus.png');} + .social a[href*='groups.google.com'] {background-image: 
url('../images/icons/google-groups.png');} + .social a[href*='news.ycombinator.com'], + .social a[href*='hackernewsers.com'] {background-image: url('../images/icons/hackernews.png');} + .social a[href*='last.fm'], .social a[href*='lastfm.'] {background-image: url('../images/icons/lastfm.png');} + .social a[href*='linkedin.com'] {background-image: url('../images/icons/linkedin.png');} + .social a[href*='reddit.com'] {background-image: url('../images/icons/reddit.png');} + .social a[type$='atom+xml'], .social a[type$='rss+xml'] {background-image: url('../images/icons/rss.png');} + .social a[href*='slideshare.net'] {background-image: url('../images/icons/slideshare.png');} + .social a[href*='speakerdeck.com'] {background-image: url('../images/icons/speakerdeck.png');} + .social a[href*='stackoverflow.com'] {background-image: url('../images/icons/stackoverflow.png');} + .social a[href*='twitter.com'] {background-image: url('../images/icons/twitter.png');} + .social a[href*='vimeo.com'] {background-image: url('../images/icons/vimeo.png');} + .social a[href*='youtube.com'] {background-image: url('../images/icons/youtube.png');} + +/* + About +*****************/ +#about { + background: #fff; + font-style: normal; + margin-bottom: 2em; + overflow: hidden; + padding: 20px; + text-align: left; + width: 760px; + + border-radius: 10px; + -moz-border-radius: 10px; + -webkit-border-radius: 10px; +} + +#about .primary {float: left; width: 165px;} +#about .primary strong {color: #C64350; display: block; font-size: 1.286em;} +#about .photo {float: left; margin: 5px 20px;} + +#about .url:link, #about .url:visited {text-decoration: none;} + +#about .bio {float: right; width: 500px;} + +/* + Footer +*****************/ +#contentinfo {padding-bottom: 2em; text-align: right;} + +/***** Sections *****/ +/* Blog */ +.hentry { + display: block; + clear: both; + border-bottom: 1px solid #eee; + padding: 1.5em 0; +} +li:last-child .hentry, #content > .hentry {border: 0; margin: 0;} +#content > .hentry {padding: 1em 0;} +.hentry img{display : none ;} +.entry-title {font-size: 3em; margin-bottom: 10px; margin-top: 0;} +.entry-title a:link, .entry-title a:visited {text-decoration: none; color: #333;} +.entry-title a:visited {background-color: #fff;} + +.hentry .post-info * {font-style: normal;} + + /* Content */ + .hentry footer {margin-bottom: 2em;} + .hentry footer address {display: inline;} + #posts-list footer address {display: block;} + + /* Blog Index */ + #posts-list {list-style: none; margin: 0;} + #posts-list .hentry {padding-left: 10px; position: relative;} + + #posts-list footer { + left: 10px; + position: relative; + float: left; + top: 0.5em; + width: 190px; + } + + /* About the Author */ + #about-author { + background: #f9f9f9; + clear: both; + font-style: normal; + margin: 2em 0; + padding: 10px 20px 15px 20px; + + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + } + + #about-author strong { + color: #C64350; + clear: both; + display: block; + font-size: 1.429em; + } + + #about-author .photo {border: 1px solid #ddd; float: left; margin: 5px 1em 0 0;} + + /* Comments */ + #comments-list {list-style: none; margin: 0 1em;} + #comments-list blockquote { + background: #f8f8f8; + clear: both; + font-style: normal; + margin: 0; + padding: 15px 20px; + + border-radius: 5px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + } + #comments-list footer {color: #888; padding: .5em 1em 0 0; text-align: right;} + + #comments-list li:nth-child(2n) blockquote {background: #F5f5f5;} + 
+ /* Add a Comment */ + #add-comment label {clear: left; float: left; text-align: left; width: 150px;} + #add-comment input[type='text'], + #add-comment input[type='email'], + #add-comment input[type='url'] {float: left; width: 200px;} + + #add-comment textarea {float: left; height: 150px; width: 495px;} + + #add-comment p.req {clear: both; margin: 0 .5em 1em 0; text-align: right;} + + #add-comment input[type='submit'] {float: right; margin: 0 .5em;} + #add-comment * {margin-bottom: .5em;} diff --git a/pelican/tests/output/custom_locale/theme/css/pygment.css b/pelican/tests/output/custom_locale/theme/css/pygment.css new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/theme/css/pygment.css @@ -0,0 +1,205 @@ +.hll { +background-color:#eee; +} +.c { +color:#408090; +font-style:italic; +} +.err { +border:1px solid #FF0000; +} +.k { +color:#007020; +font-weight:bold; +} +.o { +color:#666666; +} +.cm { +color:#408090; +font-style:italic; +} +.cp { +color:#007020; +} +.c1 { +color:#408090; +font-style:italic; +} +.cs { +background-color:#FFF0F0; +color:#408090; +} +.gd { +color:#A00000; +} +.ge { +font-style:italic; +} +.gr { +color:#FF0000; +} +.gh { +color:#000080; +font-weight:bold; +} +.gi { +color:#00A000; +} +.go { +color:#303030; +} +.gp { +color:#C65D09; +font-weight:bold; +} +.gs { +font-weight:bold; +} +.gu { +color:#800080; +font-weight:bold; +} +.gt { +color:#0040D0; +} +.kc { +color:#007020; +font-weight:bold; +} +.kd { +color:#007020; +font-weight:bold; +} +.kn { +color:#007020; +font-weight:bold; +} +.kp { +color:#007020; +} +.kr { +color:#007020; +font-weight:bold; +} +.kt { +color:#902000; +} +.m { +color:#208050; +} +.s { +color:#4070A0; +} +.na { +color:#4070A0; +} +.nb { +color:#007020; +} +.nc { +color:#0E84B5; +font-weight:bold; +} +.no { +color:#60ADD5; +} +.nd { +color:#555555; +font-weight:bold; +} +.ni { +color:#D55537; +font-weight:bold; +} +.ne { +color:#007020; +} +.nf { +color:#06287E; +} +.nl { +color:#002070; +font-weight:bold; +} +.nn { +color:#0E84B5; +font-weight:bold; +} +.nt { +color:#062873; +font-weight:bold; +} +.nv { +color:#BB60D5; +} +.ow { +color:#007020; +font-weight:bold; +} +.w { +color:#BBBBBB; +} +.mf { +color:#208050; +} +.mh { +color:#208050; +} +.mi { +color:#208050; +} +.mo { +color:#208050; +} +.sb { +color:#4070A0; +} +.sc { +color:#4070A0; +} +.sd { +color:#4070A0; +font-style:italic; +} +.s2 { +color:#4070A0; +} +.se { +color:#4070A0; +font-weight:bold; +} +.sh { +color:#4070A0; +} +.si { +color:#70A0D0; +font-style:italic; +} +.sx { +color:#C65D09; +} +.sr { +color:#235388; +} +.s1 { +color:#4070A0; +} +.ss { +color:#517918; +} +.bp { +color:#007020; +} +.vc { +color:#BB60D5; +} +.vg { +color:#BB60D5; +} +.vi { +color:#BB60D5; +} +.il { +color:#208050; +} diff --git a/pelican/tests/output/custom_locale/theme/css/reset.css b/pelican/tests/output/custom_locale/theme/css/reset.css new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/theme/css/reset.css @@ -0,0 +1,52 @@ +/* + Name: Reset Stylesheet + Description: Resets browser's default CSS + Author: Eric Meyer + Author URI: http://meyerweb.com/eric/tools/css/reset/ +*/ + +/* v1.0 | 20080212 */ +html, body, div, span, applet, object, iframe, +h1, h2, h3, h4, h5, h6, p, blockquote, pre, +a, abbr, acronym, address, big, cite, code, +del, dfn, em, font, img, ins, kbd, q, s, samp, +small, strike, strong, sub, sup, tt, var, +b, u, i, center, +dl, dt, dd, ol, ul, li, +fieldset, form, label, legend, +table, caption, tbody, tfoot, thead, tr, th, td { + 
background: transparent; + border: 0; + font-size: 100%; + margin: 0; + outline: 0; + padding: 0; + vertical-align: baseline; +} + +body {line-height: 1;} + +ol, ul {list-style: none;} + +blockquote, q {quotes: none;} + +blockquote:before, blockquote:after, +q:before, q:after { + content: ''; + content: none; +} + +/* remember to define focus styles! */ +:focus { + outline: 0; +} + +/* remember to highlight inserts somehow! */ +ins {text-decoration: none;} +del {text-decoration: line-through;} + +/* tables still need 'cellspacing="0"' in the markup */ +table { + border-collapse: collapse; + border-spacing: 0; +} \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/theme/css/typogrify.css b/pelican/tests/output/custom_locale/theme/css/typogrify.css new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/theme/css/typogrify.css @@ -0,0 +1,3 @@ +.caps {font-size:.92em;} +.amp {color:#666; font-size:1.05em;font-family:"Warnock Pro", "Goudy Old Style","Palatino","Book Antiqua",serif; font-style:italic;} +.dquo {margin-left:-.38em;} diff --git a/pelican/tests/output/custom_locale/theme/css/wide.css b/pelican/tests/output/custom_locale/theme/css/wide.css new file mode 100644 --- /dev/null +++ b/pelican/tests/output/custom_locale/theme/css/wide.css @@ -0,0 +1,48 @@ +@import url("main.css"); + +body { + font:1.3em/1.3 "Hoefler Text","Georgia",Georgia,serif,sans-serif; +} + +.post-info{ + display: none; +} + +#banner nav { + display: none; + -moz-border-radius: 0px; + margin-bottom: 20px; + overflow: hidden; + font-size: 1em; + background: #F5F4EF; +} + +#banner nav ul{ + padding-right: 50px; +} + +#banner nav li{ + float: right; + color: #000; +} + +#banner nav li a { + color: #000; +} + +#banner h1 { + margin-bottom: -18px; +} + +#featured, #extras { + padding: 50px; +} + +#featured { + padding-top: 20px; +} + +#extras { + padding-top: 0px; + padding-bottom: 0px; +} diff --git a/pelican/tests/output/custom_locale/theme/images/icons/aboutme.png b/pelican/tests/output/custom_locale/theme/images/icons/aboutme.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/aboutme.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/bitbucket.png b/pelican/tests/output/custom_locale/theme/images/icons/bitbucket.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/bitbucket.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/delicious.png b/pelican/tests/output/custom_locale/theme/images/icons/delicious.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/delicious.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/facebook.png b/pelican/tests/output/custom_locale/theme/images/icons/facebook.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/facebook.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/github.png b/pelican/tests/output/custom_locale/theme/images/icons/github.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/github.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/gitorious.png b/pelican/tests/output/custom_locale/theme/images/icons/gitorious.png new file mode 100644 Binary files /dev/null and 
b/pelican/tests/output/custom_locale/theme/images/icons/gitorious.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/gittip.png b/pelican/tests/output/custom_locale/theme/images/icons/gittip.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/gittip.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/google-groups.png b/pelican/tests/output/custom_locale/theme/images/icons/google-groups.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/google-groups.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/google-plus.png b/pelican/tests/output/custom_locale/theme/images/icons/google-plus.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/google-plus.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/hackernews.png b/pelican/tests/output/custom_locale/theme/images/icons/hackernews.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/hackernews.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/lastfm.png b/pelican/tests/output/custom_locale/theme/images/icons/lastfm.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/lastfm.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/linkedin.png b/pelican/tests/output/custom_locale/theme/images/icons/linkedin.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/linkedin.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/reddit.png b/pelican/tests/output/custom_locale/theme/images/icons/reddit.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/reddit.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/rss.png b/pelican/tests/output/custom_locale/theme/images/icons/rss.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/rss.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/slideshare.png b/pelican/tests/output/custom_locale/theme/images/icons/slideshare.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/slideshare.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/speakerdeck.png b/pelican/tests/output/custom_locale/theme/images/icons/speakerdeck.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/speakerdeck.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/stackoverflow.png b/pelican/tests/output/custom_locale/theme/images/icons/stackoverflow.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/stackoverflow.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/twitter.png b/pelican/tests/output/custom_locale/theme/images/icons/twitter.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/twitter.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/vimeo.png b/pelican/tests/output/custom_locale/theme/images/icons/vimeo.png new file mode 100644 Binary files 
/dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/vimeo.png differ diff --git a/pelican/tests/output/custom_locale/theme/images/icons/youtube.png b/pelican/tests/output/custom_locale/theme/images/icons/youtube.png new file mode 100644 Binary files /dev/null and b/pelican/tests/output/custom_locale/theme/images/icons/youtube.png differ diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -2,7 +2,6 @@ from __future__ import unicode_literals, absolute_import import six -from datetime import datetime from sys import platform import locale @@ -10,7 +9,7 @@ from pelican.contents import Page, Article, URLWrapper from pelican.settings import DEFAULT_CONFIG -from pelican.utils import truncate_html_words +from pelican.utils import truncate_html_words, SafeDatetime from pelican.signals import content_object_init from jinja2.utils import generate_lorem_ipsum @@ -127,7 +126,7 @@ def test_metadata_url_format(self): def test_datetime(self): # If DATETIME is set to a tuple, it should be used to override LOCALE - dt = datetime(2015, 9, 13) + dt = SafeDatetime(2015, 9, 13) page_kwargs = self._copy_page_kwargs() diff --git a/pelican/tests/test_pelican.py b/pelican/tests/test_pelican.py --- a/pelican/tests/test_pelican.py +++ b/pelican/tests/test_pelican.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals, print_function import os +import sys from tempfile import mkdtemp from shutil import rmtree import locale @@ -10,7 +11,7 @@ from pelican import Pelican from pelican.settings import read_settings -from pelican.tests.support import LoggedTestCase, mute +from pelican.tests.support import LoggedTestCase, mute, locale_available, unittest CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) SAMPLES_PATH = os.path.abspath(os.path.join( @@ -19,6 +20,7 @@ INPUT_PATH = os.path.join(SAMPLES_PATH, "content") SAMPLE_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf.py") +SAMPLE_FR_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf_FR.py") def recursiveDiff(dcmp): @@ -102,6 +104,27 @@ def test_custom_generation_works(self): mute(True)(pelican.run)() self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'custom')) + @unittest.skipUnless(locale_available('fr_FR.UTF-8') or + locale_available('French'), 'French locale needed') + def test_custom_locale_generation_works(self): + '''Test that generation with fr_FR.UTF-8 locale works''' + old_locale = locale.setlocale(locale.LC_TIME) + + if sys.platform == 'win32': + our_locale = str('French') + else: + our_locale = str('fr_FR.UTF-8') + + settings = read_settings(path=SAMPLE_FR_CONFIG, override={ + 'PATH': INPUT_PATH, + 'OUTPUT_PATH': self.temp_path, + 'CACHE_PATH': self.temp_cache, + 'LOCALE': our_locale, + }) + pelican = Pelican(settings=settings) + mute(True)(pelican.run)() + self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'custom_locale')) + def test_theme_static_paths_copy(self): # the same thing with a specified set of settings should work settings = read_settings(path=SAMPLE_CONFIG, override={ diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals, print_function -import datetime import os from pelican import readers +from pelican.utils import SafeDatetime from pelican.tests.support import unittest, get_settings CUR_DIR = os.path.dirname(__file__) 
@@ -42,8 +42,8 @@ def test_article_with_metadata(self): ' supported\nas well as <strong>inline' ' markup</strong> and stuff to &quot;typogrify' '&quot;...</p>\n', - 'date': datetime.datetime(2010, 12, 2, 10, 14), - 'modified': datetime.datetime(2010, 12, 2, 10, 20), + 'date': SafeDatetime(2010, 12, 2, 10, 14), + 'modified': SafeDatetime(2010, 12, 2, 10, 20), 'tags': ['foo', 'bar', 'foobar'], 'custom_field': 'http://notmyidea.org', } @@ -70,7 +70,7 @@ def test_article_with_filename_metadata(self): 'category': 'yeah', 'author': 'Alexis Métaireau', 'title': 'Rst with filename metadata', - 'date': datetime.datetime(2012, 11, 29), + 'date': SafeDatetime(2012, 11, 29), } for key, value in page.metadata.items(): self.assertEqual(value, expected[key], key) @@ -85,7 +85,7 @@ def test_article_with_filename_metadata(self): 'category': 'yeah', 'author': 'Alexis Métaireau', 'title': 'Rst with filename metadata', - 'date': datetime.datetime(2012, 11, 29), + 'date': SafeDatetime(2012, 11, 29), 'slug': 'article_with_filename_metadata', 'mymeta': 'foo', } @@ -171,8 +171,8 @@ def test_article_with_metadata(self): 'category': 'test', 'title': 'Test md File', 'summary': '<p>I have a lot to test</p>', - 'date': datetime.datetime(2010, 12, 2, 10, 14), - 'modified': datetime.datetime(2010, 12, 2, 10, 20), + 'date': SafeDatetime(2010, 12, 2, 10, 14), + 'modified': SafeDatetime(2010, 12, 2, 10, 20), 'tags': ['foo', 'bar', 'foobar'], } for key, value in metadata.items(): @@ -184,8 +184,8 @@ def test_article_with_metadata(self): 'title': 'マックOS X 10.8でパイソンとVirtualenvをインストールと設定', 'summary': '<p>パイソンとVirtualenvをまっくでインストールする方法について明確に説明します。</p>', 'category': '指導書', - 'date': datetime.datetime(2012, 12, 20), - 'modified': datetime.datetime(2012, 12, 22), + 'date': SafeDatetime(2012, 12, 20), + 'modified': SafeDatetime(2012, 12, 22), 'tags': ['パイソン', 'マック'], 'slug': 'python-virtualenv-on-mac-osx-mountain-lion-10.8', } @@ -220,8 +220,8 @@ def test_article_with_footnote(self): 'summary': ( '<p>Summary with <strong>inline</strong> markup ' '<em>should</em> be supported.</p>'), - 'date': datetime.datetime(2012, 10, 31), - 'modified': datetime.datetime(2012, 11, 1), + 'date': SafeDatetime(2012, 10, 31), + 'modified': SafeDatetime(2012, 11, 1), 'slug': 'article-with-markdown-containing-footnotes', 'multiline': [ 'Line Metadata should be handle properly.', @@ -311,7 +311,7 @@ def test_article_with_filename_metadata(self): expected = { 'category': 'yeah', 'author': 'Alexis Métaireau', - 'date': datetime.datetime(2012, 11, 30), + 'date': SafeDatetime(2012, 11, 30), } for key, value in expected.items(): self.assertEqual(value, page.metadata[key], key) @@ -325,7 +325,7 @@ def test_article_with_filename_metadata(self): expected = { 'category': 'yeah', 'author': 'Alexis Métaireau', - 'date': datetime.datetime(2012, 11, 30), + 'date': SafeDatetime(2012, 11, 30), 'slug': 'md_w_filename_meta', 'mymeta': 'foo', } @@ -358,7 +358,7 @@ def test_article_with_metadata(self): 'author': 'Alexis Métaireau', 'title': 'This is a super article !', 'summary': 'Summary and stuff', - 'date': datetime.datetime(2010, 12, 2, 10, 14), + 'date': SafeDatetime(2010, 12, 2, 10, 14), 'tags': ['foo', 'bar', 'foobar'], 'custom_field': 'http://notmyidea.org', } @@ -382,7 +382,7 @@ def test_article_with_metadata_and_contents_attrib(self): 'author': 'Alexis Métaireau', 'title': 'This is a super article !', 'summary': 'Summary and stuff', - 'date': datetime.datetime(2010, 12, 2, 10, 14), + 'date': SafeDatetime(2010, 12, 2, 10, 14), 'tags': ['foo', 'bar', 'foobar'], 
'custom_field': 'http://notmyidea.org', } diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -3,7 +3,6 @@ import logging import shutil import os -import datetime import time import locale from sys import platform, version_info @@ -38,24 +37,24 @@ def test_deprecated_attribute(self): def test_get_date(self): # valid ones - date = datetime.datetime(year=2012, month=11, day=22) - date_hour = datetime.datetime( + date = utils.SafeDatetime(year=2012, month=11, day=22) + date_hour = utils.SafeDatetime( year=2012, month=11, day=22, hour=22, minute=11) - date_hour_z = datetime.datetime( + date_hour_z = utils.SafeDatetime( year=2012, month=11, day=22, hour=22, minute=11, tzinfo=pytz.timezone('UTC')) - date_hour_est = datetime.datetime( + date_hour_est = utils.SafeDatetime( year=2012, month=11, day=22, hour=22, minute=11, tzinfo=pytz.timezone('EST')) - date_hour_sec = datetime.datetime( + date_hour_sec = utils.SafeDatetime( year=2012, month=11, day=22, hour=22, minute=11, second=10) - date_hour_sec_z = datetime.datetime( + date_hour_sec_z = utils.SafeDatetime( year=2012, month=11, day=22, hour=22, minute=11, second=10, tzinfo=pytz.timezone('UTC')) - date_hour_sec_est = datetime.datetime( + date_hour_sec_est = utils.SafeDatetime( year=2012, month=11, day=22, hour=22, minute=11, second=10, tzinfo=pytz.timezone('EST')) - date_hour_sec_frac_z = datetime.datetime( + date_hour_sec_frac_z = utils.SafeDatetime( year=2012, month=11, day=22, hour=22, minute=11, second=10, microsecond=123000, tzinfo=pytz.timezone('UTC')) dates = { @@ -76,14 +75,14 @@ def test_get_date(self): } # examples from http://www.w3.org/TR/NOTE-datetime - iso_8601_date = datetime.datetime(year=1997, month=7, day=16) - iso_8601_date_hour_tz = datetime.datetime( + iso_8601_date = utils.SafeDatetime(year=1997, month=7, day=16) + iso_8601_date_hour_tz = utils.SafeDatetime( year=1997, month=7, day=16, hour=19, minute=20, tzinfo=pytz.timezone('CET')) - iso_8601_date_hour_sec_tz = datetime.datetime( + iso_8601_date_hour_sec_tz = utils.SafeDatetime( year=1997, month=7, day=16, hour=19, minute=20, second=30, tzinfo=pytz.timezone('CET')) - iso_8601_date_hour_sec_ms_tz = datetime.datetime( + iso_8601_date_hour_sec_ms_tz = utils.SafeDatetime( year=1997, month=7, day=16, hour=19, minute=20, second=30, microsecond=450000, tzinfo=pytz.timezone('CET')) iso_8601 = { @@ -258,7 +257,7 @@ def test_clean_output_dir_is_file(self): self.assertFalse(os.path.exists(test_directory)) def test_strftime(self): - d = datetime.date(2012, 8, 29) + d = utils.SafeDatetime(2012, 8, 29) # simple formatting self.assertEqual(utils.strftime(d, '%d/%m/%y'), '29/08/12') @@ -296,7 +295,7 @@ def test_strftime_locale_dependent_turkish(self): else: locale.setlocale(locale.LC_TIME, str('tr_TR.UTF-8')) - d = datetime.date(2012, 8, 29) + d = utils.SafeDatetime(2012, 8, 29) # simple self.assertEqual(utils.strftime(d, '%d %B %Y'), '29 Ağustos 2012') @@ -329,7 +328,7 @@ def test_strftime_locale_dependent_french(self): else: locale.setlocale(locale.LC_TIME, str('fr_FR.UTF-8')) - d = datetime.date(2012, 8, 29) + d = utils.SafeDatetime(2012, 8, 29) # simple self.assertEqual(utils.strftime(d, '%d %B %Y'), '29 août 2012') @@ -448,7 +447,7 @@ def setUp(self): os.makedirs(template_dir) with open(template_path, 'w') as template_file: template_file.write('date = {{ date|strftime("%A, %d %B %Y") }}') - self.date = datetime.date(2012, 8, 29) + self.date = utils.SafeDatetime(2012, 8, 29) def tearDown(self): 
@@ -464,7 +463,7 @@ def tearDown(self): def test_french_strftime(self): # This test tries to reproduce an issue that occured with python3.3 under macos10 only locale.setlocale(locale.LC_ALL, str('fr_FR.UTF-8')) - date = datetime.datetime(2014,8,14) + date = utils.SafeDatetime(2014,8,14) # we compare the lower() dates since macos10 returns "Jeudi" for %A whereas linux reports "jeudi" self.assertEqual( u'jeudi, 14 août 2014', utils.strftime(date, date_format="%A, %d %B %Y").lower() ) df = utils.DateFormatter()
Links to localized article URL-s with month names are in wrong language On disk, everything is correct: (conf) DEFAULT_LANG = 'et' LOCALE = "et_EE.UTF-8" DATE_FORMATS = { 'et': ('et_EE.UTF-8','%A, %d %B %Y'), } MONTH_ARCHIVE_URL = '{date:%Y}/{date:%B}/' MONTH_ARCHIVE_SAVE_AS = '{date:%Y}/{date:%B}/index.html' and I get on disk: veeb/2013/detsember/index.html Yet the generated HTML (not my idea theme) includes: a href="./2013/December/teine-post/" rel="bookmark" Have I missed something? Test errors on Python 3.3+ on Mac OS X I'm running into two test errors on current Pelican master with Python 3.3 (does not affect Python 2.7). I went back to a commit from about a month prior and got the same error, so I think this has been around for a while. I thought perhaps it might be something with my local environment, but I was able to replicate it in a fresh virtual machine running Mac OS X 10.8.2. As you can see from the error output below, the tests seem to include an un-anticipated element called `alexis-mactaireau.html`. I'm not certain how Alexis's last name is getting butchered in this way, or why it only seems to happen with Python 3.x. Perhaps @dmdm or other folks might know how to investigate this? I'd like to begin merging some of the long-outstanding pull requests, but I thought it might be better to resolve this error first. Following are the exact steps I took on a freshly-created virtual machine running Mac OS X 10.8.2 and Python 3.3: ``` virtualenv -p python3 /Virtualenvs/pelican-py3-test . /Virtualenvs/pelican-py3-test/bin/activate pip install -e git+https://github.com/dmdm/smartypants#egg=smartypants git clone -b py3k https://github.com/dmdm/typogrify $VIRTUAL_ENV/src/typogrify cd $VIRTUAL_ENV/src/typogrify python setup.py develop git clone -b py3k https://github.com/dmdm/webassets $VIRTUAL_ENV/src/webassets cd $VIRTUAL_ENV/src/webassets python setup.py develop git clone https://github.com/getpelican/pelican $VIRTUAL_ENV/src/pelican cd $VIRTUAL_ENV/src/pelican python setup.py develop pip install unittest2py3k pip install -r dev_requirements.txt unit2 discover ``` Test suite output: ``` ====================================================================== FAIL: test_basic_generation_works (tests.test_pelican.TestPelican) ---------------------------------------------------------------------- Traceback (most recent call last): File "/Virtualenvs/pelican-py3-test/src/pelican/tests/test_pelican.py", line 76, in test_basic_generation_works self.assertFilesEqual(recursiveDiff(dcmp)) File "/Virtualenvs/pelican-py3-test/src/pelican/tests/test_pelican.py", line 61, in assertFilesEqual self.assertEqual(diff['left_only'], [], msg=msg) AssertionError: Lists differ: ['/Virtualenvs/pelica... != [] First list contains 1 additional elements. First extra element 0: /Virtualenvs/pelican-py3-test/src/pelican/tests/output/basic/author/alexis-mactaireau.html - ['/Virtualenvs/pelican-py3-test/src/pelican/tests/output/basic/author/alexis-mactaireau.html'] + [] : some generated files differ from the expected functional tests output. This is probably because the HTML generated files changed. If these changes are normal, please refer to docs/contribute.rst to update the expected output of the functional tests. 
====================================================================== FAIL: test_custom_generation_works (tests.test_pelican.TestPelican) ---------------------------------------------------------------------- Traceback (most recent call last): File "/Virtualenvs/pelican-py3-test/src/pelican/tests/test_pelican.py", line 92, in test_custom_generation_works self.assertFilesEqual(recursiveDiff(dcmp)) File "/Virtualenvs/pelican-py3-test/src/pelican/tests/test_pelican.py", line 61, in assertFilesEqual self.assertEqual(diff['left_only'], [], msg=msg) AssertionError: Lists differ: ['/Virtualenvs/pelica... != [] First list contains 1 additional elements. First extra element 0: /Virtualenvs/pelican-py3-test/src/pelican/tests/output/custom/author/alexis-mactaireau.html - ['/Virtualenvs/pelican-py3-test/src/pelican/tests/output/custom/author/alexis-mactaireau.html'] + [] : some generated files differ from the expected functional tests output. This is probably because the HTML generated files changed. If these changes are normal, please refer to docs/contribute.rst to update the expected output of the functional tests. ```
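A minimal, standalone sketch of the mismatch being reported (not Pelican code): `strftime` month names follow the process's `LC_TIME` setting, so two code paths that format the same date under different locale settings will disagree, e.g. `detsember` in the on-disk path versus `December` in the generated link. Assumes the `et_EE.UTF-8` locale from the report is installed.

```python
# Illustration only: locale-dependent month names from the standard library.
import locale
from datetime import datetime

d = datetime(2013, 12, 2)

print(d.strftime('%B'))      # 'December' under the default C/POSIX locale

# Requires the Estonian locale to be installed on the system.
locale.setlocale(locale.LC_TIME, 'et_EE.UTF-8')
print(d.strftime('%B'))      # 'detsember' once LC_TIME is Estonian
```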
Okay, the culprit here is https://github.com/getpelican/pelican/blob/master/pelican/writers.py#L151 from the commit ddcccfeaa952d2e1e24ceac94e5d66c73b57c01b Is this by design? As links are written to the disk in correct locale and URL-s are not OK, I guess it is a bug, introduced by "fixing" another bug which was not understood. Anyone who uses unicode filenames on OSX might also be interested in this fix: https://github.com/davisp/ghp-import/pull/20 I can confirm this. ([current master](https://github.com/getpelican/pelican/tree/c6ff88d0fce7f7ab5e05f2c414a365aa9faa6454)) My settings: ``` python LOCALE = 'de_DE.UTF-8' ARTICLE_URL = 'posts/{date:%Y}/{date:%B}/{date:%d}/{slug}/' ARTICLE_SAVE_AS = ARTICLE_URL + 'index.html' ``` Will save the article at e.g.: `posts/2014/März/12/slug.html` But the same article will be refered with: `posts/2014/March/12/slug.html` @martinpaljak said in #1208 that he had some ascii decoding errors on Python2. We need a full backtrace to investigate. @Scheirle, do you have issues with his PR on Python2? @Scheirle, IIRC, you told me on IRC that you had some decoding problems with `März` in URLs. I think that could be what @martinpaljak was hitting. Did you resolve the issue and how? python2 **without** the PR: ``` DEBUG: signal <blinker.base.NamedSignal object at 0x2b2e81764950; u'article_generator_context'>.send(<pelican.generators.ArticlesGenerator object at 0x2595850>, <metadata>) CRITICAL: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128) Traceback (most recent call last): File "/home/bernhard/.virtualenvs/pelican/bin/pelican", line 9, in <module> load_entry_point('pelican==3.3', 'console_scripts', 'pelican')() File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/__init__.py", line 373, in main pelican.run() File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/__init__.py", line 171, in run signals.finalized.send(self) File "build/bdist.linux-x86_64/egg/blinker/base.py", line 267, in send File "/home/bernhard/Dokumente/Projekte/Website/weblog/plugins/i18n_subsites/i18n_subsites.py", line 83, in create_lang_subsites pelican_obj.run() File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/__init__.py", line 163, in run p.generate_context() File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/generators.py", line 506, in generate_context signals.article_generator_finalized.send(self) File "build/bdist.linux-x86_64/egg/blinker/base.py", line 267, in send File "/home/bernhard/Dokumente/Projekte/Website/weblog/plugins/i18n_subsites/i18n_subsites.py", line 122, in update_generator_contents move_translations_links(article) File "/home/bernhard/Dokumente/Projekte/Website/weblog/plugins/i18n_subsites/i18n_subsites.py", line 100, in move_translations_links translation.override_url = lang_prepend + translation.url File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/contents.py", line 190, in get_url_setting return self._expand_settings(key) File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/contents.py", line 184, in _expand_settings return self.settings[fq_key].format(**self.url_format) UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128) make: *** [html] Fehler 1 ``` python2 **with** the PR: ``` DEBUG: signal <blinker.base.NamedSignal object at 0x2ab6c5e08950; u'article_generator_context'>.send(<pelican.generators.ArticlesGenerator object at 0x2fc3850>, <metadata>) CRITICAL: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128) Traceback 
(most recent call last): File "/home/bernhard/.virtualenvs/pelican/bin/pelican", line 9, in <module> load_entry_point('pelican==3.3', 'console_scripts', 'pelican')() File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/__init__.py", line 373, in main pelican.run() File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/__init__.py", line 171, in run signals.finalized.send(self) File "build/bdist.linux-x86_64/egg/blinker/base.py", line 267, in send File "/home/bernhard/Dokumente/Projekte/Website/weblog/plugins/i18n_subsites/i18n_subsites.py", line 83, in create_lang_subsites pelican_obj.run() File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/__init__.py", line 163, in run p.generate_context() File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/generators.py", line 506, in generate_context signals.article_generator_finalized.send(self) File "build/bdist.linux-x86_64/egg/blinker/base.py", line 267, in send File "/home/bernhard/Dokumente/Projekte/Website/weblog/plugins/i18n_subsites/i18n_subsites.py", line 122, in update_generator_contents move_translations_links(article) File "/home/bernhard/Dokumente/Projekte/Website/weblog/plugins/i18n_subsites/i18n_subsites.py", line 100, in move_translations_links translation.override_url = lang_prepend + translation.url File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/contents.py", line 190, in get_url_setting return self._expand_settings(key) File "/home/bernhard/.virtualenvs/pelican/pelican2/pelican/contents.py", line 184, in _expand_settings return self.settings[fq_key].format(**self.url_format) UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128) make: *** [html] Fehler 1 ``` python3 **without** the PR leads to this bug here. python3 **with** the PR works as expected. As you can see the tracebacks with or without the PR are the same, since pelican crashes befor we write the file. > Did you resolve the issue and how? Currently I am using python 3 with this PR. I think this means that `self.settings[fq_key]` is not a unicode string when `format(...)` is applied to it. @Scheirle, could you please add something like ``` Python logger.critical(self.settings[fq_key]) ``` right before that line 184 in `/home/bernhard/.virtualenvs/pelican/pelican2/pelican/contents.py` that errors out? That could show us which setting it is. I suspect it will be one of those `*_URL` settings that are generated on demand and for osme reason it is not generated as unicode, maybe forgot `from __future__ import unicode_literals` somewhere? I think I found it :) ``` Python from __future__ import unicode_literals ``` needs to be added to `urlwrappers.py` because of line 83 and so on ``` Python save_as = property(functools.partial(_from_settings, key='SAVE_AS')) ``` because without that import `'SAVE_AS'` is not a unicode string. @Scheirle, could you please try adding that import to the top of `/home/bernhard/.virtualenvs/pelican/pelican2/pelican/urlwrappers.py` and see if that helps? Adding `logger.critical(self.settings[fq_key])` outputs <br>`CRITICAL: posts/{date:%Y}/{date:%B}/{date:%d}/{slug}/` right before the crash (This is my `ARTICLE_URL`). But adding `from __future__ import unicode_literals` to `urlwrappers.py` doesn't solve the problem. ##### Edit The date which should be replaced in the url is: `datetime.datetime(2014, 3, 27, 10, 50)` (Month: 3 - März) Also `isinstance(self.settings[fq_key], unicode)` is `True`. 
(With and without the changes in `urlwrappers.py` If `isinstance(self.settings[fq_key], unicode) is True`, then something in `self.url_format` which is given to `format` may not be. Could you please inspect it and see if something in it indeed is not unicode? Everything except - `tags` | is a List `[<Tag pelican>, <Tag pelican plugin>]` - `date` | is a datetime `datetime.datetime(2014, 3, 27, 10, 50)` is unicode in the `self.url_format` dict. `datetime.strftime` doesn't support unicode: https://dpaste.de/MUfm#L13 So `u"posts/{date:%Y}/{date:%B}/{date:%d}/{slug}/".format(...)"` will break with months like "février". ... which is why, I imagine, @avaris implemented an [`strftime` replacement](https://github.com/getpelican/pelican/blob/ac7e2c6d71544cbd67f7570e00304b7dbe2adbbb/pelican/utils.py#L29) to get around the problem. Continuing @saimn's example: ``` In: d.strftime('%B') Out: 'f\xc3\xa9vrier' In: from pelican.utils import strftime In: strftime(d, '%B') Out: u'f\xe9vrier' ``` tl;dr: To remedy this issue: add `'input_encoding': 'UTF-8'` to the hash of `extra_params` in `readers.py:RstReader:_get_publisher()`. But another issue shows up, see below. Afair, we already had this setting patched into Pelican (old tests on Py3.3 were successful as I recall). "mactaireau" is the generated slug for a certain author, "Métaireau". However, the whole process of generating the context and the output, which uses that slug by class Author's "save_as" property turned out to be correct in all aspects. Moreover, "Métaireau" was not butchered in all occurrences as this test output shows: ``` %--%%%%%% slug alexis-metaireau Alexis Métaireau %--%%%%%% slug alexis-mactaireau Alexis Métaireau %--%%%%%% slug alexis-metaireau Alexis Métaireau %--%%%%%% slug alexis-metaireau Alexis Métaireau %--%%%%%% slug alexis-metaireau Alexis Métaireau %--%%%%%% slug alexis-metaireau Alexis Métaireau %--%%%%%% slug alexis-mactaireau Alexis Métaireau %--%%%%%% slug alexis-metaireau Alexis Métaireau %--%%%%%% slug alexis-metaireau Alexis Métaireau %--%%%%%% slug alexis-metaireau Alexis Métaireau %--%%%%%% slug alexis-metaireau Alexis Métaireau %--%%%%%% slug alexis-metaireau Alexis Métaireau %--%%%%%% slug alexis-metaireau Alexis Métaireau !!!!! Alexis Métaireau author/alexis-mactaireau.html ``` The curious labeling "son-of" is the (correct) result of the method `slugify()` which normalizes "Mé" to "MA(C)" and then removes non-alnum chars and lowercases the result: "mac". So, where does the "Mé" come from? All files were correctly encoded in UTF-8 without BOM. Another warning message gave it away: ``` Deuxième article ################ /home/dm/myprojects/pelican-py33-env/src/my/pelican/samples/content/article2-fr.rst:2: (WARNING/2) Title underline too short. ``` Obviously, docutils were not able to deduce the character encoding automatically. Setting it explicitly as shown above solves this issue. The question (for me) remains, what is so different between Python3.3 and Python3.2 that docutils tripped over? In Python3.2 no error occurred. Aloha, the next functional error now has space to unfold: ``` (pelican-py33-env) dm@Morrigan:~/myprojects/pelican-py33-env/src/my/pelican$ ../../../local/bin/unit2 discover Removed extraneous trailing slash from SITEURL. No timezone information specified in the settings. Assuming your timezone is UTC for feed generation. Check http://docs.notmyidea.org/alexis/pelican/settings.html#timezone for more information No timezone information specified in the settings. 
Assuming your timezone is UTC for feed generation. Check http://docs.notmyidea.org/alexis/pelican/settings.html#timezone for more information .Since feed URLs should always be absolute, you should specify FEED_DOMAIN in your settings. (e.g., 'FEED_DOMAIN = http://www.example.com') Feeds generated without SITEURL set properly may not be valid No timezone information specified in the settings. Assuming your timezone is UTC for feed generation. Check http://docs.notmyidea.org/alexis/pelican/settings.html#timezone for more information ....Since feed URLs should always be absolute, you should specify FEED_DOMAIN in your settings. (e.g., 'FEED_DOMAIN = http://www.example.com') Feeds generated without SITEURL set properly may not be valid No timezone information specified in the settings. Assuming your timezone is UTC for feed generation. Check http://docs.notmyidea.org/alexis/pelican/settings.html#timezone for more information ..sss...........ssss...Skipping /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/2012-11-30_md_w_filename_meta#foo-bar.md: impossible to find informations about 'title' there are 2 variants of "this-is-a-super-article" /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_with_uppercase_metadata.rst /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_with_metadata.rst there are 2 variants of "this-is-an-article-without-category" /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_without_category.rst /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/TestCategory/article_without_category.rst .Skipping /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/2012-11-30_md_w_filename_meta#foo-bar.md: impossible to find informations about 'title' there are 2 variants of "this-is-a-super-article" /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_with_uppercase_metadata.rst /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_with_metadata.rst there are 2 variants of "this-is-an-article-without-category" /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_without_category.rst /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/TestCategory/article_without_category.rst .Skipping /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/2012-11-30_md_w_filename_meta#foo-bar.md: impossible to find informations about 'title' there are 2 variants of "this-is-a-super-article" /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_with_uppercase_metadata.rst /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_with_metadata.rst there are 2 variants of "this-is-an-article-without-category" /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_without_category.rst /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/TestCategory/article_without_category.rst ..Skipping /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/2012-11-30_md_w_filename_meta#foo-bar.md: impossible to find informations about 'title' there are 2 variants of "this-is-a-super-article" /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_with_uppercase_metadata.rst /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_with_metadata.rst there are 2 variants of "this-is-an-article-without-category" /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/article_without_category.rst 
/home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/content/TestCategory/article_without_category.rst .Unknown status 'invalid' for file '/home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/TestPages/bad_page.rst', skipping it. .........................ss..............F ====================================================================== FAIL: test_custom_generation_works (tests.test_pelican.TestPelican) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/test_pelican.py", line 93, in test_custom_generation_works self.assertFilesEqual(recursiveDiff(dcmp)) File "/home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/test_pelican.py", line 64, in assertFilesEqual self.assertEqual(diff['diff_files'], [], msg=msg) AssertionError: Lists differ: ['/home/dm/myprojects/pelican-... != [] First list contains 5 additional elements. First extra element 0: /home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/output/custom/feeds/cat1.rss.xml + [] - ['/home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/output/custom/feeds/cat1.rss.xml', - '/home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/output/custom/feeds/misc.rss.xml', - '/home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/output/custom/feeds/bar.rss.xml', - '/home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/output/custom/feeds/yeah.rss.xml', - '/home/dm/myprojects/pelican-py33-env/src/my/pelican/tests/output/custom/feeds/all.rss.xml'] : some generated files differ from the expected functional tests output. This is probably because the HTML generated files changed. If these changes are normal, please refer to docs/contribute.rst to update the expected output of the functional tests. ---------------------------------------------------------------------- Ran 75 tests in 1.160s FAILED (failures=1, skipped=9) ``` Please help :) Hi Dirk. Excellent sleuthing! When I made the change you suggested... ``` diff --git a/pelican/readers.py b/pelican/readers.py index a3c4364..ece2c00 100644 --- a/pelican/readers.py +++ b/pelican/readers.py @@ -117,6 +117,7 @@ class RstReader(Reader): def _get_publisher(self, source_path): extra_params = {'initial_header_level': '2', + 'input_encoding': 'UTF-8', 'syntax_highlight': 'short'} pub = docutils.core.Publisher( destination_class=docutils.io.StringOutput) ``` ... and ran `unit2 discover`, all tests passed without any failures. I didn't see the failure you list above. But then, just to be sure, I ran the test suite again, and then I saw similar errors to what you describe above. I ran the rest suite a few more times, and saw similar errors relating to extra feed elements. Around the 4th or 5th time, the test suite ran without any errors. 6th time --> error. 7th time --> no errors. So it seems this is an intermittent problem, and I have no idea _why_ the test suite yields errors on some runs but no errors on other runs — with no changes in between. It almost seems random. Anyone have any suggestions as to how to rectify this? This is an issue with feedgenerator, which uses a dict to store attributes, so these attributes are not always written in the same order. I reported this issue in https://github.com/getpelican/pelican/issues/688#issuecomment-12457148 but python 3.3 seems to not be supported yet by pelican and feedgenerator. Thanks, saimn, for the pointer. 
Some test-runs create the feed XML like this: ``` <rss xmlns:atom="http://www.w3.org/2005/Atom" version="2.0"> ``` others with the attributes in a different order: ``` <rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"> ``` Interestingly it seems to not be deterministic, which order is applied. Maybe it has sth to do with Python3.3's new [key sharing dicts](http://docs.python.org/3/whatsnew/3.3.html#pep-412-key-sharing-dictionary): > Changes to repr() output and iteration order: For most cases, this will be unchanged. However for some split-table dictionaries the iteration order will change. -- [PEP412, Cons](http://www.python.org/dev/peps/pep-0412/#cons) Using an OrderedDict as return value [here](https://github.com/dmdm/feedgenerator-py3k/blob/master/feedgenerator/django/utils/feedgenerator.py#L223) [del]solves the issue[/del][add]is a dirty hack[/add] for feedgenerator; Pelican screams even louder now. Corrections and further improvements are welcome. Thanks to @avaris, there is a fix for these test failures, pending updated releases of feedgenerator and smartypants. Once the latter releases are available, Avaris will submit his fix. I get no errors. Does anyone? @tshepang: Yes, I still get errors on current master. Output from `python -m unittest discover` on cb82e48 on Mac OS X follows. ``` ====================================================================== FAIL: test_french_locale (pelican.tests.test_utils.TestDateFormatter) ---------------------------------------------------------------------- Traceback (most recent call last): File "./pelican/tests/test_utils.py", line 377, in test_french_locale utils.strftime(self.date, 'date = %A, %d %B %Y')) AssertionError: 'date = Mercredi, 29 août 2012' != 'date = Mercredi, 29 août 2012' - date = Mercredi, 29 août 2012 ? ^^ + date = Mercredi, 29 août 2012 ? ^ ====================================================================== FAIL: test_turkish_locale (pelican.tests.test_utils.TestDateFormatter) ---------------------------------------------------------------------- Traceback (most recent call last): File "./pelican/tests/test_utils.py", line 406, in test_turkish_locale utils.strftime(self.date, 'date = %A, %d %B %Y')) AssertionError: 'date = Ã\x87arÅ\x9famba, 29 AÄ\x9fustos 2012' != 'date = Çarşamba, 29 Ağustos 2012' - date = Çarşamba, 29 Ağustos 2012 ? ^^ ^^ ^^ + date = Çarşamba, 29 Ağustos 2012 ? ^ ^ ^ ---------------------------------------------------------------------- Ran 94 tests in 10.426s FAILED (failures=2, skipped=11) ``` Ok, those locale tests were skipped since I didn't install the locales. Will do and test. No error for me with the french locale test ... Works for me, but am using Debian. Hi, I got the same result as @justinmayer on MacOS 10.9, Python 3.3, Pelican master branch. Does @tshepang's PR entirely solve this problem? Or perhaps, I missed something. > Does @tshepang's PR entirely solve this problem? Or perhaps, I missed something. Which PR are you talking about? I'm not aware of one that solves this problem. @justinmayer Sorry, I made a mistake. The PR mentioned above is not about this issue. Interestingly, PR #1208 by @martinpaljak seems to rectify this test failure on Python 3.3+ on Mac OS X.
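A rough, simplified sketch of the workaround discussed in the comments above (the helper name is invented for illustration; this is not the actual `pelican.utils.strftime`): on Python 2, `strftime()` returns bytes in the locale's encoding, so the month name is decoded to unicode before being substituted into a URL pattern with `str.format()`.

```python
# Simplified illustration of the decode-before-format idea.
import locale
from datetime import datetime

def month_for_url(d):
    name = d.strftime('%B')
    if isinstance(name, bytes):                       # Python 2 path
        name = name.decode(locale.getpreferredencoding())
    return name

url = u'posts/{year}/{month}/'.format(year=2014,
                                      month=month_for_url(datetime(2014, 2, 1)))
print(url)
```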
2014-06-26T17:56:54Z
[]
[]
getpelican/pelican
1446
getpelican__pelican-1446
[ "1395" ]
b35759224825c72ea7789193ddb246881a537454
diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -42,8 +42,11 @@ def strftime(date, date_format): replacing formatted output back. ''' + c89_directives = 'aAbBcdfHIjmMpSUwWxXyYzZ%' + strip_zeros = lambda x: x.lstrip('0') or '0' + # grab candidate format options - format_options = '%.' + format_options = '%[-]?.' candidates = re.findall(format_options, date_format) # replace candidates with placeholders for later % formatting @@ -56,14 +59,28 @@ def strftime(date, date_format): formatted_candidates = [] for candidate in candidates: # test for valid C89 directives only - if candidate[1] in 'aAbBcdfHIjmMpSUwWxXyYzZ%': + if candidate[-1] in c89_directives: + # check for '-' prefix + if len(candidate) == 3: + # '-' prefix + candidate = '%{}'.format(candidate[-1]) + conversion = strip_zeros + else: + conversion = None + + # format date if isinstance(date, SafeDatetime): formatted = date.strftime(candidate, safe=False) else: formatted = date.strftime(candidate) + # convert Py2 result to unicode if not six.PY3 and enc is not None: formatted = formatted.decode(enc) + + # strip zeros if '-' prefix is used + if conversion: + formatted = conversion(formatted) else: formatted = candidate formatted_candidates.append(formatted)
diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -280,6 +280,13 @@ def test_strftime(self): self.assertEqual(utils.strftime(d, '%d/%m/%Y Øl trinken beim Besäufnis'), '29/08/2012 Øl trinken beim Besäufnis') + # alternative formatting options + self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '29/8/12') + self.assertEqual(utils.strftime(d, '%-H:%-M:%-S'), '0:0:0') + + d = utils.SafeDatetime(2012, 8, 9) + self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '9/8/12') + # test the output of utils.strftime in a different locale # Turkish locale
Allow non zero padded date formats When setting DEFAULT_DATE_FORMAT you can only use zero-padded date formats. Arguably this is a bug in Python (or even C89), but it would be nice to extend the available formats to allow nicer ones.
AFAIK `datetime.strftime` supports other codes, but we would have to add them to our `pelican.utils.strftime` as it uses only C89 atm. So it is most likely doable, just portability is the problem. Adding alternative formatting options that do `.lstrip('0')` is nearly trivial. If we decide on the format, I can add them. Some `strftime` implementations use `-` prefix for that (e.g. `%-d`, `%-m`). It shouldn't be too hard to adapt `pelican.utils.strftime` to strip zeros for those. That is also a possibility, but it might be simpler to extend the C89 identifiers to include e.g. `%e` as an alternative to `%d`, which comes from the Single Unix specification; see **strftime(3)** for details. `date.strftime` supports them already. I'll tag this with the 3.4.1 milestone as our strftime change broke this for some people. But `strftime(3)` doesn't have an option for a non-padded month. We need to add that in any case. Might as well do it for both in a consistent (and platform-independent) way. Alternatively, we can support both `%e` and `-` prefixed versions. I agree with @avaris that the more options the user has the better, especially since our `pelican.utils.strftime` would make it easy to implement the `-` functionality. Now it's just a question of which chars to add to the C89 set already used. I vote for using anything appearing in **strftime(3)**, i.e. SU, C99, etc.
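A standalone sketch of the `-` prefix idea discussed here (the helper name is invented; the change that actually landed lives in `pelican.utils.strftime`): format the corresponding zero-padded C89 directive and strip the leading zeros from its output.

```python
# '%-d' style directives: reuse '%d' and strip leading zeros afterwards.
import re
from datetime import datetime

def strftime_dash(d, fmt):
    def repl(match):
        directive = match.group(0)
        if directive.startswith('%-'):
            out = d.strftime('%' + directive[-1])
            return out.lstrip('0') or '0'
        return d.strftime(directive)
    return re.sub(r'%-?[A-Za-z%]', repl, fmt)

print(strftime_dash(datetime(2012, 8, 9), '%-d/%-m/%y'))   # 9/8/12
```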
2014-08-22T22:20:24Z
[]
[]
getpelican/pelican
1501
getpelican__pelican-1501
[ "1500" ]
a81fcd3fefa7ffce657805163cfb32470c962bf9
diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -122,13 +122,21 @@ def get_files(self, paths, exclude=[], extensions=None): """ if isinstance(paths, six.string_types): paths = [paths] # backward compatibility for older generators + + # group the exclude dir names by parent path, for use with os.walk() + exclusions_by_dirpath = {} + for e in exclude: + parent_path, subdir = os.path.split(os.path.join(self.path, e)) + exclusions_by_dirpath.setdefault(parent_path, set()).add(subdir) + files = [] for path in paths: - root = os.path.join(self.path, path) + # careful: os.path.join() will add a slash when path == ''. + root = os.path.join(self.path, path) if path else self.path if os.path.isdir(root): for dirpath, dirs, temp_files in os.walk(root, followlinks=True): - for e in exclude: + for e in exclusions_by_dirpath.get(dirpath, ()): if e in dirs: dirs.remove(e) reldir = os.path.relpath(dirpath, self.path)
diff --git a/pelican/tests/nested_content/maindir/maindir.md b/pelican/tests/nested_content/maindir/maindir.md new file mode 100644 --- /dev/null +++ b/pelican/tests/nested_content/maindir/maindir.md @@ -0,0 +1,3 @@ +Title: Main Dir Page + +This page lives in maindir. diff --git a/pelican/tests/nested_content/maindir/subdir/subdir.md b/pelican/tests/nested_content/maindir/subdir/subdir.md new file mode 100644 --- /dev/null +++ b/pelican/tests/nested_content/maindir/subdir/subdir.md @@ -0,0 +1,3 @@ +Title: Subdir Page + +This page lives in maindir/subdir. diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -41,6 +41,38 @@ def test_include_path(self): self.assertTrue(include_path(filename, extensions=('rst',))) self.assertFalse(include_path(filename, extensions=('md',))) + def test_get_files_exclude(self): + """Test that Generator.get_files() properly excludes directories. + """ + # We use our own Generator so we can give it our own content path + generator = Generator(context=self.settings.copy(), + settings=self.settings, + path=os.path.join(CUR_DIR, 'nested_content'), + theme=self.settings['THEME'], output_path=None) + + filepaths = generator.get_files(paths=['maindir']) + found_files = {os.path.basename(f) for f in filepaths} + expected_files = {'maindir.md', 'subdir.md'} + self.assertFalse(expected_files - found_files, + "get_files() failed to find one or more files") + + filepaths = generator.get_files(paths=[''], exclude=['maindir']) + found_files = {os.path.basename(f) for f in filepaths} + self.assertNotIn('maindir.md', found_files, + "get_files() failed to exclude a top-level directory") + self.assertNotIn('subdir.md', found_files, + "get_files() failed to exclude a subdir of an excluded directory") + + filepaths = generator.get_files(paths=[''], + exclude=[os.path.join('maindir', 'subdir')]) + found_files = {os.path.basename(f) for f in filepaths} + self.assertNotIn('subdir.md', found_files, + "get_files() failed to exclude a subdirectory") + + filepaths = generator.get_files(paths=[''], exclude=['subdir']) + found_files = {os.path.basename(f) for f in filepaths} + self.assertIn('subdir.md', found_files, + "get_files() excluded a subdirectory by name, ignoring its path") class TestArticlesGenerator(unittest.TestCase):
PAGE_EXCLUDES and ARTICLE_EXCLUDES don't work properly with subdirectories. I discovered today that setting `PAGE_EXCLUDES=['foo']` excludes all directories named `foo`, regardless of whether they're in the top-level content directory or inside a subdirectory whose entire contents should be included. Meanwhile, setting `PAGE_EXCLUDES=['subdir/foo']` never excludes any directories; not even the `foo` that lives within `subdir`. In other words, there is no way to exclude a subdirectory without risking the accidental exclusion of other directories with the same name elsewhere in the file system. `ARTICLE_EXCLUDES` has the same problem.
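Condensed, the approach taken in the `get_files()` change above looks roughly like this (the helper name is made up for illustration): group the excluded names by the parent path they were given under, so `os.walk()` only prunes a directory when its full location matches what the user wrote.

```python
# Path-aware excludes: 'subdir/foo' prunes only the 'foo' under 'subdir',
# not every directory named 'foo' in the tree.
import os

def walk_with_excludes(root, excludes):
    by_parent = {}
    for e in excludes:
        parent, name = os.path.split(os.path.join(root, e))
        by_parent.setdefault(parent, set()).add(name)
    for dirpath, dirs, files in os.walk(root):
        dirs[:] = [d for d in dirs if d not in by_parent.get(dirpath, set())]
        for f in files:
            yield os.path.join(dirpath, f)

# e.g. list(walk_with_excludes('content', ['maindir/subdir']))
```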
2014-10-13T05:16:58Z
[]
[]
getpelican/pelican
1507
getpelican__pelican-1507
[ "949" ]
17c551c7934935ab5959b3705eafc69874054ed7
diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py --- a/pelican/rstdirectives.py +++ b/pelican/rstdirectives.py @@ -70,7 +70,7 @@ def run(self): directives.register_directive('sourcecode', Pygments) -_abbr_re = re.compile('\((.*)\)$') +_abbr_re = re.compile('\((.*)\)$', re.DOTALL) class abbreviation(nodes.Inline, nodes.TextElement):
diff --git a/pelican/tests/test_rstdirectives.py b/pelican/tests/test_rstdirectives.py new file mode 100644 --- /dev/null +++ b/pelican/tests/test_rstdirectives.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function + +from mock import Mock +from pelican.tests.support import unittest + +class Test_abbr_role(unittest.TestCase): + def call_it(self, text): + from pelican.rstdirectives import abbr_role + rawtext = text + lineno = 42 + inliner = Mock(name='inliner') + nodes, system_messages = abbr_role( + 'abbr', rawtext, text, lineno, inliner) + self.assertEqual(system_messages, []) + self.assertEqual(len(nodes), 1) + return nodes[0] + + def test(self): + node = self.call_it("Abbr (Abbreviation)") + self.assertEqual(node.astext(), "Abbr") + self.assertEqual(node['explanation'], "Abbreviation") + + def test_newlines_in_explanation(self): + node = self.call_it("CUL (See you\nlater)") + self.assertEqual(node.astext(), "CUL") + self.assertEqual(node['explanation'], "See you\nlater") + + def test_newlines_in_abbr(self): + node = self.call_it("US of\nA \n (USA)") + self.assertEqual(node.astext(), "US of\nA") + self.assertEqual(node['explanation'], "USA")
abbr support doesn't work for multiline Eg: ``` rst this is an :abbr:`TLA (Three Letter Abbreviation)` ``` will output `<abbr>TLA (Three Letter Abbreviation)</abbr>` instead of `<abbr title="Three Letter Abbreviation">TLA</abbr>` I believe this could be fixed by adding the `re.M` flag to the `re.compile` call on this line: https://github.com/getpelican/pelican/blob/636fd6cc380f2537924532a587c70e96a386e25c/pelican/rstdirectives.py#L101 This refs ticket #395
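For context, the underlying cause is that `.` does not match newlines by default, so an explanation wrapped onto a second source line never matches; the one-line fix above switches the pattern to `re.DOTALL` (rather than `re.M`, which only changes how `^` and `$` behave). A quick standalone illustration:

```python
# '.' stops at '\n' by default; re.DOTALL lets the group span the newline.
import re

text = "CUL (See you\nlater)"

print(re.search(r'\((.*)\)$', text))                          # None
print(re.search(r'\((.*)\)$', text, re.DOTALL).group(1))      # See you\nlater
```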
I'm trying to work out why this is happening. Is this (in terms of docutils) a higher priority than the transform that removes the newlines? Hi Mattieu and Russ. Would either of you care to work on this and help improve how it's handled? Sure thing, Captain. Hi @magopian and @russkel. Just checking in. Would either of you care to work on this issue?
2014-10-16T14:12:36Z
[]
[]
getpelican/pelican
1516
getpelican__pelican-1516
[ "1019" ]
369b8b84745e60affd48257c9c9ccb914c07eb22
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -208,7 +208,9 @@ def get_generator_classes(self): logger.debug('Found generator: %s', v) generators.append(v) - # StaticGenerator runs last so it can see which files the others handle + # StaticGenerator must run last, so it can identify files that + # were skipped by the other generators, and so static files can + # have their output paths overridden by the {attach} link syntax. generators.append(StaticGenerator) return generators diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -219,7 +219,7 @@ def replacer(m): origin = m.group('path') # XXX Put this in a different location. - if what == 'filename': + if what in {'filename', 'attach'}: if path.startswith('/'): path = path[1:] else: @@ -234,9 +234,16 @@ def replacer(m): if unquoted_path in self._context['filenames']: path = unquoted_path - if self._context['filenames'].get(path): - origin = '/'.join((siteurl, - self._context['filenames'][path].url)) + linked_content = self._context['filenames'].get(path) + if linked_content: + if what == 'attach': + if isinstance(linked_content, Static): + linked_content.attach_to(self) + else: + logger.warning("%s used {attach} link syntax on a " + "non-static file. Use {filename} instead.", + self.get_relative_source_path()) + origin = '/'.join((siteurl, linked_content.url)) origin = origin.replace('\\', '/') # for Windows paths. else: logger.warning( @@ -359,6 +366,10 @@ class Quote(Page): @python_2_unicode_compatible class Static(Page): + def __init__(self, *args, **kwargs): + super(Static, self).__init__(*args, **kwargs) + self._output_location_referenced = False + @deprecated_attribute(old='filepath', new='source_path', since=(3, 2, 0)) def filepath(): return None @@ -371,6 +382,65 @@ def src(): def dst(): return None + @property + def url(self): + # Note when url has been referenced, so we can avoid overriding it. + self._output_location_referenced = True + return super(Static, self).url + + @property + def save_as(self): + # Note when save_as has been referenced, so we can avoid overriding it. + self._output_location_referenced = True + return super(Static, self).save_as + + def attach_to(self, content): + """Override our output directory with that of the given content object. + """ + # Determine our file's new output path relative to the linking document. + # If it currently lives beneath the linking document's source directory, + # preserve that relationship on output. Otherwise, make it a sibling. + linking_source_dir = os.path.dirname(content.source_path) + tail_path = os.path.relpath(self.source_path, linking_source_dir) + if tail_path.startswith(os.pardir + os.sep): + tail_path = os.path.basename(tail_path) + new_save_as = os.path.join( + os.path.dirname(content.save_as), tail_path) + + # We do not build our new url by joining tail_path with the linking + # document's url, because we cannot know just by looking at the latter + # whether it points to the document itself or to its parent directory. + # (An url like 'some/content' might mean a directory named 'some' + # with a file named 'content', or it might mean a directory named + # 'some/content' with a file named 'index.html'.) Rather than trying + # to figure it out by comparing the linking document's url and save_as + # path, we simply build our new url from our new save_as path. 
+ new_url = path_to_url(new_save_as) + + def _log_reason(reason): + logger.warning("The {attach} link in %s cannot relocate %s " + "because %s. Falling back to {filename} link behavior instead.", + content.get_relative_source_path(), + self.get_relative_source_path(), reason, + extra={'limit_msg': "More {attach} warnings silenced."}) + + # We never override an override, because we don't want to interfere + # with user-defined overrides that might be in EXTRA_PATH_METADATA. + if hasattr(self, 'override_save_as') or hasattr(self, 'override_url'): + if new_save_as != self.save_as or new_url != self.url: + _log_reason("its output location was already overridden") + return + + # We never change an output path that has already been referenced, + # because we don't want to break links that depend on that path. + if self._output_location_referenced: + if new_save_as != self.save_as or new_url != self.url: + _log_reason("another link already referenced its location") + return + + self.override_save_as = new_save_as + self.override_url = new_url + def is_valid_content(content, f): try:
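Condensed, the relocation rule that `attach_to()` applies can be sketched like this (the helper name is invented for illustration; the real method also refuses to relocate files whose output location was overridden or already referenced):

```python
# Keep the static file's position relative to the linking document's source
# directory; if it lives outside that directory, make it a sibling instead.
import os

def attached_save_as(static_source, content_source, content_save_as):
    linking_dir = os.path.dirname(content_source)
    tail = os.path.relpath(static_source, linking_dir)
    if tail.startswith(os.pardir + os.sep):
        tail = os.path.basename(tail)
    return os.path.join(os.path.dirname(content_save_as), tail)

print(attached_save_as(os.path.join('dir', 'foo.jpg'),
                       os.path.join('dir', 'fakepage.md'),
                       os.path.join('outpages', 'fakepage.html')))
# -> outpages/foo.jpg
```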
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -4,12 +4,13 @@ import six from sys import platform import locale +import os.path from pelican.tests.support import unittest, get_settings -from pelican.contents import Page, Article, URLWrapper +from pelican.contents import Page, Article, Static, URLWrapper from pelican.settings import DEFAULT_CONFIG -from pelican.utils import truncate_html_words, SafeDatetime +from pelican.utils import path_to_url, truncate_html_words, SafeDatetime from pelican.signals import content_object_init from jinja2.utils import generate_lorem_ipsum @@ -401,6 +402,148 @@ def test_slugify_category_author(self): self.assertEqual(article.save_as, 'obrien/csharp-stuff/fnord/index.html') +class TestStatic(unittest.TestCase): + + def setUp(self): + + self.settings = get_settings( + STATIC_SAVE_AS='{path}', + STATIC_URL='{path}', + PAGE_SAVE_AS=os.path.join('outpages', '{slug}.html'), + PAGE_URL='outpages/{slug}.html') + self.context = self.settings.copy() + + self.static = Static(content=None, metadata={}, settings=self.settings, + source_path=os.path.join('dir', 'foo.jpg'), context=self.context) + + self.context['filenames'] = {self.static.source_path: self.static} + + def tearDown(self): + pass + + def test_attach_to_same_dir(self): + """attach_to() overrides a static file's save_as and url. + """ + page = Page(content="fake page", + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'fakepage.md')) + self.static.attach_to(page) + + expected_save_as = os.path.join('outpages', 'foo.jpg') + self.assertEqual(self.static.save_as, expected_save_as) + self.assertEqual(self.static.url, path_to_url(expected_save_as)) + + def test_attach_to_parent_dir(self): + """attach_to() preserves dirs inside the linking document dir. + """ + page = Page(content="fake page", metadata={'title': 'fakepage'}, + settings=self.settings, source_path='fakepage.md') + self.static.attach_to(page) + + expected_save_as = os.path.join('outpages', 'dir', 'foo.jpg') + self.assertEqual(self.static.save_as, expected_save_as) + self.assertEqual(self.static.url, path_to_url(expected_save_as)) + + def test_attach_to_other_dir(self): + """attach_to() ignores dirs outside the linking document dir. + """ + page = Page(content="fake page", + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'otherdir', 'fakepage.md')) + self.static.attach_to(page) + + expected_save_as = os.path.join('outpages', 'foo.jpg') + self.assertEqual(self.static.save_as, expected_save_as) + self.assertEqual(self.static.url, path_to_url(expected_save_as)) + + def test_attach_to_ignores_subsequent_calls(self): + """attach_to() does nothing when called a second time. 
+ """ + page = Page(content="fake page", + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'fakepage.md')) + + self.static.attach_to(page) + + otherdir_settings = self.settings.copy() + otherdir_settings.update(dict( + PAGE_SAVE_AS=os.path.join('otherpages', '{slug}.html'), + PAGE_URL='otherpages/{slug}.html')) + otherdir_page = Page(content="other page", + metadata={'title': 'otherpage'}, settings=otherdir_settings, + source_path=os.path.join('dir', 'otherpage.md')) + + self.static.attach_to(otherdir_page) + + otherdir_save_as = os.path.join('otherpages', 'foo.jpg') + self.assertNotEqual(self.static.save_as, otherdir_save_as) + self.assertNotEqual(self.static.url, path_to_url(otherdir_save_as)) + + def test_attach_to_does_nothing_after_save_as_referenced(self): + """attach_to() does nothing if the save_as was already referenced. + (For example, by a {filename} link an a document processed earlier.) + """ + original_save_as = self.static.save_as + + page = Page(content="fake page", + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'fakepage.md')) + self.static.attach_to(page) + + self.assertEqual(self.static.save_as, original_save_as) + self.assertEqual(self.static.url, path_to_url(original_save_as)) + + def test_attach_to_does_nothing_after_url_referenced(self): + """attach_to() does nothing if the url was already referenced. + (For example, by a {filename} link an a document processed earlier.) + """ + original_url = self.static.url + + page = Page(content="fake page", + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'fakepage.md')) + self.static.attach_to(page) + + self.assertEqual(self.static.save_as, self.static.source_path) + self.assertEqual(self.static.url, original_url) + + def test_attach_to_does_not_override_an_override(self): + """attach_to() does not override paths that were overridden elsewhere. + (For example, by the user with EXTRA_PATH_METADATA) + """ + customstatic = Static(content=None, + metadata=dict(save_as='customfoo.jpg', url='customfoo.jpg'), + settings=self.settings, + source_path=os.path.join('dir', 'foo.jpg'), + context=self.settings.copy()) + + page = Page(content="fake page", + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'fakepage.md')) + + customstatic.attach_to(page) + + self.assertEqual(customstatic.save_as, 'customfoo.jpg') + self.assertEqual(customstatic.url, 'customfoo.jpg') + + def test_attach_link_syntax(self): + """{attach} link syntax triggers output path override & url replacement. 
+ """ + html = '<a href="{attach}../foo.jpg">link</a>' + page = Page(content=html, + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), + context=self.context) + content = page.get_content('') + + self.assertNotEqual(content, html, + "{attach} link syntax did not trigger URL replacement.") + + expected_save_as = os.path.join('outpages', 'foo.jpg') + self.assertEqual(self.static.save_as, expected_save_as) + self.assertEqual(self.static.url, path_to_url(expected_save_as)) + + class TestURLWrapper(unittest.TestCase): def test_comparisons(self): # URLWrappers are sorted by name diff --git a/pelican/tests/test_pelican.py b/pelican/tests/test_pelican.py --- a/pelican/tests/test_pelican.py +++ b/pelican/tests/test_pelican.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals, print_function +import collections import os import sys from tempfile import mkdtemp @@ -77,14 +78,17 @@ def assertDirsEqual(self, left_path, right_path): assert not err, err def test_order_of_generators(self): - # StaticGenerator must run last, so it can find files that were - # skipped by the other generators. + # StaticGenerator must run last, so it can identify files that + # were skipped by the other generators, and so static files can + # have their output paths overridden by the {attach} link syntax. pelican = Pelican(settings=read_settings(path=None)) generator_classes = pelican.get_generator_classes() self.assertTrue(generator_classes[-1] is StaticGenerator, "StaticGenerator must be the last generator, but it isn't!") + self.assertIsInstance(generator_classes, collections.Sequence, + "get_generator_classes() must return a Sequence to preserve order") def test_basic_generation_works(self): # when running pelican without settings, it should pick up the default
Images can be anywhere in the content directory, not only static paths

Hello

Using Pelican version 3.2.2

```
→ pelican --version
3.2.2
```

With the current file

```
Title: Network Panel in Firefox 23 Developer Tools
Date: 2013-08-07
Slug: network-panel-firefox
Status: draft
[…]
![Firefox 22 Screenshot with developer tools](|filename|firefox22-devtools.jpg)
[…]
![Firefox 23 Screenshot with developer tools](|filename|firefox23-devtools.jpg)
```

In the current structure

```
→ ls -1 content/2013/08/07/
firefox22-devtools.jpg
firefox23-devtools.jpg
network-panel.md
```

And generating the HTML

```
→ make html
```

creates the following output, among other lines:

```
WARNING: Unable to find 2013/08/07/firefox22-devtools.jpg, skipping url replacement
WARNING: Unable to find 2013/08/07/firefox23-devtools.jpg, skipping url replacement
```

Then accessing the draft folder

```
http://localhost:8000/drafts/network-panel-firefox.html
```

Images are not displayed. Inside the HTML, the code is

```
<p><img alt="Firefox 23 Screenshot with developer tools" src="|filename|firefox23-devtools.jpg" /></p>
```

`make rsync_upload` has the same issue.

Using

```
RELATIVE_URLS = True
```

doesn't solve the issue.

## What I would expect

The images to be displayed and put at the right place, `2013/08/07/firefox23-devtools.jpg`.

Thanks.
This seems like a dup of #865, does it not? @justinmayer maybe. Let me do another test without the draft status to be sure. OK. Similar issue, but not related to draft status only. ``` WARNING: Unable to find 2012/10/26/script-not-working.png, skipping url replacement ``` On this [blog post](http://www.otsukare.info/2012/10/26/usability-of-browsers-error-messages) View source ``` <p><img alt="Modal window" src="|filename|script-not-working.png" /></p> ``` In https://github.com/getpelican/pelican/blob/master/pelican/contents.py#L212 ``` if value in self._context['filenames']: ``` is a table of all objects supposed to be part of the site (there will be a memory tax here for big sites, but that's another topic ;) ). I just checked and see that the image files which are side by side with the current blog post are not listed at all. So the table is incomplete and then the else is executed. > [...] the image files which are side by side with the current blog post are not listed at all If I'm not mistaken, this is how the `STATIC_PATHS` setting is designed — i.e., only images within directories listed in `STATIC_PATHS` will be copied over. Oh so you can't do that? ``` → ls -1 content/2013/08/07/ firefox22-devtools.jpg firefox23-devtools.jpg network-panel.md ``` Putting everything in one folder is not wise. It's good for logos and stuff but for the rest not really a good idea. clutter the namespace makes the folder a mess, far from home (aka the article). OK I guess I will have to hack a bit myself to find out how to modify the code. :/ Yes, upon further review, this is actually documented quite thoroughly (probably by me) in the [Linking to internal content](http://docs.getpelican.com/en/latest/getting_started.html#linking-to-internal-content) section. That said, I agree that additional flexibility would be beneficial. I'm sorry this is not currently available out-of-the-box. I'm sure any assistance you could provide in implementing said flexibility would be appreciated by others as well. :^) I'm changing the title to make it easier to find the feature for future search. +1 I have this use case. I’d like to explain the rationale for keeping all content together (rather than text in one folder, and relevant image, binary, or other content formats in a separate asset folder). 1. If I write a post that includes one or several content images (illustration, figures, screenshots), or self-hosted videos, or relevant PDFs, it makes logical sense to keep related content together in the same folder. This is especially true — and useful — if I use one folder per post or page. 2. When people routinely wrote HTML sites by hand, this is the kind of pattern they used: keep related content together. 3. CMSs introduced technical limitations: either the text was in a database, or it was in a specific repository of text files (a XML database for instance). So virtually every CMS decided to use a separate directory for “static” or “media” files, called `uploads` or `media` or something similar. See for instance the infamous `wp-content/uploads` in WordPress. 4. In my view this is an antipattern, justified by technical limitations and the database/media dichotomy, and it’s regrettable to reproduce it in content management applications that use the folder and file structure for organizing and editing content. Anyway, I understand that Pelican was designed with this (anti)pattern, and that offering the behavior in this feature request as an option could create as many usability issues as it solves. 
(So I'm going to use a different tool for my website. Probably Wintersmith.)

I do understand the rationale behind this choice. Pelican was built without this in mind, so its current implementation doesn't allow it, but that doesn't mean it cannot evolve to take this into consideration. However, the challenge here is to have a way to attach images and other static content in the .md or .rst files. One simple way to solve this would be to list the images in a metadata directive, but that sounds a bit complicated to me (for the user). Another way to solve this is to parse the rendered HTML to find links and check which ones start with `:filename:`. We're already doing this [here](https://github.com/getpelican/pelican/blob/master/pelican/contents.py#L177), and I believe one could extend this to allow what you're talking about. If someone wants to step up and implement it, I would be happy to review.

I put together a hackish workaround for this: a plugin to copy over the static files: https://gist.github.com/agateau/7043146 and a [patch](https://github.com/agateau/pelican/tree/wip/dirname) to allow referring to them using `{dirname}` in the `src` attribute of images.

@agateau I tried your plugin briefly and it seemed to work nicely.

@fvsch good to know. I found out it is currently broken if pages are not stored in the root folder of the repository, though. I need to find some time to think about it.

A simple flag to copy over all static images would be a good workaround for the time being. It would be much smarter to include the static files that are linked to in the source files: `![foo](image.png)` could be a trigger to copy `image.png` to the same dir as the current file. I have not thought this through ... but to be honest I kind of assumed it would work that way until I found out it does not.

So we all agree that this would be a useful feature. Would someone like to implement an appropriate solution?

So I've had the same exact problem, and I've been screwing around with this pretty much all night tonight :) First-time Python + never having used Pelican before doesn't help much. Here's what I think the problem with @agateau's approach is... You're using a Generator. The problem is, your Generator is called _after_ all the default Generators are already done, with all the content written to disk. So at that point it's too late. I toyed around with trying to post-process HTML files, and that doesn't work since you never know where your img tag might show up at the end (it's up to the Template, based on Content).

@karlcow, I'm hitting exactly the same problem as you -- in contents.py, _update_content() wants the file to be there in the context to replace a relative link, except that it's not there (I didn't go down the path of figuring out why :) ). So I cheated a bit. I added a plugin that kicks in on article_generator_finalized. At this point, there's enough state to know where the articles will end up (so that the images can be placed in the right directories), yet the HTML is not yet generated -- allowing you to do your replacement routine on the content inside the Article object.

I'm doing a simple search-and-replace of {article} -- yes, naive me thinks that I will never use that combination of characters anywhere but in a relative image reference. It obviously can be updated with a fancier regexp; at that point in execution the article is already HTML minus the template. With a quick modification and the last line uncommented, this can be adapted for pages too (I'll do it tomorrow).

Here's the code.
Pardon bad Python; it's my first time ;) ``` python import os import logging import shutil import re from pprint import pprint from sys import exit; from pelican import signals from pelican.generators import Generator from pelican.readers import BaseReader from pelican.utils import mkdir_p, copy logger = logging.getLogger(__name__) INLINE_STATIC_EXTENSIONS = ('png', 'jpeg', 'jpg'); ARTICLE_TAG = '{article}'; def process_images(generator): print("Image processing generator called"); for article in generator.articles: article_path = os.path.dirname(article.source_path); out_path = os.path.join(article._context['OUTPUT_PATH'], article.save_as); out_dir = os.path.dirname(out_path); base_url = os.path.join(article._context['SITEURL'], os.path.dirname(article.save_as)); base_url = base_url.replace('\\', '/'); os.makedirs(out_dir); for f in generator.get_files(article_path, extensions=INLINE_STATIC_EXTENSIONS): src = os.path.join(article._context['PATH'], f); print(" ", src, "\n", out_dir); shutil.copy(src, out_dir); article._content = article._content.replace(ARTICLE_TAG, base_url); def register(): signals.article_generator_finalized.connect(process_images); # signals.page_generator_finalized.connect(process_images); ``` I wanted this for my site so I created a plugin for it: [pelican-autostatic](https://github.com/AlexJF/pelican-autostatic). It basically only copies those static elements that are indeed referenced and is pluggable (with [pelican-advthumbnailer](https://github.com/AlexJF/pelican-advthumbnailer) for instance). Personally find it fits my workflow better so it might help someone else. Here's an [example usage](https://raw.githubusercontent.com/AlexJF/alexjf.net/fe0e1ed6a58b05fc2428a4d319f7cb53a6ec99cb/content/blog/personal/2013/07/14/barcelona-review/post.md) and [result](http://www.alexjf.net/blog/personal/barcelona-review/) @ametaireau and @justinmayer, I'm interested in working on this. Each of the attempts I've seen here so far has one or more of these problems: 1. Duplicating `StaticGenerator`'s job. Since we already have a generator for static files, I'd like it to handle all of them. This would minimize duplicate code, consistently update internal variables, and ensure that all static files respect the same config settings. 2. Requiring the user to maintain a list of filename suffixes to be copied as static files. It would be a hassle to track down all those suffixes when migrating a site to Pelican, and would result in broken links if the user forgets to add a suffix to that list. I'd rather just designate a directory as containing both static and content source files, and let Pelican figure out which is which. That would mean less work for the user and less chance of error. 3. No way to copy static files that are not linked by any pages/articles. This is a problem for sites with standalone files, such as secret downloads, or music files linked by playlists instead of pages. 4. No way to make static files end up in the same output directory as the articles that link to them. This is a non-issue when a site's output directory tree looks just like its source tree, because a static file's `SAVE_AS` path only has to replicate its source path in order to end up in the right place. It is mainly a problem for blog posts, because their `SAVE_AS` paths often do not match their source paths, but are instead generated based on metadata that the static files not share. 
I believe problems 1-3 can be solved with just a couple of steps: - Configure `STATIC_PATHS` to include the content directory. Anyone can do this with Pelican today. It will copy the static files into the output directory, but will also copy the content source files, which is not what most of us want. The next step fixes that. - Teach `StaticGenerator` to ignore content source files. I have already implemented this, and can create a pull request at any time. It's a pretty lightweight patch. I haven't submitted a pull request yet because I've been pondering how best to solve problem 4. I think it makes sense to create a config setting for directories whose static files should be copied as siblings of the articles that link to them, effectively overriding their normal `SAVE_AS` paths. That would work well when only one article links to a file, but what happens when two or more articles link to the same file, and those articles live in different (generated) directories? How should Pelican decide which article the static file should follow? The best answer I have so far is to copy the file to both directories, which would work nicely for users, but it would change the current 1:1 mapping between source path and save-as path, and would mean generating context (by adding new `Static` objects) after all the context generators had already finished running. Maybe the latter wouldn't be so bad if done through a signal interface? What do the Pelican maintainers think about this? Does someone have a better suggestion? Now that I think of it, we don't truly have a 1:1 mapping between source paths and save-as paths today. If someone points `STATIC_PATHS` and `PAGE_PATHS` at the same directory, Pelican 3.4 will create two different `Content` objects (a `Static` object and a `Page` object) for each source path. So, maybe my concern about preserving 1:1 isn't important. A simpler answer: Let a static file's target directory be overridden by the first article found linking to it, but no others. Any additional articles' links would be rewritten to reference it in the already-overridden location. This has a few advantages over copying the file to multiple locations: - No need to create new `Static` objects after the context generators have been run. - No need for multiple save-as paths per source path. - Reduced storage and bandwidth usage on the web server. This would solve problem 4: allowing static files to live in the same generated directory as their linking articles, for what I expect are the vast majority of cases. For cases where several articles in different generated directories link to the same file, the links would still work; they just wouldn't be as pretty. One thing still bothers me a little: If the first article found linking to a file is the one that determines its location, and another such article is ever added with a different save-as directory, the new one might be found first and change the static file's location in future site builds. Any off-site links to that file (perhaps on a third-party site or in an email message) would then break. Maybe that's okay, though. It could only happen for files that had been explicitly marked as belonging to articles in generated directories, which basically means blog posts, and I expect most people would want to discourage off-site deep links to those files anyway. The article locations would remain stable, so linking to them instead would be just fine. A note about this in the documentation might take care of it. 
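For readers who want to try step 1 above, here is a minimal `pelicanconf.py` sketch. It assumes the behavior proposed in this thread (the static generator skipping files already claimed by the article and page generators) is available; the setting names are standard Pelican settings, but the values are only illustrative.

```python
# pelicanconf.py (fragment) - illustrative sketch only.
# Let static files live anywhere under the content directory, right next to
# the articles that reference them.
PATH = 'content'
ARTICLE_PATHS = ['']   # look for articles everywhere under PATH
STATIC_PATHS = ['']    # look for static files everywhere under PATH too

# Static files keep their source-relative location by default...
STATIC_URL = '{path}'
STATIC_SAVE_AS = '{path}'

# ...while articles get metadata-driven output locations of their own.
ARTICLE_URL = '{date:%Y}/{date:%m}/{date:%d}/{slug}/'
ARTICLE_SAVE_AS = '{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
```

With a setup like this, problem 4 is exactly the remaining gap: a static file saved via `{path}` does not follow an article whose output path is driven by its date and slug, which is what the override discussed above is meant to solve.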
I created a pull request that solves problems 1-3. I believe I have a solid design that will build on that work and solve problem 4, and I plan to implement it soon. I'd like to have this all done in time for others to try it before the upcoming Debian freeze. By the way, is anyone reading this stuff? I hope I'm not wasting my time here. @foresto, we welcome your contribution. I share your concern about the Debian freeze. The best you can do is approach people and devs on IRC and keep nagging people and make them test it :wink: Hey Forest. I am indeed reading this. (^_^) This is a long-requested feature, and it would certainly be nice to include your work in the upcoming 3.5 release. Great. Thanks for saying so, both of you. In that case, I'll continue working on problem 4 (sticking/attaching static files to articles). Awesome — sounds great, Forest. Speaking of the Debian freeze, anyone available to sprint on Pelican on, say, the Nov. 1-2 weekend? I'm nearly ready to submit a patch for attaching static files to articles. It depends on changes I made in my earlier patch, but since the two are conceptually separate and since some folks have already reviewed the first one, I don't think squashing them together makes sense. Should I just add this patch as a second changeset to PR #1509, or should I open a new PR along with a note that the first one must be merged first? Please open a new PR along with a note that the first one must be merged first. My goal is to set aside some time tomorrow morning to review in greater detail.
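To make the behavior introduced by this pull request concrete, here is a condensed sketch modeled directly on the `TestStatic` cases in the diff above. It assumes a Pelican checkout that includes these changes; `get_settings` is the helper from `pelican.tests.support`, reused here only for brevity.

```python
import os.path

from pelican.contents import Page, Static
from pelican.tests.support import get_settings  # test helper, used for brevity

# Settings mirroring the TestStatic fixture above.
settings = get_settings(
    STATIC_SAVE_AS='{path}', STATIC_URL='{path}',
    PAGE_SAVE_AS=os.path.join('outpages', '{slug}.html'),
    PAGE_URL='outpages/{slug}.html')
context = settings.copy()

# A static file that lives next to its page in the source tree...
image = Static(content=None, metadata={}, settings=settings,
               source_path=os.path.join('dir', 'foo.jpg'), context=context)
context['filenames'] = {image.source_path: image}

# ...and the page that wants to carry it along to its own output directory.
page = Page(content="fake page", metadata={'title': 'fakepage'},
            settings=settings, source_path=os.path.join('dir', 'fakepage.md'))

# An {attach} link in the page triggers this call during URL replacement.
image.attach_to(page)

print(image.save_as)  # outpages/foo.jpg (a sibling of the page's output)
print(image.url)      # outpages/foo.jpg
```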
2014-11-01T22:48:00Z
[]
[]
getpelican/pelican
1577
getpelican__pelican-1577
[ "1513" ]
5a8efcd5269af75ac67251ab32c6a34722d0b219
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -257,9 +257,9 @@ def replacer(m): 'limit_msg': ("Other resources were not found " "and their urls not replaced")}) elif what == 'category': - origin = Category(path, self.settings).url + origin = '/'.join((siteurl, Category(path, self.settings).url)) elif what == 'tag': - origin = Tag(path, self.settings).url + origin = '/'.join((siteurl, Tag(path, self.settings).url)) # keep all other parts, such as query, fragment, etc. parts = list(value)
diff --git a/pelican/tests/TestPages/page_with_category_and_tag_links.md b/pelican/tests/TestPages/page_with_category_and_tag_links.md new file mode 100644 --- /dev/null +++ b/pelican/tests/TestPages/page_with_category_and_tag_links.md @@ -0,0 +1,7 @@ +Title: Page with a bunch of links + +My links: + +[Link 1]({tag}マック) + +[Link 2]({category}Yeah) diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -212,17 +212,20 @@ def test_get_content(self): '<a href="|tag|tagname">link</a>') page = Page(**args) content = page.get_content('http://notmyidea.org') - self.assertEqual(content, ('A simple test, with a ' - '<a href="tag/tagname.html">link</a>')) + self.assertEqual( + content, + ('A simple test, with a ' + '<a href="http://notmyidea.org/tag/tagname.html">link</a>')) # Category args['content'] = ('A simple test, with a ' '<a href="|category|category">link</a>') page = Page(**args) content = page.get_content('http://notmyidea.org') - self.assertEqual(content, - ('A simple test, with a ' - '<a href="category/category.html">link</a>')) + self.assertEqual( + content, + ('A simple test, with a ' + '<a href="http://notmyidea.org/category/category.html">link</a>')) def test_intrasite_link(self): # type does not take unicode in PY2 and bytes in PY3, which in @@ -543,6 +546,31 @@ def test_attach_link_syntax(self): self.assertEqual(self.static.save_as, expected_save_as) self.assertEqual(self.static.url, path_to_url(expected_save_as)) + def test_tag_link_syntax(self): + "{tag} link syntax triggers url replacement." + + html = '<a href="{tag}foo">link</a>' + page = Page( + content=html, + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), + context=self.context) + content = page.get_content('') + + self.assertNotEqual(content, html) + + def test_category_link_syntax(self): + "{category} link syntax triggers url replacement." 
+ + html = '<a href="{category}foo">link</a>' + page = Page(content=html, + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), + context=self.context) + content = page.get_content('') + + self.assertNotEqual(content, html) + class TestURLWrapper(unittest.TestCase): def test_comparisons(self): diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -428,6 +428,7 @@ def test_generate_context(self): ['This is a markdown test page', 'published', 'page'], ['This is a test page with a preset template', 'published', 'custom'], + ['Page with a bunch of links', 'published', 'page'], ['A Page (Test) for sorting', 'published', 'page'], ] hidden_pages_expected = [ @@ -517,6 +518,7 @@ def test_generate_sorted(self): ['This is a test page', 'published', 'page'], ['This is a markdown test page', 'published', 'page'], ['A Page (Test) for sorting', 'published', 'page'], + ['Page with a bunch of links', 'published', 'page'], ['This is a test page with a preset template', 'published', 'custom'], ] @@ -530,6 +532,7 @@ def test_generate_sorted(self): # sort by title pages_expected_sorted_by_title = [ ['A Page (Test) for sorting', 'published', 'page'], + ['Page with a bunch of links', 'published', 'page'], ['This is a markdown test page', 'published', 'page'], ['This is a test page', 'published', 'page'], ['This is a test page with a preset template', 'published', @@ -543,6 +546,26 @@ def test_generate_sorted(self): pages = self.distill_pages(generator.pages) self.assertEqual(pages_expected_sorted_by_title, pages) + def test_tag_and_category_links_on_generated_pages(self): + """ + Test to ensure links of the form {tag}tagname and {category}catname + are generated correctly on pages + """ + settings = get_settings(filenames={}) + settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR + settings['CACHE_PATH'] = self.temp_cache + settings['DEFAULT_DATE'] = (1970, 1, 1) + + generator = PagesGenerator( + context=settings.copy(), settings=settings, + path=CUR_DIR, theme=settings['THEME'], output_path=None) + generator.generate_context() + pages_by_title = {p.title: p.content for p in generator.pages} + + test_content = pages_by_title['Page with a bunch of links'] + self.assertIn('<a href="/category/yeah.html">', test_content) + self.assertIn('<a href="/tag/matsuku.html">', test_content) + class TestTemplatePagesGenerator(unittest.TestCase):
{tag} syntax doesn't generate correct links when used in pages

When the `{tag}tagname` syntax is used in an article, it produces the correct link to `/tag/tagname.html`. When used in a page, however, it translates to `/pages/tag/tagname.html`, which is a nonexistent location.

Ref: http://stackoverflow.com/q/26621639/695132
Thank you for reporting this @szhorvat. It's likely that this will also happen with `{category}categoryname` syntax.

The problem is that the replacer returns a path `tag/tagname.html` relative to `SITEURL`, as defined in `TAG_URL`. Perhaps a solution would be to make it explicitly relative to siteurl, [as with filenames a few lines above](https://github.com/getpelican/pelican/blob/master/pelican/contents.py#L238):

``` Python
# generators.py:247
elif what == 'category':
    origin = '/'.join((siteurl, Category(path, self.settings).url))
elif what == 'tag':
    origin = '/'.join((siteurl, Tag(path, self.settings).url))
```

You can try whether this quick fix helps you. If yes, it would be easy to merge it.

I'll try it on the weekend and will let you know.

I'm not in a position to tell how this would interact with the `RELATIVE_URLS` setting; would there be problems?

I actually think this would fix `RELATIVE_URLS` behavior; I doubt it works now. You can try that too.

It does seem to work in a few simple tests. Wouldn't Windows need the `\` path separator (like a few lines above for filenames)?

No, only `/` can be used in URLs.

I encountered the same bug, except using `{category}foo` and then looking at the various category pages (I got an extra category/category link). I can create a PR with the proposed fix above if that is helpful.

@ehashman go ahead :) But please add a few tests for the test suite.
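For anyone who wants to verify the behavior locally, here is a small sketch modeled on the tests added in the diff above; it assumes a Pelican version containing this fix and reuses the `get_settings` helper from `pelican.tests.support` purely for convenience.

```python
from pelican.contents import Page
from pelican.tests.support import get_settings  # test helper, for convenience

settings = get_settings()
context = settings.copy()
context['filenames'] = {}

# A *page* (not an article) using the intrasite link syntax discussed above.
page = Page(content='<a href="{tag}tagname">tagged posts</a>',
            metadata={'title': 'links'}, settings=settings,
            source_path='pages/links.md', context=context)

# With the fix, the generated origin is joined onto SITEURL instead of being
# left relative to the page's own output directory.
print(page.get_content('http://example.com'))
# -> <a href="http://example.com/tag/tagname.html">tagged posts</a>
```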
2015-01-02T07:14:12Z
[]
[]
getpelican/pelican
1653
getpelican__pelican-1653
[ "1547" ]
e35ca1d6ff4fd9a31b6dd60b2bb345c2fee0828e
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -53,7 +53,7 @@ def __init__(self, content, metadata=None, settings=None, self._context = context self.translations = [] - local_metadata = dict(settings['DEFAULT_METADATA']) + local_metadata = dict() local_metadata.update(metadata) # set metadata as attributes @@ -166,21 +166,13 @@ def url_format(self): """Returns the URL, formatted with the proper values""" metadata = copy.copy(self.metadata) path = self.metadata.get('path', self.get_relative_source_path()) - default_category = self.settings['DEFAULT_CATEGORY'] - slug_substitutions = self.settings.get('SLUG_SUBSTITUTIONS', ()) metadata.update({ 'path': path_to_url(path), 'slug': getattr(self, 'slug', ''), 'lang': getattr(self, 'lang', 'en'), 'date': getattr(self, 'date', SafeDatetime.now()), - 'author': slugify( - getattr(self, 'author', ''), - slug_substitutions - ), - 'category': slugify( - getattr(self, 'category', default_category), - slug_substitutions - ) + 'author': self.author.slug if hasattr(self, 'author') else '', + 'category': self.category.slug if hasattr(self, 'category') else '' }) return metadata diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -537,6 +537,10 @@ def find_empty_alt(content, path): def default_metadata(settings=None, process=None): metadata = {} if settings: + for name, value in dict(settings.get('DEFAULT_METADATA', {})).items(): + if process: + value = process(name, value) + metadata[name] = value if 'DEFAULT_CATEGORY' in settings: value = settings['DEFAULT_CATEGORY'] if process:
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -8,7 +8,7 @@ from pelican.tests.support import unittest, get_settings -from pelican.contents import Page, Article, Static, URLWrapper +from pelican.contents import Page, Article, Static, URLWrapper, Author, Category from pelican.settings import DEFAULT_CONFIG from pelican.utils import path_to_url, truncate_html_words, SafeDatetime, posix_join from pelican.signals import content_object_init @@ -33,7 +33,7 @@ def setUp(self): 'metadata': { 'summary': TEST_SUMMARY, 'title': 'foo bar', - 'author': 'Blogger', + 'author': Author('Blogger', DEFAULT_CONFIG), }, 'source_path': '/path/to/file/foo.ext' } @@ -374,7 +374,8 @@ def test_multiple_authors(self): content = Page(**args) assert content.authors == [content.author] args['metadata'].pop('author') - args['metadata']['authors'] = ['First Author', 'Second Author'] + args['metadata']['authors'] = [Author('First Author', DEFAULT_CONFIG), + Author('Second Author', DEFAULT_CONFIG)] content = Page(**args) assert content.authors assert content.author == content.authors[0] @@ -396,8 +397,8 @@ def test_slugify_category_author(self): settings['ARTICLE_URL'] = '{author}/{category}/{slug}/' settings['ARTICLE_SAVE_AS'] = '{author}/{category}/{slug}/index.html' article_kwargs = self._copy_page_kwargs() - article_kwargs['metadata']['author'] = "O'Brien" - article_kwargs['metadata']['category'] = 'C# & stuff' + article_kwargs['metadata']['author'] = Author("O'Brien", settings) + article_kwargs['metadata']['category'] = Category('C# & stuff', settings) article_kwargs['metadata']['title'] = 'fnord' article_kwargs['settings'] = settings article = Article(**article_kwargs) diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -413,6 +413,38 @@ def test_ignore_cache(self): generator.generate_context() generator.readers.read_file.assert_called_count == orig_call_count + def test_standard_metadata_in_default_metadata(self): + settings = get_settings(filenames={}) + settings['CACHE_CONTENT'] = False + settings['DEFAULT_CATEGORY'] = 'Default' + settings['DEFAULT_DATE'] = (1970, 1, 1) + settings['DEFAULT_METADATA'] = (('author', 'Blogger'), + # category will be ignored in favor of + # DEFAULT_CATEGORY + ('category', 'Random'), + ('tags', 'general, untagged')) + generator = ArticlesGenerator( + context=settings.copy(), settings=settings, + path=CONTENT_DIR, theme=settings['THEME'], output_path=None) + generator.generate_context() + + authors = sorted([author.name for author, _ in generator.authors]) + authors_expected = sorted(['Alexis Métaireau', 'Blogger', + 'First Author', 'Second Author']) + self.assertEqual(authors, authors_expected) + + categories = sorted([category.name + for category, _ in generator.categories]) + categories_expected = [ + sorted(['Default', 'TestCategory', 'yeah', 'test', '指導書']), + sorted(['Default', 'TestCategory', 'Yeah', 'test', '指導書'])] + self.assertIn(categories, categories_expected) + + tags = sorted([tag.name for tag in generator.tags]) + tags_expected = sorted(['bar', 'foo', 'foobar', 'general', 'untagged', + 'パイソン', 'マック']) + self.assertEqual(tags, tags_expected) + class TestPageGenerator(unittest.TestCase): # Note: Every time you want to test for a new field; Make sure the test diff --git a/pelican/tests/test_paginator.py b/pelican/tests/test_paginator.py --- 
a/pelican/tests/test_paginator.py +++ b/pelican/tests/test_paginator.py @@ -5,7 +5,7 @@ from pelican.tests.support import unittest, get_settings from pelican.paginator import Paginator -from pelican.contents import Article +from pelican.contents import Article, Author from pelican.settings import DEFAULT_CONFIG from jinja2.utils import generate_lorem_ipsum @@ -26,7 +26,6 @@ def setUp(self): 'metadata': { 'summary': TEST_SUMMARY, 'title': 'foo bar', - 'author': 'Blogger', }, 'source_path': '/path/to/file/foo.ext' } @@ -49,6 +48,7 @@ def test_save_as_preservation(self): key=lambda r: r[0], ) + self.page_kwargs['metadata']['author'] = Author('Blogger', settings) object_list = [Article(**self.page_kwargs), Article(**self.page_kwargs)] paginator = Paginator('foobar.foo', object_list, settings) page = paginator.page(1)
`category.slug` is not honored when determining URLs for articles

I'm trying to write a plugin which allows me to manually adjust the slug for a category, i.e. the thing which goes into the URL when you put `{category}` in `ARTICLE_URL` and friends. I have it working, except that `category.slug` is _ignored_ by `Content.url_format` (in favor of `slugify(category)`, which uses the _name_), so the generated pathname for the category itself honors the slug setting, but the generated pathnames for articles in the category don't. It appears to me that this is a straight-up bug in `Content.url_format`, possibly due to its having been written before `URLWrapper` objects were a thing?
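For context, a plugin along the lines the reporter describes might look roughly like the sketch below. This is hypothetical: the `CATEGORY_SLUGS` mapping and the function name are invented for illustration, only `pelican.signals.article_generator_finalized` is a real hook, and the sketch assumes both that `Category.slug` is assignable on the `URLWrapper`-based objects and that the fix in the diff above (having `url_format` read `category.slug`) is applied.

```python
# Hypothetical plugin sketch; names other than the signal are invented.
from pelican import signals

# Manual overrides: category name -> slug to use wherever {category} appears.
CATEGORY_SLUGS = {'C# & stuff': 'csharp'}

def override_category_slugs(generator):
    # Each article carries its own Category instance, so update them all;
    # the keys of generator.categories drive the category pages themselves.
    categories = [article.category for article in generator.articles]
    categories.extend(category for category, _ in generator.categories)
    for category in categories:
        custom_slug = CATEGORY_SLUGS.get(category.name)
        if custom_slug is not None:
            # With the fix above, url_format picks this slug up when it
            # expands {category} in ARTICLE_URL / ARTICLE_SAVE_AS.
            category.slug = custom_slug

def register():
    signals.article_generator_finalized.connect(override_category_slugs)
```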
2015-03-06T21:54:11Z
[]
[]
getpelican/pelican
1740
getpelican__pelican-1740
[ "1739" ]
940eb76b7f70b1c9c7f833d5328d44cb19bde406
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -22,7 +22,8 @@ TemplatePagesGenerator) from pelican.readers import Readers from pelican.settings import read_settings -from pelican.utils import clean_output_dir, folder_watcher, file_watcher +from pelican.utils import (clean_output_dir, folder_watcher, + file_watcher, maybe_pluralize) from pelican.writers import Writer __version__ = "3.5.0" @@ -183,12 +184,32 @@ def run(self): pages_generator = next(g for g in generators if isinstance(g, PagesGenerator)) - print('Done: Processed {} article(s), {} draft(s) and {} page(s) in ' \ - '{:.2f} seconds.'.format( - len(articles_generator.articles) + len(articles_generator.translations), - len(articles_generator.drafts) + \ - len(articles_generator.drafts_translations), - len(pages_generator.pages) + len(pages_generator.translations), + pluralized_articles = maybe_pluralize( + len(articles_generator.articles) + + len(articles_generator.translations), + 'article', + 'articles') + pluralized_drafts = maybe_pluralize( + len(articles_generator.drafts) + + len(articles_generator.drafts_translations), + 'draft', + 'drafts') + pluralized_pages = maybe_pluralize( + len(pages_generator.pages) + + len(pages_generator.translations), + 'page', + 'pages') + pluralized_hidden_pages = maybe_pluralize( + len(pages_generator.hidden_pages) + + len(pages_generator.hidden_translations), + 'hidden page', + 'hidden pages') + + print('Done: Processed {}, {}, {} and {} in {:.2f} seconds.'.format( + pluralized_articles, + pluralized_drafts, + pluralized_pages, + pluralized_hidden_pages, time.time() - start_time)) def get_generator_classes(self): diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -767,3 +767,19 @@ def path_to_file_url(path): '''Convert file-system path to file:// URL''' return six.moves.urllib_parse.urljoin( "file://", six.moves.urllib.request.pathname2url(path)) + + +def maybe_pluralize(count, singular, plural): + ''' + Returns a formatted string containing count and plural if count is not 1 + Returns count and singular if count is 1 + + maybe_pluralize(0, 'Article', 'Articles') -> '0 Articles' + maybe_pluralize(1, 'Article', 'Articles') -> '1 Article' + maybe_pluralize(2, 'Article', 'Articles') -> '2 Articles' + + ''' + selection = plural + if count == 1: + selection = singular + return '{} {}'.format(count, selection)
diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -358,6 +358,12 @@ def test_strftime_locale_dependent_french(self): locale.setlocale(locale.LC_ALL, old_locale) + def test_maybe_pluralize(self): + self.assertEqual(utils.maybe_pluralize(0, 'Article', 'Articles'), '0 Articles') + self.assertEqual(utils.maybe_pluralize(1, 'Article', 'Articles'), '1 Article') + self.assertEqual(utils.maybe_pluralize(2, 'Article', 'Articles'), '2 Articles') + + class TestCopy(unittest.TestCase): '''Tests the copy utility'''
Hidden pages not counted when generating

If you set status to hidden on a page, it won't be counted when generating, even though these pages are nonetheless generated. So there should be a counter for hidden pages as well.
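The patch above addresses this by counting hidden pages (and their translations) in the end-of-run summary. To see how the new `maybe_pluralize` helper shapes that line, here is a tiny standalone sketch with made-up counts; it only requires a Pelican version that already includes the patch.

```python
from pelican.utils import maybe_pluralize  # helper added by the patch above

# Made-up counts standing in for the generators' real tallies.
counts = [('article', 12), ('draft', 1), ('page', 3), ('hidden page', 2)]

parts = [maybe_pluralize(number, singular, singular + 's')
         for singular, number in counts]
print('Done: Processed {}, {}, {} and {} in 1.23 seconds.'.format(*parts))
# Done: Processed 12 articles, 1 draft, 3 pages and 2 hidden pages in 1.23 seconds.
```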
2015-06-03T07:05:08Z
[]
[]
getpelican/pelican
1753
getpelican__pelican-1753
[ "1752" ]
de6bd537b51ccba24f0666ee5d732e3d8453b08e
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -1,45 +1,41 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function -import six +from __future__ import print_function, unicode_literals +import argparse +import collections +import locale +import logging import os import re import sys import time -import logging -import argparse -import locale -import collections + +import six # pelican.log has to be the first pelican module to be loaded # because logging.setLoggerClass has to be called before logging.getLogger -from pelican.log import init - +from pelican.log import init # noqa from pelican import signals - from pelican.generators import (ArticlesGenerator, PagesGenerator, - StaticGenerator, SourceFileGenerator, + SourceFileGenerator, StaticGenerator, TemplatePagesGenerator) from pelican.readers import Readers from pelican.settings import read_settings -from pelican.utils import (clean_output_dir, folder_watcher, - file_watcher, maybe_pluralize) +from pelican.utils import (clean_output_dir, file_watcher, + folder_watcher, maybe_pluralize) from pelican.writers import Writer __version__ = "3.6.4.dev0" - DEFAULT_CONFIG_NAME = 'pelicanconf.py' - - logger = logging.getLogger(__name__) class Pelican(object): def __init__(self, settings): - """ - Pelican initialisation, performs some checks on the environment before - doing anything else. + """Pelican initialisation + + Performs some checks on the environment before doing anything else. """ # define the default settings @@ -152,7 +148,7 @@ def run(self): context = self.settings.copy() # Share these among all the generators and content objects: context['filenames'] = {} # maps source path to Content object or None - context['localsiteurl'] = self.settings['SITEURL'] + context['localsiteurl'] = self.settings['SITEURL'] generators = [ cls( @@ -190,23 +186,23 @@ def run(self): if isinstance(g, PagesGenerator)) pluralized_articles = maybe_pluralize( - len(articles_generator.articles) + - len(articles_generator.translations), + (len(articles_generator.articles) + + len(articles_generator.translations)), 'article', 'articles') pluralized_drafts = maybe_pluralize( - len(articles_generator.drafts) + - len(articles_generator.drafts_translations), + (len(articles_generator.drafts) + + len(articles_generator.drafts_translations)), 'draft', 'drafts') pluralized_pages = maybe_pluralize( - len(pages_generator.pages) + - len(pages_generator.translations), + (len(pages_generator.pages) + + len(pages_generator.translations)), 'page', 'pages') pluralized_hidden_pages = maybe_pluralize( - len(pages_generator.hidden_pages) + - len(pages_generator.hidden_translations), + (len(pages_generator.hidden_pages) + + len(pages_generator.hidden_translations)), 'hidden page', 'hidden pages') @@ -243,8 +239,8 @@ def get_generator_classes(self): return generators def get_writer(self): - writers = [ w for (_, w) in signals.get_writer.send(self) - if isinstance(w, type) ] + writers = [w for (_, w) in signals.get_writer.send(self) + if isinstance(w, type)] writers_found = len(writers) if writers_found == 0: return Writer(self.output_path, settings=self.settings) @@ -254,15 +250,15 @@ def get_writer(self): logger.debug('Found writer: %s', writer) else: logger.warning( - '%s writers found, using only first one: %s', + '%s writers found, using only first one: %s', writers_found, writer) return writer(self.output_path, settings=self.settings) def parse_arguments(): parser = argparse.ArgumentParser( 
- description="""A tool to generate a static blog, - with restructured text input files.""", + description='A tool to generate a static blog, ' + ' with restructured text input files.', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) @@ -354,7 +350,7 @@ def get_config(args): # argparse returns bytes in Py2. There is no definite answer as to which # encoding argparse (or sys.argv) uses. # "Best" option seems to be locale.getpreferredencoding() - # ref: http://mail.python.org/pipermail/python-list/2006-October/405766.html + # http://mail.python.org/pipermail/python-list/2006-October/405766.html if not six.PY3: enc = locale.getpreferredencoding() for key in config: @@ -424,7 +420,8 @@ def main(): # Added static paths # Add new watchers and set them as modified - for static_path in set(new_static).difference(old_static): + new_watchers = set(new_static).difference(old_static) + for static_path in new_watchers: static_key = '[static]%s' % static_path watchers[static_key] = folder_watcher( os.path.join(pelican.path, static_path), @@ -434,7 +431,8 @@ def main(): # Removed static paths # Remove watchers and modified values - for static_path in set(old_static).difference(new_static): + old_watchers = set(old_static).difference(new_static) + for static_path in old_watchers: static_key = '[static]%s' % static_path watchers.pop(static_key) modified.pop(static_key) diff --git a/pelican/cache.py b/pelican/cache.py --- a/pelican/cache.py +++ b/pelican/cache.py @@ -1,15 +1,13 @@ +# -*- coding: utf-8 -*- from __future__ import unicode_literals import hashlib import logging import os -try: - import cPickle as pickle -except: - import pickle -from pelican.utils import mkdir_p +from six.moves import cPickle as pickle +from pelican.utils import mkdir_p logger = logging.getLogger(__name__) @@ -83,6 +81,7 @@ def __init__(self, settings, cache_name, caching_policy, load_policy): """This sublcass additionally sets filestamp function and base path for filestamping operations """ + super(FileStampDataCacher, self).__init__(settings, cache_name, caching_policy, load_policy) @@ -118,6 +117,7 @@ def _get_file_stamp(self, filename): a hash for a function name in the hashlib module or an empty bytes string otherwise """ + try: return self._filestamp_func(filename) except (IOError, OSError, TypeError) as err: @@ -133,6 +133,7 @@ def get_cached_data(self, filename, default=None): Modification is checked by comparing the cached and current file stamp. 
""" + stamp, data = super(FileStampDataCacher, self).get_cached_data( filename, (None, default)) if stamp != self._get_file_stamp(filename): diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -1,23 +1,24 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function -import six -from six.moves.urllib.parse import urlparse, urlunparse +from __future__ import print_function, unicode_literals import copy import locale import logging -import functools import os import re import sys import pytz +import six +from six.moves.urllib.parse import urlparse, urlunparse + from pelican import signals from pelican.settings import DEFAULT_CONFIG -from pelican.utils import (slugify, truncate_html_words, memoized, strftime, - python_2_unicode_compatible, deprecated_attribute, - path_to_url, posixize_path, set_date_tzinfo, SafeDatetime) +from pelican.utils import (SafeDatetime, deprecated_attribute, memoized, + path_to_url, posixize_path, + python_2_unicode_compatible, set_date_tzinfo, + slugify, strftime, truncate_html_words) # Import these so that they're avalaible when you import from pelican.contents. from pelican.urlwrappers import (URLWrapper, Author, Category, Tag) # NOQA @@ -66,7 +67,7 @@ def __init__(self, content, metadata=None, settings=None, # also keep track of the metadata attributes available self.metadata = local_metadata - #default template if it's not defined in page + # default template if it's not defined in page self.template = self._get_template() # First, read the authors from "authors", if not, fallback to "author" @@ -94,13 +95,16 @@ def __init__(self, content, metadata=None, settings=None, # create the slug if not existing, generate slug according to # setting of SLUG_ATTRIBUTE if not hasattr(self, 'slug'): - if settings['SLUGIFY_SOURCE'] == 'title' and hasattr(self, 'title'): + if (settings['SLUGIFY_SOURCE'] == 'title' and + hasattr(self, 'title')): self.slug = slugify(self.title, - settings.get('SLUG_SUBSTITUTIONS', ())) - elif settings['SLUGIFY_SOURCE'] == 'basename' and source_path != None: - basename = os.path.basename(os.path.splitext(source_path)[0]) - self.slug = slugify(basename, - settings.get('SLUG_SUBSTITUTIONS', ())) + settings.get('SLUG_SUBSTITUTIONS', ())) + elif (settings['SLUGIFY_SOURCE'] == 'basename' and + source_path is not None): + basename = os.path.basename( + os.path.splitext(source_path)[0]) + self.slug = slugify( + basename, settings.get('SLUG_SUBSTITUTIONS', ())) self.source_path = source_path @@ -233,7 +237,8 @@ def replacer(m): if isinstance(linked_content, Static): linked_content.attach_to(self) else: - logger.warning("%s used {attach} link syntax on a " + logger.warning( + "%s used {attach} link syntax on a " "non-static file. Use {filename} instead.", self.get_relative_source_path()) origin = '/'.join((siteurl, linked_content.url)) @@ -241,7 +246,7 @@ def replacer(m): else: logger.warning( "Unable to find `%s`, skipping url replacement.", - value.geturl(), extra = { + value.geturl(), extra={ 'limit_msg': ("Other resources were not found " "and their urls not replaced")}) elif what == 'category': @@ -250,9 +255,9 @@ def replacer(m): origin = '/'.join((siteurl, Tag(path, self.settings).url)) else: logger.warning( - "Replacement Indicator '%s' not recognized, " - "skipping replacement", - what) + "Replacement Indicator '%s' not recognized, " + "skipping replacement", + what) # keep all other parts, such as query, fragment, etc. 
parts = list(value) @@ -337,7 +342,9 @@ def get_relative_source_path(self, source_path=None): return posixize_path( os.path.relpath( - os.path.abspath(os.path.join(self.settings['PATH'], source_path)), + os.path.abspath(os.path.join( + self.settings['PATH'], + source_path)), os.path.abspath(self.settings['PATH']) )) @@ -402,9 +409,12 @@ def save_as(self): def attach_to(self, content): """Override our output directory with that of the given content object. """ - # Determine our file's new output path relative to the linking document. - # If it currently lives beneath the linking document's source directory, - # preserve that relationship on output. Otherwise, make it a sibling. + + # Determine our file's new output path relative to the linking + # document. If it currently lives beneath the linking + # document's source directory, preserve that relationship on output. + # Otherwise, make it a sibling. + linking_source_dir = os.path.dirname(content.source_path) tail_path = os.path.relpath(self.source_path, linking_source_dir) if tail_path.startswith(os.pardir + os.sep): @@ -420,11 +430,14 @@ def attach_to(self, content): # 'some/content' with a file named 'index.html'.) Rather than trying # to figure it out by comparing the linking document's url and save_as # path, we simply build our new url from our new save_as path. + new_url = path_to_url(new_save_as) def _log_reason(reason): - logger.warning("The {attach} link in %s cannot relocate %s " - "because %s. Falling back to {filename} link behavior instead.", + logger.warning( + "The {attach} link in %s cannot relocate " + "%s because %s. Falling back to " + "{filename} link behavior instead.", content.get_relative_source_path(), self.get_relative_source_path(), reason, extra={'limit_msg': "More {attach} warnings silenced."}) @@ -452,5 +465,6 @@ def is_valid_content(content, f): content.check_properties() return True except NameError as e: - logger.error("Skipping %s: could not find information about '%s'", f, e) + logger.error( + "Skipping %s: could not find information about '%s'", f, e) return False diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -1,28 +1,28 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function +from __future__ import print_function, unicode_literals -import os -import six +import calendar +import fnmatch import logging +import os import shutil -import fnmatch -import calendar - from codecs import open from collections import defaultdict from functools import partial from itertools import chain, groupby from operator import attrgetter -from jinja2 import (Environment, FileSystemLoader, PrefixLoader, ChoiceLoader, - BaseLoader, TemplateNotFound) +from jinja2 import (BaseLoader, ChoiceLoader, Environment, FileSystemLoader, + PrefixLoader, TemplateNotFound) + +import six +from pelican import signals from pelican.cache import FileStampDataCacher from pelican.contents import Article, Draft, Page, Static, is_valid_content from pelican.readers import Readers -from pelican.utils import (copy, process_translations, mkdir_p, DateFormatter, - python_2_unicode_compatible, posixize_path) -from pelican import signals +from pelican.utils import (DateFormatter, copy, mkdir_p, posixize_path, + process_translations, python_2_unicode_compatible) logger = logging.getLogger(__name__) @@ -31,6 +31,7 @@ class PelicanTemplateNotFound(Exception): pass + @python_2_unicode_compatible class Generator(object): """Baseclass generator""" @@ -90,8 +91,9 @@ 
def get_template(self, name): try: self._templates[name] = self.env.get_template(name + '.html') except TemplateNotFound: - raise PelicanTemplateNotFound('[templates] unable to load %s.html from %s' - % (name, self._templates_path)) + raise PelicanTemplateNotFound( + '[templates] unable to load {}.html from {}'.format( + name, self._templates_path)) return self._templates[name] def _include_path(self, path, extensions=None): @@ -105,7 +107,7 @@ def _include_path(self, path, extensions=None): extensions = tuple(self.readers.extensions) basename = os.path.basename(path) - #check IGNORE_FILES + # check IGNORE_FILES ignores = self.settings['IGNORE_FILES'] if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores): return False @@ -122,8 +124,9 @@ def get_files(self, paths, exclude=[], extensions=None): :param extensions: the list of allowed extensions (if False, all extensions are allowed) """ + # backward compatibility for older generators if isinstance(paths, six.string_types): - paths = [paths] # backward compatibility for older generators + paths = [paths] # group the exclude dir names by parent path, for use with os.walk() exclusions_by_dirpath = {} @@ -138,7 +141,8 @@ def get_files(self, paths, exclude=[], extensions=None): root = os.path.join(self.path, path) if path else self.path if os.path.isdir(root): - for dirpath, dirs, temp_files in os.walk(root, followlinks=True): + for dirpath, dirs, temp_files in os.walk( + root, followlinks=True): drop = [] excl = exclusions_by_dirpath.get(dirpath, ()) for d in dirs: @@ -178,7 +182,8 @@ def _is_potential_source_path(self, path): before this method is called, even if they failed to process.) The path argument is expected to be relative to self.path. """ - return posixize_path(os.path.normpath(path)) in self.context['filenames'] + return (posixize_path(os.path.normpath(path)) + in self.context['filenames']) def _update_context(self, items): """Update the context with the given items from the currrent @@ -211,7 +216,8 @@ def __init__(self, *args, **kwargs): readers_cache_name=(cls_name + '-Readers'), **kwargs) - cache_this_level = self.settings['CONTENT_CACHING_LAYER'] == 'generator' + cache_this_level = \ + self.settings['CONTENT_CACHING_LAYER'] == 'generator' caching_policy = cache_this_level and self.settings['CACHE_CONTENT'] load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE'] FileStampDataCacher.__init__(self, self.settings, cls_name, @@ -259,14 +265,14 @@ class ArticlesGenerator(CachingGenerator): def __init__(self, *args, **kwargs): """initialize properties""" - self.articles = [] # only articles in default language + self.articles = [] # only articles in default language self.translations = [] self.dates = {} self.tags = defaultdict(list) self.categories = defaultdict(list) self.related_posts = [] self.authors = defaultdict(list) - self.drafts = [] # only drafts in default language + self.drafts = [] # only drafts in default language self.drafts_translations = [] super(ArticlesGenerator, self).__init__(*args, **kwargs) signals.article_generator_init.send(self) @@ -282,8 +288,8 @@ def generate_feeds(self, writer): writer.write_feed(self.articles, self.context, self.settings['FEED_RSS'], feed_type='rss') - if (self.settings.get('FEED_ALL_ATOM') - or self.settings.get('FEED_ALL_RSS')): + if (self.settings.get('FEED_ALL_ATOM') or + self.settings.get('FEED_ALL_RSS')): all_articles = list(self.articles) for article in self.articles: all_articles.extend(article.translations) @@ -322,8 +328,8 @@ def generate_feeds(self, 
writer): self.settings['AUTHOR_FEED_RSS'] % auth.slug, feed_type='rss') - if (self.settings.get('TAG_FEED_ATOM') - or self.settings.get('TAG_FEED_RSS')): + if (self.settings.get('TAG_FEED_ATOM') or + self.settings.get('TAG_FEED_RSS')): for tag, arts in self.tags.items(): arts.sort(key=attrgetter('date'), reverse=True) if self.settings.get('TAG_FEED_ATOM'): @@ -336,8 +342,8 @@ def generate_feeds(self, writer): self.settings['TAG_FEED_RSS'] % tag.slug, feed_type='rss') - if (self.settings.get('TRANSLATION_FEED_ATOM') - or self.settings.get('TRANSLATION_FEED_RSS')): + if (self.settings.get('TRANSLATION_FEED_ATOM') or + self.settings.get('TRANSLATION_FEED_RSS')): translations_feeds = defaultdict(list) for article in chain(self.articles, self.translations): translations_feeds[article.lang].append(article) @@ -472,9 +478,9 @@ def generate_drafts(self, write): """Generate drafts pages.""" for draft in chain(self.drafts_translations, self.drafts): write(draft.save_as, self.get_template(draft.template), - self.context, article=draft, category=draft.category, - override_output=hasattr(draft, 'override_save_as'), - blog=True, all_articles=self.articles) + self.context, article=draft, category=draft.category, + override_output=hasattr(draft, 'override_save_as'), + blog=True, all_articles=self.articles) def generate_pages(self, writer): """Generate the pages on the disk""" @@ -503,7 +509,8 @@ def generate_context(self): exclude=self.settings['ARTICLE_EXCLUDES']): article_or_draft = self.get_cached_data(f, None) if article_or_draft is None: - #TODO needs overhaul, maybe nomad for read_file solution, unified behaviour + # TODO needs overhaul, maybe nomad for read_file + # solution, unified behaviour try: article_or_draft = self.readers.read_file( base_path=self.path, path=f, content_class=Article, @@ -513,7 +520,8 @@ def generate_context(self): context_signal=signals.article_generator_context, context_sender=self) except Exception as e: - logger.error('Could not process %s\n%s', f, e, + logger.error( + 'Could not process %s\n%s', f, e, exc_info=self.settings.get('DEBUG', False)) self._add_failed_source_path(f) continue @@ -535,8 +543,9 @@ def generate_context(self): self.add_source_path(article_or_draft) all_drafts.append(article_or_draft) else: - logger.error("Unknown status '%s' for file %s, skipping it.", - article_or_draft.status, f) + logger.error( + "Unknown status '%s' for file %s, skipping it.", + article_or_draft.status, f) self._add_failed_source_path(f) continue @@ -544,9 +553,9 @@ def generate_context(self): self.add_source_path(article_or_draft) - - self.articles, self.translations = process_translations(all_articles, - order_by=self.settings['ARTICLE_ORDER_BY']) + self.articles, self.translations = process_translations( + all_articles, + order_by=self.settings['ARTICLE_ORDER_BY']) self.drafts, self.drafts_translations = \ process_translations(all_drafts) @@ -615,7 +624,8 @@ def generate_context(self): context_signal=signals.page_generator_context, context_sender=self) except Exception as e: - logger.error('Could not process %s\n%s', f, e, + logger.error( + 'Could not process %s\n%s', f, e, exc_info=self.settings.get('DEBUG', False)) self._add_failed_source_path(f) continue @@ -629,8 +639,9 @@ def generate_context(self): elif page.status.lower() == "hidden": hidden_pages.append(page) else: - logger.error("Unknown status '%s' for file %s, skipping it.", - page.status, f) + logger.error( + "Unknown status '%s' for file %s, skipping it.", + page.status, f) self._add_failed_source_path(f) 
continue @@ -638,10 +649,11 @@ def generate_context(self): self.add_source_path(page) - self.pages, self.translations = process_translations(all_pages, - order_by=self.settings['PAGE_ORDER_BY']) - self.hidden_pages, self.hidden_translations = ( - process_translations(hidden_pages)) + self.pages, self.translations = process_translations( + all_pages, + order_by=self.settings['PAGE_ORDER_BY']) + self.hidden_pages, self.hidden_translations = \ + process_translations(hidden_pages) self._update_context(('pages', 'hidden_pages')) diff --git a/pelican/log.py b/pelican/log.py --- a/pelican/log.py +++ b/pelican/log.py @@ -1,26 +1,27 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function - -__all__ = [ - 'init' -] +from __future__ import print_function, unicode_literals +import locale +import logging import os import sys -import logging -import locale - -from collections import defaultdict, Mapping +from collections import Mapping, defaultdict import six +__all__ = [ + 'init' +] + + class BaseFormatter(logging.Formatter): def __init__(self, fmt=None, datefmt=None): FORMAT = '%(customlevelname)s %(message)s' super(BaseFormatter, self).__init__(fmt=FORMAT, datefmt=datefmt) def format(self, record): - record.__dict__['customlevelname'] = self._get_levelname(record.levelname) + customlevel = self._get_levelname(record.levelname) + record.__dict__['customlevelname'] = customlevel # format multiline messages 'nicely' to make it clear they are together record.msg = record.msg.replace('\n', '\n | ') return super(BaseFormatter, self).format(record) @@ -132,13 +133,13 @@ class SafeLogger(logging.Logger): def _log(self, level, msg, args, exc_info=None, extra=None): # if the only argument is a Mapping, Logger uses that for formatting # format values for that case - if args and len(args)==1 and isinstance(args[0], Mapping): + if args and len(args) == 1 and isinstance(args[0], Mapping): args = ({k: self._decode_arg(v) for k, v in args[0].items()},) # otherwise, format each arg else: args = tuple(self._decode_arg(arg) for arg in args) - super(SafeLogger, self)._log(level, msg, args, - exc_info=exc_info, extra=extra) + super(SafeLogger, self)._log( + level, msg, args, exc_info=exc_info, extra=extra) def _decode_arg(self, arg): ''' @@ -175,8 +176,7 @@ def init(level=None, handler=logging.StreamHandler()): logger = logging.getLogger() - if (os.isatty(sys.stdout.fileno()) - and not sys.platform.startswith('win')): + if os.isatty(sys.stdout.fileno()) and not sys.platform.startswith('win'): fmt = ANSIFormatter() else: fmt = TextFormatter() diff --git a/pelican/paginator.py b/pelican/paginator.py --- a/pelican/paginator.py +++ b/pelican/paginator.py @@ -1,18 +1,15 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function -import six +from __future__ import print_function, unicode_literals -# From django.core.paginator -from collections import namedtuple import functools import logging import os - +from collections import namedtuple from math import ceil -logger = logging.getLogger(__name__) - +import six +logger = logging.getLogger(__name__) PaginationRule = namedtuple( 'PaginationRule', 'min_page URL SAVE_AS', @@ -143,7 +140,7 @@ def _from_settings(self, key): 'settings': self.settings, 'base_name': os.path.dirname(self.name), 'number_sep': '/', - 'extension': self.extension, + 'extension': self.extension, } if self.number == 1: diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 
-*- -from __future__ import unicode_literals, print_function +from __future__ import print_function, unicode_literals import logging import os @@ -9,24 +9,50 @@ import docutils.core import docutils.io from docutils.writers.html4css1 import HTMLTranslator + import six +from six.moves.html_parser import HTMLParser -# import the directives to have pygments support from pelican import rstdirectives # NOQA +from pelican import signals +from pelican.cache import FileStampDataCacher +from pelican.contents import Author, Category, Page, Tag +from pelican.utils import SafeDatetime, get_date, pelican_open, posixize_path + try: from markdown import Markdown except ImportError: Markdown = False # NOQA + try: from html import escape except ImportError: from cgi import escape -from six.moves.html_parser import HTMLParser -from pelican import signals -from pelican.cache import FileStampDataCacher -from pelican.contents import Page, Category, Tag, Author -from pelican.utils import get_date, pelican_open, SafeDatetime, posixize_path +# Metadata processors have no way to discard an unwanted value, so we have +# them return this value instead to signal that it should be discarded later. +# This means that _filter_discardable_metadata() must be called on processed +# metadata dicts before use, to remove the items with the special value. +_DISCARD = object() +METADATA_PROCESSORS = { + 'tags': lambda x, y: ([ + Tag(tag, y) + for tag in ensure_metadata_list(x) + ] or _DISCARD), + 'date': lambda x, y: get_date(x.replace('_', ' ')), + 'modified': lambda x, y: get_date(x), + 'status': lambda x, y: x.strip() or _DISCARD, + 'category': lambda x, y: _process_if_nonempty(Category, x, y), + 'author': lambda x, y: _process_if_nonempty(Author, x, y), + 'authors': lambda x, y: ([ + Author(author, y) + for author in ensure_metadata_list(x) + ] or _DISCARD), + 'slug': lambda x, y: x.strip() or _DISCARD, +} + +logger = logging.getLogger(__name__) + def ensure_metadata_list(text): """Canonicalize the format of a list of authors or tags. This works @@ -49,13 +75,6 @@ def ensure_metadata_list(text): return [v for v in (w.strip() for w in text) if v] -# Metadata processors have no way to discard an unwanted value, so we have -# them return this value instead to signal that it should be discarded later. -# This means that _filter_discardable_metadata() must be called on processed -# metadata dicts before use, to remove the items with the special value. -_DISCARD = object() - - def _process_if_nonempty(processor, name, settings): """Removes extra whitespace from name and applies a metadata processor. If name is empty or all whitespace, returns _DISCARD instead. 
@@ -64,28 +83,11 @@ def _process_if_nonempty(processor, name, settings): return processor(name, settings) if name else _DISCARD -METADATA_PROCESSORS = { - 'tags': lambda x, y: ([Tag(tag, y) for tag in ensure_metadata_list(x)] - or _DISCARD), - 'date': lambda x, y: get_date(x.replace('_', ' ')), - 'modified': lambda x, y: get_date(x), - 'status': lambda x, y: x.strip() or _DISCARD, - 'category': lambda x, y: _process_if_nonempty(Category, x, y), - 'author': lambda x, y: _process_if_nonempty(Author, x, y), - 'authors': lambda x, y: ([Author(author, y) - for author in ensure_metadata_list(x)] - or _DISCARD), - 'slug': lambda x, y: x.strip() or _DISCARD, -} - - def _filter_discardable_metadata(metadata): """Return a copy of a dict, minus any items marked as discardable.""" return {name: val for name, val in metadata.items() if val is not _DISCARD} -logger = logging.getLogger(__name__) - class BaseReader(object): """Base class to read files. @@ -267,8 +269,10 @@ def _parse_metadata(self, meta): output[name] = self.process_metadata(name, summary) elif name in METADATA_PROCESSORS: if len(value) > 1: - logger.warning('Duplicate definition of `%s` ' - 'for %s. Using first one.', name, self._source_path) + logger.warning( + 'Duplicate definition of `%s` ' + 'for %s. Using first one.', + name, self._source_path) output[name] = self.process_metadata(name, value[0]) elif len(value) > 1: # handle list metadata as list of string @@ -380,7 +384,8 @@ def build_tag(self, tag, attrs, close_tag): def _handle_meta_tag(self, attrs): name = self._attr_value(attrs, 'name') if name is None: - attr_serialized = ', '.join(['{}="{}"'.format(k, v) for k, v in attrs]) + attr_list = ['{}="{}"'.format(k, v) for k, v in attrs] + attr_serialized = ', '.join(attr_list) logger.warning("Meta tag in file %s does not have a 'name' " "attribute, skipping. 
Attributes: %s", self._filename, attr_serialized) @@ -394,9 +399,9 @@ def _handle_meta_tag(self, attrs): "Meta tag attribute 'contents' used in file %s, should" " be changed to 'content'", self._filename, - extra={'limit_msg': ("Other files have meta tag " - "attribute 'contents' that should " - "be changed to 'content'")}) + extra={'limit_msg': "Other files have meta tag " + "attribute 'contents' that should " + "be changed to 'content'"}) if name == 'keywords': name = 'tags' @@ -474,7 +479,8 @@ def read_file(self, base_path, path, content_class=Page, fmt=None, path = os.path.abspath(os.path.join(base_path, path)) source_path = posixize_path(os.path.relpath(path, base_path)) - logger.debug('Read file %s -> %s', + logger.debug( + 'Read file %s -> %s', source_path, content_class.__name__) if not fmt: @@ -486,7 +492,8 @@ def read_file(self, base_path, path, content_class=Page, fmt=None, 'Pelican does not know how to parse %s', path) if preread_signal: - logger.debug('Signal %s.send(%s)', + logger.debug( + 'Signal %s.send(%s)', preread_signal.name, preread_sender) preread_signal.send(preread_sender) @@ -527,7 +534,9 @@ def read_file(self, base_path, path, content_class=Page, fmt=None, def typogrify_wrapper(text): """Ensures ignore_tags feature is backward compatible""" try: - return typogrify(text, self.settings['TYPOGRIFY_IGNORE_TAGS']) + return typogrify( + text, + self.settings['TYPOGRIFY_IGNORE_TAGS']) except TypeError: return typogrify(text) @@ -539,8 +548,10 @@ def typogrify_wrapper(text): metadata['summary'] = typogrify_wrapper(metadata['summary']) if context_signal: - logger.debug('Signal %s.send(%s, <metadata>)', - context_signal.name, context_sender) + logger.debug( + 'Signal %s.send(%s, <metadata>)', + context_signal.name, + context_sender) context_signal.send(context_sender, metadata=metadata) return content_class(content=content, metadata=metadata, @@ -591,7 +602,8 @@ def default_metadata(settings=None, process=None): if process: value = process('category', value) metadata['category'] = value - if settings.get('DEFAULT_DATE', None) and settings['DEFAULT_DATE'] != 'fs': + if settings.get('DEFAULT_DATE', None) and \ + settings['DEFAULT_DATE'] != 'fs': metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE']) return metadata diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py --- a/pelican/rstdirectives.py +++ b/pelican/rstdirectives.py @@ -1,13 +1,17 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function +from __future__ import print_function, unicode_literals + +import re from docutils import nodes, utils -from docutils.parsers.rst import directives, roles, Directive -from pygments.formatters import HtmlFormatter +from docutils.parsers.rst import Directive, directives, roles + from pygments import highlight -from pygments.lexers import get_lexer_by_name, TextLexer -import re +from pygments.formatters import HtmlFormatter +from pygments.lexers import TextLexer, get_lexer_by_name + import six + import pelican.settings as pys diff --git a/pelican/server.py b/pelican/server.py --- a/pelican/server.py +++ b/pelican/server.py @@ -1,16 +1,18 @@ -from __future__ import print_function +# -*- coding: utf-8 -*- +from __future__ import print_function, unicode_literals + +import logging import os import sys -import logging - -from six.moves import SimpleHTTPServer as srvmod -from six.moves import socketserver try: from magic import from_file as magic_from_file except ImportError: magic_from_file = None +from six.moves import SimpleHTTPServer as srvmod 
+from six.moves import socketserver + class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler): SUFFIXES = ['', '.html', '/index.html'] @@ -54,12 +56,12 @@ def guess_type(self, path): socketserver.TCPServer.allow_reuse_address = True try: - httpd = socketserver.TCPServer((SERVER, PORT), ComplexHTTPRequestHandler) + httpd = socketserver.TCPServer( + (SERVER, PORT), ComplexHTTPRequestHandler) except OSError as e: logging.error("Could not listen on port %s, server %s.", PORT, SERVER) sys.exit(getattr(e, 'exitcode', 1)) - logging.info("Serving at port %s, server %s.", PORT, SERVER) try: httpd.serve_forever() diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -1,31 +1,32 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function -import six +from __future__ import print_function, unicode_literals import copy import inspect -import os import locale import logging +import os +from os.path import isabs +from posixpath import join as posix_join + +import six + +from pelican.log import LimitFilter try: # SourceFileLoader is the recommended way in 3.3+ from importlib.machinery import SourceFileLoader - load_source = lambda name, path: SourceFileLoader(name, path).load_module() + + def load_source(name, path): + return SourceFileLoader(name, path).load_module() except ImportError: # but it does not exist in 3.2-, so fall back to imp import imp load_source = imp.load_source -from os.path import isabs -from pelican.utils import posix_join - -from pelican.log import LimitFilter - logger = logging.getLogger(__name__) - DEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'themes', 'notmyidea') DEFAULT_CONFIG = { @@ -131,7 +132,7 @@ 'LOAD_CONTENT_CACHE': False, 'WRITE_SELECTED': [], 'FORMATTED_FIELDS': ['summary'], - } +} PYGMENTS_RST_OPTIONS = None @@ -158,8 +159,20 @@ def read_settings(path=None, override=None): "has been deprecated (should be a list)") local_settings['PLUGIN_PATHS'] = [local_settings['PLUGIN_PATHS']] elif local_settings['PLUGIN_PATHS'] is not None: - local_settings['PLUGIN_PATHS'] = [os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(path), pluginpath))) - if not isabs(pluginpath) else pluginpath for pluginpath in local_settings['PLUGIN_PATHS']] + def getabs(path, pluginpath): + if isabs(pluginpath): + return pluginpath + else: + path_dirname = os.path.dirname(path) + path_joined = os.path.join(path_dirname, pluginpath) + path_normed = os.path.normpath(path_joined) + path_absolute = os.path.abspath(path_normed) + return path_absolute + + pluginpath_list = [getabs(path, pluginpath) + for pluginpath + in local_settings['PLUGIN_PATHS']] + local_settings['PLUGIN_PATHS'] = pluginpath_list else: local_settings = copy.deepcopy(DEFAULT_CONFIG) @@ -199,13 +212,13 @@ def configure_settings(settings): settings. Also, specify the log messages to be ignored. 
""" - if not 'PATH' in settings or not os.path.isdir(settings['PATH']): + if 'PATH' not in settings or not os.path.isdir(settings['PATH']): raise Exception('You need to specify a path containing the content' ' (see pelican --help for more information)') # specify the log messages to be ignored - LimitFilter._ignore.update(set(settings.get('LOG_FILTER', - DEFAULT_CONFIG['LOG_FILTER']))) + log_filter = settings.get('LOG_FILTER', DEFAULT_CONFIG['LOG_FILTER']) + LimitFilter._ignore.update(set(log_filter)) # lookup the theme in "pelican/themes" if the given one doesn't exist if not os.path.isdir(settings['THEME']): @@ -223,19 +236,15 @@ def configure_settings(settings): settings['WRITE_SELECTED'] = [ os.path.abspath(path) for path in settings.get('WRITE_SELECTED', DEFAULT_CONFIG['WRITE_SELECTED']) - ] + ] # standardize strings to lowercase strings - for key in [ - 'DEFAULT_LANG', - ]: + for key in ['DEFAULT_LANG']: if key in settings: settings[key] = settings[key].lower() # standardize strings to lists - for key in [ - 'LOCALE', - ]: + for key in ['LOCALE']: if key in settings and isinstance(settings[key], six.string_types): settings[key] = [settings[key]] @@ -243,12 +252,13 @@ def configure_settings(settings): for key, types in [ ('OUTPUT_SOURCES_EXTENSION', six.string_types), ('FILENAME_METADATA', six.string_types), - ]: + ]: if key in settings and not isinstance(settings[key], types): value = settings.pop(key) - logger.warn('Detected misconfigured %s (%s), ' - 'falling back to the default (%s)', - key, value, DEFAULT_CONFIG[key]) + logger.warn( + 'Detected misconfigured %s (%s), ' + 'falling back to the default (%s)', + key, value, DEFAULT_CONFIG[key]) # try to set the different locales, fallback on the default. locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE']) @@ -270,16 +280,16 @@ def configure_settings(settings): logger.warning("Removed extraneous trailing slash from SITEURL.") # If SITEURL is defined but FEED_DOMAIN isn't, # set FEED_DOMAIN to SITEURL - if not 'FEED_DOMAIN' in settings: + if 'FEED_DOMAIN' not in settings: settings['FEED_DOMAIN'] = settings['SITEURL'] # check content caching layer and warn of incompatibilities - if (settings.get('CACHE_CONTENT', False) and - settings.get('CONTENT_CACHING_LAYER', '') == 'generator' and - settings.get('WITH_FUTURE_DATES', DEFAULT_CONFIG['WITH_FUTURE_DATES'])): - logger.warning('WITH_FUTURE_DATES conflicts with ' - "CONTENT_CACHING_LAYER set to 'generator', " - "use 'reader' layer instead") + if settings.get('CACHE_CONTENT', False) and \ + settings.get('CONTENT_CACHING_LAYER', '') == 'generator' and \ + settings.get('WITH_FUTURE_DATES', False): + logger.warning( + "WITH_FUTURE_DATES conflicts with CONTENT_CACHING_LAYER " + "set to 'generator', use 'reader' layer instead") # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined feed_keys = [ @@ -296,7 +306,7 @@ def configure_settings(settings): logger.warning('Feeds generated without SITEURL set properly may' ' not be valid') - if not 'TIMEZONE' in settings: + if 'TIMEZONE' not in settings: logger.warning( 'No timezone information specified in the settings. Assuming' ' your timezone is UTC for feed generation. 
Check ' @@ -321,7 +331,8 @@ def configure_settings(settings): old_key = key + '_DIR' new_key = key + '_PATHS' if old_key in settings: - logger.warning('Deprecated setting %s, moving it to %s list', + logger.warning( + 'Deprecated setting %s, moving it to %s list', old_key, new_key) settings[new_key] = [settings[old_key]] # also make a list del settings[old_key] @@ -365,8 +376,9 @@ def configure_settings(settings): for old, new, doc in [ ('LESS_GENERATOR', 'the Webassets plugin', None), ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA', - 'https://github.com/getpelican/pelican/blob/master/docs/settings.rst#path-metadata'), - ]: + 'https://github.com/getpelican/pelican/' + 'blob/master/docs/settings.rst#path-metadata'), + ]: if old in settings: message = 'The {} setting has been removed in favor of {}'.format( old, new) diff --git a/pelican/signals.py b/pelican/signals.py --- a/pelican/signals.py +++ b/pelican/signals.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function +from __future__ import print_function, unicode_literals + from blinker import signal # Run-level signals: diff --git a/pelican/tools/pelican_import.py b/pelican/tools/pelican_import.py --- a/pelican/tools/pelican_import.py +++ b/pelican/tools/pelican_import.py @@ -1,29 +1,30 @@ #!/usr/bin/env python - # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function +from __future__ import print_function, unicode_literals + import argparse -try: - from html import unescape # py3.4+ -except ImportError: - from six.moves.html_parser import HTMLParser - unescape = HTMLParser().unescape +import logging import os import re import subprocess import sys import time -import logging from codecs import open + from six.moves.urllib.error import URLError from six.moves.urllib.parse import urlparse from six.moves.urllib.request import urlretrieve -# pelican.log has to be the first pelican module to be loaded # because logging.setLoggerClass has to be called before logging.getLogger from pelican.log import init -from pelican.utils import slugify, SafeDatetime +from pelican.utils import SafeDatetime, slugify + +try: + from html import unescape # py3.4+ +except ImportError: + from six.moves.html_parser import HTMLParser + unescape = HTMLParser().unescape logger = logging.getLogger(__name__) @@ -70,12 +71,19 @@ def decode_wp_content(content, br=True): content = "" for p in pgraphs: content = content + "<p>" + p.strip() + "</p>\n" - # under certain strange conditions it could create a P of entirely whitespace + # under certain strange conditions it could create + # a P of entirely whitespace content = re.sub(r'<p>\s*</p>', '', content) - content = re.sub(r'<p>([^<]+)</(div|address|form)>', "<p>\\1</p></\\2>", content) + content = re.sub( + r'<p>([^<]+)</(div|address|form)>', + "<p>\\1</p></\\2>", + content) # don't wrap tags - content = re.sub(r'<p>\s*(</?' + allblocks + r'[^>]*>)\s*</p>', "\\1", content) - #problem with nested lists + content = re.sub( + r'<p>\s*(</?' 
+ allblocks + r'[^>]*>)\s*</p>', + "\\1", + content) + # problem with nested lists content = re.sub(r'<p>(<li.*)</p>', "\\1", content) content = re.sub(r'<p><blockquote([^>]*)>', "<blockquote\\1><p>", content) content = content.replace('</blockquote></p>', '</p></blockquote>') @@ -84,12 +92,20 @@ def decode_wp_content(content, br=True): if br: def _preserve_newline(match): return match.group(0).replace("\n", "<WPPreserveNewline />") - content = re.sub(r'/<(script|style).*?<\/\\1>/s', _preserve_newline, content) + content = re.sub( + r'/<(script|style).*?<\/\\1>/s', + _preserve_newline, + content) # optionally make line breaks content = re.sub(r'(?<!<br />)\s*\n', "<br />\n", content) content = content.replace("<WPPreserveNewline />", "\n") - content = re.sub(r'(</?' + allblocks + r'[^>]*>)\s*<br />', "\\1", content) - content = re.sub(r'<br />(\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)', '\\1', content) + content = re.sub( + r'(</?' + allblocks + r'[^>]*>)\s*<br />', "\\1", + content) + content = re.sub( + r'<br />(\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)', + '\\1', + content) content = re.sub(r'\n</p>', "</p>", content) if pre_tags: @@ -100,13 +116,14 @@ def _multi_replace(dic, string): return content + def get_items(xml): """Opens a WordPress xml file and returns a list of items""" try: from bs4 import BeautifulSoup except ImportError: - error = ('Missing dependency ' - '"BeautifulSoup4" and "lxml" required to import WordPress XML files.') + error = ('Missing dependency "BeautifulSoup4" and "lxml" required to ' + 'import WordPress XML files.') sys.exit(error) with open(xml, encoding='utf-8') as infile: xmlfile = infile.read() @@ -114,12 +131,14 @@ def get_items(xml): items = soup.rss.channel.findAll('item') return items + def get_filename(filename, post_id): if filename is not None: return filename else: return post_id + def wp2fields(xml, wp_custpost=False): """Opens a wordpress XML file, and yield Pelican fields""" @@ -141,16 +160,18 @@ def wp2fields(xml, wp_custpost=False): content = item.find('encoded').string raw_date = item.find('post_date').string - date_object = time.strptime(raw_date, "%Y-%m-%d %H:%M:%S") - date = time.strftime("%Y-%m-%d %H:%M", date_object) + date_object = time.strptime(raw_date, '%Y-%m-%d %H:%M:%S') + date = time.strftime('%Y-%m-%d %H:%M', date_object) author = item.find('creator').string - categories = [cat.string for cat in item.findAll('category', {'domain' : 'category'})] - # caturl = [cat['nicename'] for cat in item.find(domain='category')] + categories = [cat.string for cat + in item.findAll('category', {'domain': 'category'})] - tags = [tag.string for tag in item.findAll('category', {'domain' : 'post_tag'})] + tags = [tag.string for tag + in item.findAll('category', {'domain': 'post_tag'})] # To publish a post the status should be 'published' - status = 'published' if item.find('status').string == "publish" else item.find('status').string + status = 'published' if item.find('status').string == "publish" \ + else item.find('status').string kind = 'article' post_type = item.find('post_type').string @@ -159,16 +180,17 @@ def wp2fields(xml, wp_custpost=False): elif wp_custpost: if post_type == 'post': pass - # Old behaviour was to name everything not a page as an article. - # Theoretically all attachments have status == inherit so - # no attachments should be here. 
But this statement is to + # Old behaviour was to name everything not a page as an + # article.Theoretically all attachments have status == inherit + # so no attachments should be here. But this statement is to # maintain existing behaviour in case that doesn't hold true. elif post_type == 'attachment': pass else: kind = post_type - yield (title, content, filename, date, author, categories, tags, status, - kind, "wp-html") + yield (title, content, filename, date, author, categories, + tags, status, kind, 'wp-html') + def dc2fields(file): """Opens a Dotclear export file, and yield pelican fields""" @@ -176,10 +198,10 @@ def dc2fields(file): from bs4 import BeautifulSoup except ImportError: error = ('Missing dependency ' - '"BeautifulSoup4" and "lxml" required to import Dotclear files.') + '"BeautifulSoup4" and "lxml" required ' + 'to import Dotclear files.') sys.exit(error) - in_cat = False in_post = False category_list = {} @@ -203,7 +225,7 @@ def dc2fields(file): # remove 1st and last "" fields[0] = fields[0][1:] # fields[-1] = fields[-1][:-1] - category_list[fields[0]]=fields[2] + category_list[fields[0]] = fields[2] elif in_post: if not line: in_post = False @@ -249,45 +271,50 @@ def dc2fields(file): # remove seconds post_creadt = ':'.join(post_creadt.split(':')[0:2]) - author = "" + author = '' categories = [] tags = [] if cat_id: - categories = [category_list[id].strip() for id in cat_id.split(',')] + categories = [category_list[id].strip() for id + in cat_id.split(',')] # Get tags related to a post - tag = post_meta.replace('{', '').replace('}', '').replace('a:1:s:3:\\"tag\\";a:', '').replace('a:0:', '') + tag = (post_meta.replace('{', '') + .replace('}', '') + .replace('a:1:s:3:\\"tag\\";a:', '') + .replace('a:0:', '')) if len(tag) > 1: if int(tag[:1]) == 1: newtag = tag.split('"')[1] tags.append( BeautifulSoup( - newtag - , "xml" + newtag, + 'xml' ) # bs4 always outputs UTF-8 .decode('utf-8') ) else: - i=1 - j=1 + i = 1 + j = 1 while(i <= int(tag[:1])): - newtag = tag.split('"')[j].replace('\\','') + newtag = tag.split('"')[j].replace('\\', '') tags.append( BeautifulSoup( - newtag - , "xml" + newtag, + 'xml' ) # bs4 always outputs UTF-8 .decode('utf-8') ) - i=i+1 - if j < int(tag[:1])*2: - j=j+2 + i = i + 1 + if j < int(tag[:1]) * 2: + j = j + 2 """ - dotclear2 does not use markdown by default unless you use the markdown plugin + dotclear2 does not use markdown by default unless + you use the markdown plugin Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown """ if post_format == "markdown": @@ -322,12 +349,13 @@ def posterous2fields(api_token, email, password): # py2 import import urllib2 as urllib_request - - def get_posterous_posts(api_token, email, password, page = 1): - base64string = base64.encodestring(("%s:%s" % (email, password)).encode('utf-8')).replace(b'\n', b'') - url = "http://posterous.com/api/v2/users/me/sites/primary/posts?api_token=%s&page=%d" % (api_token, page) + def get_posterous_posts(api_token, email, password, page=1): + base64string = base64.encodestring( + ("%s:%s" % (email, password)).encode('utf-8')).replace('\n', '') + url = ("http://posterous.com/api/v2/users/me/sites/primary/" + "posts?api_token=%s&page=%d") % (api_token, page) request = urllib_request.Request(url) - request.add_header("Authorization", "Basic %s" % base64string.decode()) + request.add_header('Authorization', 'Basic %s' % base64string.decode()) handle = urllib_request.urlopen(request) posts = json.loads(handle.read().decode('utf-8')) return posts @@ -344,16 +372,18 @@ 
def get_posterous_posts(api_token, email, password, page = 1): slug = slugify(post.get('title')) tags = [tag.get('name') for tag in post.get('tags')] raw_date = post.get('display_date') - date_object = SafeDatetime.strptime(raw_date[:-6], "%Y/%m/%d %H:%M:%S") + date_object = SafeDatetime.strptime( + raw_date[:-6], '%Y/%m/%d %H:%M:%S') offset = int(raw_date[-5:]) - delta = timedelta(hours = offset / 100) + delta = timedelta(hours=(offset / 100)) date_object -= delta - date = date_object.strftime("%Y-%m-%d %H:%M") - kind = 'article' # TODO: Recognise pages + date = date_object.strftime('%Y-%m-%d %H:%M') + kind = 'article' # TODO: Recognise pages status = 'published' # TODO: Find a way for draft posts - yield (post.get('title'), post.get('body_cleaned'), slug, date, - post.get('user').get('display_name'), [], tags, status, kind, "html") + yield (post.get('title'), post.get('body_cleaned'), + slug, date, post.get('user').get('display_name'), + [], tags, status, kind, 'html') def tumblr2fields(api_key, blogname): @@ -374,7 +404,9 @@ def tumblr2fields(api_key, blogname): import urllib2 as urllib_request def get_tumblr_posts(api_key, blogname, offset=0): - url = "http://api.tumblr.com/v2/blog/%s.tumblr.com/posts?api_key=%s&offset=%d&filter=raw" % (blogname, api_key, offset) + url = ("http://api.tumblr.com/v2/blog/%s.tumblr.com/" + "posts?api_key=%s&offset=%d&filter=raw") % ( + blogname, api_key, offset) request = urllib_request.Request(url) handle = urllib_request.urlopen(request) posts = json.loads(handle.read().decode('utf-8')) @@ -384,7 +416,10 @@ def get_tumblr_posts(api_key, blogname, offset=0): posts = get_tumblr_posts(api_key, blogname, offset) while len(posts) > 0: for post in posts: - title = post.get('title') or post.get('source_title') or post.get('type').capitalize() + title = \ + post.get('title') or \ + post.get('source_title') or \ + post.get('type').capitalize() slug = post.get('slug') or slugify(title) tags = post.get('tags') timestamp = post.get('timestamp') @@ -398,7 +433,11 @@ def get_tumblr_posts(api_key, blogname, offset=0): fmtstr = '![%s](%s)' else: fmtstr = '<img alt="%s" src="%s" />' - content = '\n'.join(fmtstr % (photo.get('caption'), photo.get('original_size').get('url')) for photo in post.get('photos')) + content = '' + for photo in post.get('photos'): + content += '\n'.join( + fmtstr % (photo.get('caption'), + photo.get('original_size').get('url'))) content += '\n\n' + post.get('caption') elif type == 'quote': if format == 'markdown': @@ -417,16 +456,29 @@ def get_tumblr_posts(api_key, blogname, offset=0): fmtstr = '[via](%s)\n\n' else: fmtstr = '<p><a href="%s">via</a></p>\n' - content = fmtstr % post.get('source_url') + post.get('caption') + post.get('player') + content = fmtstr % post.get('source_url') + \ + post.get('caption') + \ + post.get('player') elif type == 'video': if format == 'markdown': fmtstr = '[via](%s)\n\n' else: fmtstr = '<p><a href="%s">via</a></p>\n' - content = fmtstr % post.get('source_url') + post.get('caption') + '\n'.join(player.get('embed_code') for player in post.get('player')) + source = fmtstr % post.get('source_url') + caption = post.get('caption') + players = '\n'.join(player.get('embed_code') + for player in post.get('player')) + content = source + caption + players elif type == 'answer': title = post.get('question') - content = '<p><a href="%s" rel="external nofollow">%s</a>: %s</p>\n%s' % (post.get('asking_name'), post.get('asking_url'), post.get('question'), post.get('answer')) + content = ('<p>' + '<a href="%s" rel="external 
nofollow">%s</a>' + ': %s' + '</p>\n' + ' %s' % (post.get('asking_name'), + post.get('asking_url'), + post.get('question'), + post.get('answer'))) content = content.rstrip() + '\n' kind = 'article' @@ -438,25 +490,30 @@ def get_tumblr_posts(api_key, blogname, offset=0): offset += len(posts) posts = get_tumblr_posts(api_key, blogname, offset) + def feed2fields(file): """Read a feed and yield pelican fields""" import feedparser d = feedparser.parse(file) for entry in d.entries: - date = (time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed) - if hasattr(entry, "updated_parsed") else None) - author = entry.author if hasattr(entry, "author") else None - tags = [e['term'] for e in entry.tags] if hasattr(entry, "tags") else None + date = (time.strftime('%Y-%m-%d %H:%M', entry.updated_parsed) + if hasattr(entry, 'updated_parsed') else None) + author = entry.author if hasattr(entry, 'author') else None + tags = ([e['term'] for e in entry.tags] + if hasattr(entry, 'tags') else None) slug = slugify(entry.title) kind = 'article' - yield (entry.title, entry.description, slug, date, author, [], tags, None, - kind, "html") + yield (entry.title, entry.description, slug, date, + author, [], tags, None, kind, 'html') -def build_header(title, date, author, categories, tags, slug, status=None, attachments=None): - from docutils.utils import column_width +def build_header(title, date, author, categories, tags, slug, + status=None, attachments=None): """Build a header from a list of fields""" + + from docutils.utils import column_width + header = '%s\n%s\n' % (title, '#' * column_width(title)) if date: header += ':date: %s\n' % date @@ -475,8 +532,9 @@ def build_header(title, date, author, categories, tags, slug, status=None, attac header += '\n' return header -def build_markdown_header(title, date, author, categories, tags, slug, status=None, - attachments=None): + +def build_markdown_header(title, date, author, categories, tags, + slug, status=None, attachments=None): """Build a header from a list of fields""" header = 'Title: %s\n' % title if date: @@ -496,6 +554,7 @@ def build_markdown_header(title, date, author, categories, tags, slug, status=No header += '\n' return header + def get_ext(out_markup, in_markup='html'): if in_markup == 'markdown' or out_markup == 'markdown': ext = '.md' @@ -503,26 +562,27 @@ def get_ext(out_markup, in_markup='html'): ext = '.rst' return ext + def get_out_filename(output_path, filename, ext, kind, - dirpage, dircat, categories, wp_custpost): + dirpage, dircat, categories, wp_custpost): filename = os.path.basename(filename) # Enforce filename restrictions for various filesystems at once; see # http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words # we do not need to filter words because an extension will be appended - filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename) # invalid chars - filename = filename.lstrip('.') # should not start with a dot + filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename) # invalid chars + filename = filename.lstrip('.') # should not start with a dot if not filename: filename = '_' - filename = filename[:249] # allow for 5 extra characters + filename = filename[:249] # allow for 5 extra characters - out_filename = os.path.join(output_path, filename+ext) + out_filename = os.path.join(output_path, filename + ext) # option to put page posts in pages/ subdirectory if dirpage and kind == 'page': pages_dir = os.path.join(output_path, 'pages') if not os.path.isdir(pages_dir): os.mkdir(pages_dir) - out_filename = os.path.join(pages_dir, 
filename+ext) + out_filename = os.path.join(pages_dir, filename + ext) elif not dirpage and kind == 'page': pass # option to put wp custom post types in directories with post type @@ -539,18 +599,19 @@ def get_out_filename(output_path, filename, ext, kind, else: catname = '' out_filename = os.path.join(output_path, typename, - catname, filename+ext) + catname, filename + ext) if not os.path.isdir(os.path.join(output_path, typename, catname)): os.makedirs(os.path.join(output_path, typename, catname)) # option to put files in directories with categories names elif dircat and (len(categories) > 0): catname = slugify(categories[0]) - out_filename = os.path.join(output_path, catname, filename+ext) + out_filename = os.path.join(output_path, catname, filename + ext) if not os.path.isdir(os.path.join(output_path, catname)): os.mkdir(os.path.join(output_path, catname)) return out_filename + def get_attachments(xml): """returns a dictionary of posts that have attachments with a list of the attachment_urls @@ -566,7 +627,7 @@ def get_attachments(xml): if kind == 'attachment': attachments.append((item.find('post_parent').string, - item.find('attachment_url').string)) + item.find('attachment_url').string)) else: filename = get_filename(filename, post_id) names[post_id] = filename @@ -575,7 +636,7 @@ def get_attachments(xml): try: parent_name = names[parent] except KeyError: - #attachment's parent is not a valid post + # attachment's parent is not a valid post parent_name = None try: @@ -585,6 +646,7 @@ def get_attachments(xml): attachedposts[parent_name].append(url) return attachedposts + def download_attachments(output_path, urls): """Downloads WordPress attachments and returns a list of paths to attachments that can be associated with a post (relative path to output @@ -592,8 +654,8 @@ def download_attachments(output_path, urls): locations = [] for url in urls: path = urlparse(url).path - #teardown path and rebuild to negate any errors with - #os.path.join and leading /'s + # teardown path and rebuild to negate any errors with + # os.path.join and leading /'s path = path.split('/') filename = path.pop(-1) localpath = '' @@ -608,12 +670,13 @@ def download_attachments(output_path, urls): urlretrieve(url, os.path.join(full_path, filename)) locations.append(os.path.join(localpath, filename)) except (URLError, IOError) as e: - #Python 2.7 throws an IOError rather Than URLError + # Python 2.7 throws an IOError rather Than URLError logger.warning("No file could be downloaded from %s\n%s", url, e) return locations -def fields2pelican(fields, out_markup, output_path, +def fields2pelican( + fields, out_markup, output_path, dircat=False, strip_raw=False, disable_slugs=False, dirpage=False, filename_template=None, filter_author=None, wp_custpost=False, wp_attach=False, attachments=None): @@ -634,24 +697,26 @@ def fields2pelican(fields, out_markup, output_path, ext = get_ext(out_markup, in_markup) if ext == '.md': - header = build_markdown_header(title, date, author, categories, - tags, slug, status, attached_files) + header = build_markdown_header( + title, date, author, categories, tags, slug, + status, attached_files) else: - out_markup = "rst" + out_markup = 'rst' header = build_header(title, date, author, categories, - tags, slug, status, attached_files) + tags, slug, status, attached_files) - out_filename = get_out_filename(output_path, filename, ext, - kind, dirpage, dircat, categories, wp_custpost) + out_filename = get_out_filename( + output_path, filename, ext, kind, dirpage, dircat, + categories, 
wp_custpost) print(out_filename) - if in_markup in ("html", "wp-html"): - html_filename = os.path.join(output_path, filename+'.html') + if in_markup in ('html', 'wp-html'): + html_filename = os.path.join(output_path, filename + '.html') with open(html_filename, 'w', encoding='utf-8') as fp: # Replace newlines with paragraphs wrapped with <p> so # HTML is valid before conversion - if in_markup == "wp-html": + if in_markup == 'wp-html': new_content = decode_wp_content(content) else: paragraphs = content.splitlines() @@ -660,79 +725,95 @@ def fields2pelican(fields, out_markup, output_path, fp.write(new_content) - parse_raw = '--parse-raw' if not strip_raw else '' cmd = ('pandoc --normalize {0} --from=html' - ' --to={1} -o "{2}" "{3}"').format( - parse_raw, out_markup, out_filename, html_filename) + ' --to={1} -o "{2}" "{3}"') + cmd = cmd.format(parse_raw, out_markup, + out_filename, html_filename) try: rc = subprocess.call(cmd, shell=True) if rc < 0: - error = "Child was terminated by signal %d" % -rc + error = 'Child was terminated by signal %d' % -rc exit(error) elif rc > 0: - error = "Please, check your Pandoc installation." + error = 'Please, check your Pandoc installation.' exit(error) except OSError as e: - error = "Pandoc execution failed: %s" % e + error = 'Pandoc execution failed: %s' % e exit(error) os.remove(html_filename) with open(out_filename, 'r', encoding='utf-8') as fs: content = fs.read() - if out_markup == "markdown": - # In markdown, to insert a <br />, end a line with two or more spaces & then a end-of-line - content = content.replace("\\\n ", " \n") - content = content.replace("\\\n", " \n") + if out_markup == 'markdown': + # In markdown, to insert a <br />, end a line with two + # or more spaces & then a end-of-line + content = content.replace('\\\n ', ' \n') + content = content.replace('\\\n', ' \n') with open(out_filename, 'w', encoding='utf-8') as fs: fs.write(header + content) if wp_attach and attachments and None in attachments: print("downloading attachments that don't have a parent post") urls = attachments[None] - orphan_galleries = download_attachments(output_path, urls) + download_attachments(output_path, urls) + def main(): parser = argparse.ArgumentParser( - description="Transform feed, WordPress, Tumblr, Dotclear, or Posterous " - "files into reST (rst) or Markdown (md) files. Be sure to " - "have pandoc installed.", + description="Transform feed, WordPress, Tumblr, Dotclear, or " + "Posterous files into reST (rst) or Markdown (md) files. 
" + "Be sure to have pandoc installed.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument(dest='input', help='The input file to read') - parser.add_argument('--wpfile', action='store_true', dest='wpfile', + parser.add_argument( + dest='input', help='The input file to read') + parser.add_argument( + '--wpfile', action='store_true', dest='wpfile', help='Wordpress XML export') - parser.add_argument('--dotclear', action='store_true', dest='dotclear', + parser.add_argument( + '--dotclear', action='store_true', dest='dotclear', help='Dotclear export') - parser.add_argument('--posterous', action='store_true', dest='posterous', + parser.add_argument( + '--posterous', action='store_true', dest='posterous', help='Posterous export') - parser.add_argument('--tumblr', action='store_true', dest='tumblr', + parser.add_argument( + '--tumblr', action='store_true', dest='tumblr', help='Tumblr export') - parser.add_argument('--feed', action='store_true', dest='feed', + parser.add_argument( + '--feed', action='store_true', dest='feed', help='Feed to parse') - parser.add_argument('-o', '--output', dest='output', default='output', + parser.add_argument( + '-o', '--output', dest='output', default='output', help='Output path') - parser.add_argument('-m', '--markup', dest='markup', default='rst', + parser.add_argument( + '-m', '--markup', dest='markup', default='rst', help='Output markup format (supports rst & markdown)') - parser.add_argument('--dir-cat', action='store_true', dest='dircat', + parser.add_argument( + '--dir-cat', action='store_true', dest='dircat', help='Put files in directories with categories name') - parser.add_argument('--dir-page', action='store_true', dest='dirpage', + parser.add_argument( + '--dir-page', action='store_true', dest='dirpage', help=('Put files recognised as pages in "pages/" sub-directory' ' (wordpress import only)')) - parser.add_argument('--filter-author', dest='author', + parser.add_argument( + '--filter-author', dest='author', help='Import only post from the specified author') - parser.add_argument('--strip-raw', action='store_true', dest='strip_raw', + parser.add_argument( + '--strip-raw', action='store_true', dest='strip_raw', help="Strip raw HTML code that can't be converted to " "markup such as flash embeds or iframes (wordpress import only)") - parser.add_argument('--wp-custpost', action='store_true', + parser.add_argument( + '--wp-custpost', action='store_true', dest='wp_custpost', help='Put wordpress custom post types in directories. If used with ' '--dir-cat option directories will be created as ' '/post_type/category/ (wordpress import only)') - parser.add_argument('--wp-attach', action='store_true', dest='wp_attach', + parser.add_argument( + '--wp-attach', action='store_true', dest='wp_attach', help='(wordpress import only) Download files uploaded to wordpress as ' 'attachments. Files will be added to posts as a list in the post ' 'header. All files will be downloaded, even if ' @@ -740,16 +821,20 @@ def main(): 'with their original path inside the output directory. ' 'e.g. output/wp-uploads/date/postname/file.jpg ' '-- Requires an internet connection --') - parser.add_argument('--disable-slugs', action='store_true', + parser.add_argument( + '--disable-slugs', action='store_true', dest='disable_slugs', help='Disable storing slugs from imported posts within output. 
' 'With this disabled, your Pelican URLs may not be consistent ' 'with your original posts.') - parser.add_argument('-e', '--email', dest='email', + parser.add_argument( + '-e', '--email', dest='email', help="Email address (posterous import only)") - parser.add_argument('-p', '--password', dest='password', + parser.add_argument( + '-p', '--password', dest='password', help="Password (posterous import only)") - parser.add_argument('-b', '--blogname', dest='blogname', + parser.add_argument( + '-b', '--blogname', dest='blogname', help="Blog name (Tumblr import only)") args = parser.parse_args() @@ -766,18 +851,20 @@ def main(): elif args.feed: input_type = 'feed' else: - error = "You must provide either --wpfile, --dotclear, --posterous, --tumblr or --feed options" + error = ('You must provide either --wpfile, --dotclear, ' + '--posterous, --tumblr or --feed options') exit(error) if not os.path.exists(args.output): try: os.mkdir(args.output) except OSError: - error = "Unable to create the output folder: " + args.output + error = 'Unable to create the output folder: ' + args.output exit(error) if args.wp_attach and input_type != 'wordpress': - error = "You must be importing a wordpress xml to use the --wp-attach option" + error = ('You must be importing a wordpress xml ' + 'to use the --wp-attach option') exit(error) if input_type == 'wordpress': @@ -796,14 +883,14 @@ def main(): else: attachments = None - init() # init logging - + # init logging + init() fields2pelican(fields, args.markup, args.output, dircat=args.dircat or False, dirpage=args.dirpage or False, strip_raw=args.strip_raw or False, disable_slugs=args.disable_slugs or False, filter_author=args.author, - wp_custpost = args.wp_custpost or False, - wp_attach = args.wp_attach or False, - attachments = attachments or None) + wp_custpost=args.wp_custpost or False, + wp_attach=args.wp_attach or False, + attachments=attachments or None) diff --git a/pelican/tools/pelican_quickstart.py b/pelican/tools/pelican_quickstart.py --- a/pelican/tools/pelican_quickstart.py +++ b/pelican/tools/pelican_quickstart.py @@ -1,18 +1,20 @@ #!/usr/bin/env python - # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function -import six +from __future__ import print_function, unicode_literals +import argparse +import codecs import os import string -import argparse import sys -import codecs + import pytz +import six + from pelican import __version__ + _TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates") @@ -44,9 +46,10 @@ 'timezone': 'Europe/Paris' } -#url for list of valid timezones +# url for list of valid timezones _TZ_URL = 'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones' + def _input_compat(prompt): if six.PY3: r = input(prompt) @@ -59,6 +62,7 @@ def _input_compat(prompt): else: str_compat = unicode + # Create a 'marked' default path, to determine if someone has supplied # a path on the command-line. 
class _DEFAULT_PATH_TYPE(str_compat): @@ -66,6 +70,7 @@ class _DEFAULT_PATH_TYPE(str_compat): _DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir) + def decoding_strings(f): def wrapper(*args, **kwargs): out = f(*args, **kwargs) @@ -164,7 +169,8 @@ def ask(question, answer=str_compat, default=None, l=None): print('You must enter an integer') return r else: - raise NotImplemented('Argument `answer` must be str_compat, bool, or integer') + raise NotImplemented( + 'Argument `answer` must be str_compat, bool, or integer') def ask_timezone(question, default, tzurl): @@ -177,7 +183,8 @@ def ask_timezone(question, default, tzurl): r = pytz.all_timezones[lower_tz.index(r)] break else: - print('Please enter a valid time zone:\n (check [{0}])'.format(tzurl)) + print('Please enter a valid time zone:\n' + ' (check [{0}])'.format(tzurl)) return r @@ -186,13 +193,13 @@ def main(): description="A kickstarter for Pelican", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-p', '--path', default=_DEFAULT_PATH, - help="The path to generate the blog into") + help="The path to generate the blog into") parser.add_argument('-t', '--title', metavar="title", - help='Set the title of the website') + help='Set the title of the website') parser.add_argument('-a', '--author', metavar="author", - help='Set the author name of the website') + help='Set the author name of the website') parser.add_argument('-l', '--lang', metavar="lang", - help='Set the default web site language') + help='Set the default web site language') args = parser.parse_args() @@ -214,50 +221,94 @@ def main(): 'Will save to:\n%s\n' % CONF['basedir']) else: CONF['basedir'] = os.path.abspath(os.path.expanduser( - ask('Where do you want to create your new web site?', answer=str_compat, default=args.path))) + ask('Where do you want to create your new web site?', + answer=str_compat, default=args.path))) - CONF['sitename'] = ask('What will be the title of this web site?', answer=str_compat, default=args.title) - CONF['author'] = ask('Who will be the author of this web site?', answer=str_compat, default=args.author) - CONF['lang'] = ask('What will be the default language of this web site?', str_compat, args.lang or CONF['lang'], 2) + CONF['sitename'] = ask('What will be the title of this web site?', + answer=str_compat, default=args.title) + CONF['author'] = ask('Who will be the author of this web site?', + answer=str_compat, default=args.author) + CONF['lang'] = ask('What will be the default language of this web site?', + str_compat, args.lang or CONF['lang'], 2) - if ask('Do you want to specify a URL prefix? e.g., http://example.com ', answer=bool, default=True): - CONF['siteurl'] = ask('What is your URL prefix? (see above example; no trailing slash)', str_compat, CONF['siteurl']) + if ask('Do you want to specify a URL prefix? e.g., http://example.com ', + answer=bool, default=True): + CONF['siteurl'] = ask('What is your URL prefix? 
(see ' + 'above example; no trailing slash)', + str_compat, CONF['siteurl']) - CONF['with_pagination'] = ask('Do you want to enable article pagination?', bool, bool(CONF['default_pagination'])) + CONF['with_pagination'] = ask('Do you want to enable article pagination?', + bool, bool(CONF['default_pagination'])) if CONF['with_pagination']: - CONF['default_pagination'] = ask('How many articles per page do you want?', int, CONF['default_pagination']) + CONF['default_pagination'] = ask('How many articles per page ' + 'do you want?', + int, CONF['default_pagination']) else: CONF['default_pagination'] = False - CONF['timezone'] = ask_timezone('What is your time zone?', CONF['timezone'], _TZ_URL) + CONF['timezone'] = ask_timezone('What is your time zone?', + CONF['timezone'], _TZ_URL) - automation = ask('Do you want to generate a Fabfile/Makefile to automate generation and publishing?', bool, True) - develop = ask('Do you want an auto-reload & simpleHTTP script to assist with theme and site development?', bool, True) + automation = ask('Do you want to generate a Fabfile/Makefile ' + 'to automate generation and publishing?', bool, True) + develop = ask('Do you want an auto-reload & simpleHTTP script ' + 'to assist with theme and site development?', bool, True) if automation: - if ask('Do you want to upload your website using FTP?', answer=bool, default=False): - CONF['ftp_host'] = ask('What is the hostname of your FTP server?', str_compat, CONF['ftp_host']) - CONF['ftp_user'] = ask('What is your username on that server?', str_compat, CONF['ftp_user']) - CONF['ftp_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ftp_target_dir']) - if ask('Do you want to upload your website using SSH?', answer=bool, default=False): - CONF['ssh_host'] = ask('What is the hostname of your SSH server?', str_compat, CONF['ssh_host']) - CONF['ssh_port'] = ask('What is the port of your SSH server?', int, CONF['ssh_port']) - CONF['ssh_user'] = ask('What is your username on that server?', str_compat, CONF['ssh_user']) - CONF['ssh_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ssh_target_dir']) - if ask('Do you want to upload your website using Dropbox?', answer=bool, default=False): - CONF['dropbox_dir'] = ask('Where is your Dropbox directory?', str_compat, CONF['dropbox_dir']) - if ask('Do you want to upload your website using S3?', answer=bool, default=False): - CONF['s3_bucket'] = ask('What is the name of your S3 bucket?', str_compat, CONF['s3_bucket']) - if ask('Do you want to upload your website using Rackspace Cloud Files?', answer=bool, default=False): - CONF['cloudfiles_username'] = ask('What is your Rackspace Cloud username?', str_compat, CONF['cloudfiles_username']) - CONF['cloudfiles_api_key'] = ask('What is your Rackspace Cloud API key?', str_compat, CONF['cloudfiles_api_key']) - CONF['cloudfiles_container'] = ask('What is the name of your Cloud Files container?', str_compat, CONF['cloudfiles_container']) - if ask('Do you want to upload your website using GitHub Pages?', answer=bool, default=False): - if ask('Is this your personal page (username.github.io)?', answer=bool, default=False): - CONF['github_pages_branch'] = _GITHUB_PAGES_BRANCHES['personal'] + if ask('Do you want to upload your website using FTP?', + answer=bool, default=False): + CONF['ftp_host'] = ask('What is the hostname of your FTP server?', + str_compat, CONF['ftp_host']) + CONF['ftp_user'] = ask('What is your username on that server?', + str_compat, 
CONF['ftp_user']) + CONF['ftp_target_dir'] = ask('Where do you want to put your ' + 'web site on that server?', + str_compat, CONF['ftp_target_dir']) + if ask('Do you want to upload your website using SSH?', + answer=bool, default=False): + CONF['ssh_host'] = ask('What is the hostname of your SSH server?', + str_compat, CONF['ssh_host']) + CONF['ssh_port'] = ask('What is the port of your SSH server?', + int, CONF['ssh_port']) + CONF['ssh_user'] = ask('What is your username on that server?', + str_compat, CONF['ssh_user']) + CONF['ssh_target_dir'] = ask('Where do you want to put your ' + 'web site on that server?', + str_compat, CONF['ssh_target_dir']) + + if ask('Do you want to upload your website using Dropbox?', + answer=bool, default=False): + CONF['dropbox_dir'] = ask('Where is your Dropbox directory?', + str_compat, CONF['dropbox_dir']) + + if ask('Do you want to upload your website using S3?', + answer=bool, default=False): + CONF['s3_bucket'] = ask('What is the name of your S3 bucket?', + str_compat, CONF['s3_bucket']) + + if ask('Do you want to upload your website using ' + 'Rackspace Cloud Files?', answer=bool, default=False): + CONF['cloudfiles_username'] = ask('What is your Rackspace ' + 'Cloud username?', str_compat, + CONF['cloudfiles_username']) + CONF['cloudfiles_api_key'] = ask('What is your Rackspace ' + 'Cloud API key?', str_compat, + CONF['cloudfiles_api_key']) + CONF['cloudfiles_container'] = ask('What is the name of your ' + 'Cloud Files container?', + str_compat, + CONF['cloudfiles_container']) + + if ask('Do you want to upload your website using GitHub Pages?', + answer=bool, default=False): + if ask('Is this your personal page (username.github.io)?', + answer=bool, default=False): + CONF['github_pages_branch'] = \ + _GITHUB_PAGES_BRANCHES['personal'] else: - CONF['github_pages_branch'] = _GITHUB_PAGES_BRANCHES['project'] + CONF['github_pages_branch'] = \ + _GITHUB_PAGES_BRANCHES['project'] try: os.makedirs(os.path.join(CONF['basedir'], 'content')) @@ -270,7 +321,8 @@ def main(): print('Error: {0}'.format(e)) try: - with codecs.open(os.path.join(CONF['basedir'], 'pelicanconf.py'), 'w', 'utf-8') as fd: + with codecs.open(os.path.join(CONF['basedir'], 'pelicanconf.py'), + 'w', 'utf-8') as fd: conf_python = dict() for key, value in CONF.items(): conf_python[key] = repr(value) @@ -283,7 +335,8 @@ def main(): print('Error: {0}'.format(e)) try: - with codecs.open(os.path.join(CONF['basedir'], 'publishconf.py'), 'w', 'utf-8') as fd: + with codecs.open(os.path.join(CONF['basedir'], 'publishconf.py'), + 'w', 'utf-8') as fd: for line in get_template('publishconf.py'): template = string.Template(line) fd.write(template.safe_substitute(CONF)) @@ -293,7 +346,8 @@ def main(): if automation: try: - with codecs.open(os.path.join(CONF['basedir'], 'fabfile.py'), 'w', 'utf-8') as fd: + with codecs.open(os.path.join(CONF['basedir'], 'fabfile.py'), + 'w', 'utf-8') as fd: for line in get_template('fabfile.py'): template = string.Template(line) fd.write(template.safe_substitute(CONF)) @@ -301,7 +355,8 @@ def main(): except OSError as e: print('Error: {0}'.format(e)) try: - with codecs.open(os.path.join(CONF['basedir'], 'Makefile'), 'w', 'utf-8') as fd: + with codecs.open(os.path.join(CONF['basedir'], 'Makefile'), + 'w', 'utf-8') as fd: mkfile_template_name = 'Makefile' py_v = 'PY?=python' if six.PY3: @@ -323,7 +378,9 @@ def main(): value = '"' + value.replace('"', '\\"') + '"' conf_shell[key] = value try: - with codecs.open(os.path.join(CONF['basedir'], 'develop_server.sh'), 'w', 'utf-8') 
as fd: + with codecs.open(os.path.join(CONF['basedir'], + 'develop_server.sh'), + 'w', 'utf-8') as fd: lines = list(get_template('develop_server.sh')) py_v = 'PY=${PY:-python}\n' if six.PY3: @@ -333,7 +390,10 @@ def main(): template = string.Template(line) fd.write(template.safe_substitute(conf_shell)) fd.close() - os.chmod((os.path.join(CONF['basedir'], 'develop_server.sh')), 493) # mode 0o755 + + # mode 0o755 + os.chmod((os.path.join(CONF['basedir'], + 'develop_server.sh')), 493) except OSError as e: print('Error: {0}'.format(e)) diff --git a/pelican/tools/pelican_themes.py b/pelican/tools/pelican_themes.py --- a/pelican/tools/pelican_themes.py +++ b/pelican/tools/pelican_themes.py @@ -1,26 +1,31 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function - -import six +from __future__ import print_function, unicode_literals import argparse import os import shutil import sys + +def err(msg, die=None): + """Print an error message and exits if an exit code is given""" + sys.stderr.write(msg + '\n') + if die: + sys.exit((die if type(die) is int else 1)) + try: import pelican except: - err('Cannot import pelican.\nYou must install Pelican in order to run this script.', -1) + err('Cannot import pelican.\nYou must ' + 'install Pelican in order to run this script.', + -1) global _THEMES_PATH _THEMES_PATH = os.path.join( os.path.dirname( - os.path.abspath( - pelican.__file__ - ) + os.path.abspath(pelican.__file__) ), 'themes' ) @@ -29,49 +34,51 @@ _BUILTIN_THEMES = ['simple', 'notmyidea'] -def err(msg, die=None): - """Print an error message and exits if an exit code is given""" - sys.stderr.write(msg + '\n') - if die: - sys.exit((die if type(die) is int else 1)) - - def main(): """Main function""" - parser = argparse.ArgumentParser(description="""Install themes for Pelican""") + parser = argparse.ArgumentParser( + description="""Install themes for Pelican""") - excl= parser.add_mutually_exclusive_group() - excl.add_argument('-l', '--list', dest='action', action="store_const", const='list', + excl = parser.add_mutually_exclusive_group() + excl.add_argument( + '-l', '--list', dest='action', action="store_const", const='list', help="Show the themes already installed and exit") - excl.add_argument('-p', '--path', dest='action', action="store_const", const='path', + excl.add_argument( + '-p', '--path', dest='action', action="store_const", const='path', help="Show the themes path and exit") - excl.add_argument('-V', '--version', action='version', version='pelican-themes v{0}'.format(__version__), + excl.add_argument( + '-V', '--version', action='version', + version='pelican-themes v{0}'.format(__version__), help='Print the version of this script') - - parser.add_argument('-i', '--install', dest='to_install', nargs='+', metavar="theme path", + parser.add_argument( + '-i', '--install', dest='to_install', nargs='+', metavar="theme path", help='The themes to install') - parser.add_argument('-r', '--remove', dest='to_remove', nargs='+', metavar="theme name", + parser.add_argument( + '-r', '--remove', dest='to_remove', nargs='+', metavar="theme name", help='The themes to remove') - parser.add_argument('-U', '--upgrade', dest='to_upgrade', nargs='+', - metavar="theme path", help='The themes to upgrade') - parser.add_argument('-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path", - help="Same as `--install', but create a symbolic link instead of copying the theme. 
Useful for theme development") - parser.add_argument('-c', '--clean', dest='clean', action="store_true", + parser.add_argument( + '-U', '--upgrade', dest='to_upgrade', nargs='+', + metavar="theme path", help='The themes to upgrade') + parser.add_argument( + '-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path", + help="Same as `--install', but create a symbolic link instead of " + "copying the theme. Useful for theme development") + parser.add_argument( + '-c', '--clean', dest='clean', action="store_true", help="Remove the broken symbolic links of the theme path") - - parser.add_argument('-v', '--verbose', dest='verbose', action="store_true", + parser.add_argument( + '-v', '--verbose', dest='verbose', + action="store_true", help="Verbose output") - args = parser.parse_args() - + to_install = args.to_install or args.to_upgrade to_sym = args.to_symlink or args.clean - if args.action: if args.action is 'list': list_themes(args.verbose) @@ -95,7 +102,7 @@ def main(): if args.to_upgrade: if args.verbose: print('Upgrading themes...') - + for i in args.to_upgrade: install(i, v=args.verbose, u=True) @@ -144,11 +151,13 @@ def list_themes(v=False): def remove(theme_name, v=False): """Removes a theme""" - theme_name = theme_name.replace('/','') + theme_name = theme_name.replace('/', '') target = os.path.join(_THEMES_PATH, theme_name) if theme_name in _BUILTIN_THEMES: - err(theme_name + ' is a builtin theme.\nYou cannot remove a builtin theme with this script, remove it by hand if you want.') + err(theme_name + ' is a builtin theme.\n' + 'You cannot remove a builtin theme with this script, ' + 'remove it by hand if you want.') elif os.path.islink(target): if v: print('Removing link `' + target + "'") @@ -180,7 +189,8 @@ def install(path, v=False, u=False): install(path, v) else: if v: - print("Copying `{p}' to `{t}' ...".format(p=path, t=theme_path)) + print("Copying '{p}' to '{t}' ...".format(p=path, + t=theme_path)) try: shutil.copytree(path, theme_path) @@ -189,14 +199,18 @@ def install(path, v=False, u=False): for root, dirs, files in os.walk(theme_path): for d in dirs: dname = os.path.join(root, d) - os.chmod(dname, 493) # 0o755 + os.chmod(dname, 493) # 0o755 for f in files: fname = os.path.join(root, f) - os.chmod(fname, 420) # 0o644 + os.chmod(fname, 420) # 0o644 except OSError as e: - err("Cannot change permissions of files or directory in `{r}':\n{e}".format(r=theme_path, e=str(e)), die=False) + err("Cannot change permissions of files " + "or directory in `{r}':\n{e}".format(r=theme_path, + e=str(e)), + die=False) except Exception as e: - err("Cannot copy `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e))) + err("Cannot copy `{p}' to `{t}':\n{e}".format( + p=path, t=theme_path, e=str(e))) def symlink(path, v=False): @@ -212,11 +226,13 @@ def symlink(path, v=False): err(path + ' : already exists') else: if v: - print("Linking `{p}' to `{t}' ...".format(p=path, t=theme_path)) + print("Linking `{p}' to `{t}' ...".format( + p=path, t=theme_path)) try: os.symlink(path, theme_path) except Exception as e: - err("Cannot link `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e))) + err("Cannot link `{p}' to `{t}':\n{e}".format( + p=path, t=theme_path, e=str(e))) def is_broken_link(path): @@ -227,7 +243,7 @@ def is_broken_link(path): def clean(v=False): """Removes the broken symbolic links""" - c=0 + c = 0 for path in os.listdir(_THEMES_PATH): path = os.path.join(_THEMES_PATH, path) if os.path.islink(path): @@ -236,9 +252,9 @@ def clean(v=False): print('Removing 
{0}'.format(path)) try: os.remove(path) - except OSError as e: + except OSError: print('Error: cannot remove {0}'.format(path)) else: - c+=1 + c += 1 print("\nRemoved {0} broken links".format(c)) diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py --- a/pelican/urlwrappers.py +++ b/pelican/urlwrappers.py @@ -4,9 +4,10 @@ import functools import logging import os + import six -from pelican.utils import (slugify, python_2_unicode_compatible) +from pelican.utils import python_2_unicode_compatible, slugify logger = logging.getLogger(__name__) diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -1,29 +1,30 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function -import six +from __future__ import print_function, unicode_literals import codecs +import datetime import errno import fnmatch import locale import logging import os -import pytz import re import shutil import sys import traceback -import pickle -import datetime - from collections import Hashable from contextlib import contextmanager -import dateutil.parser from functools import partial from itertools import groupby -from jinja2 import Markup from operator import attrgetter -from posixpath import join as posix_join + +import dateutil.parser + +from jinja2 import Markup + +import pytz + +import six from six.moves.html_parser import HTMLParser logger = logging.getLogger(__name__) @@ -43,9 +44,9 @@ def strftime(date, date_format): formatting them with the date, (if necessary) decoding the output and replacing formatted output back. ''' - + def strip_zeros(x): + return x.lstrip('0') or '0' c89_directives = 'aAbBcdfHIjmMpSUwWxXyYzZ%' - strip_zeros = lambda x: x.lstrip('0') or '0' # grab candidate format options format_options = '%[-]?.' @@ -200,8 +201,8 @@ def _warn(): ' and will be removed by version {}'.format(version)) message.append('. 
Use {} instead.'.format(new)) logger.warning(''.join(message)) - logger.debug(''.join( - six.text_type(x) for x in traceback.format_stack())) + logger.debug(''.join(six.text_type(x) for x + in traceback.format_stack())) def fget(self): _warn() @@ -224,7 +225,7 @@ def get_date(string): """ string = re.sub(' +', ' ', string) default = SafeDatetime.now().replace(hour=0, minute=0, - second=0, microsecond=0) + second=0, microsecond=0) try: return dateutil.parser.parse(string, default=default) except (TypeError, ValueError): @@ -319,12 +320,12 @@ def walk_error(err): for src_dir, subdirs, others in os.walk(source_): dst_dir = os.path.join(destination_, - os.path.relpath(src_dir, source_)) + os.path.relpath(src_dir, source_)) subdirs[:] = (s for s in subdirs if not any(fnmatch.fnmatch(s, i) for i in ignores)) - others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i) - for i in ignores)) + others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i) + for i in ignores)) if not os.path.isdir(dst_dir): logger.info('Creating directory %s', dst_dir) @@ -338,9 +339,11 @@ def walk_error(err): logger.info('Copying %s to %s', src_path, dst_path) shutil.copy2(src_path, dst_path) else: - logger.warning('Skipped copy %s (not a file or directory) to %s', + logger.warning('Skipped copy %s (not a file or ' + 'directory) to %s', src_path, dst_path) + def clean_output_dir(path, retention): """Remove all files from output directory except those in retention list""" @@ -366,8 +369,8 @@ def clean_output_dir(path, retention): shutil.rmtree(file) logger.debug("Deleted directory %s", file) except Exception as e: - logger.error("Unable to delete directory %s; %s", - file, e) + logger.error("Unable to delete directory %s; %s", + file, e) elif os.path.isfile(file) or os.path.islink(file): try: os.remove(file) @@ -507,12 +510,12 @@ def process_translations(content_list, order_by=None): for slug, items in grouped_by_slugs: items = list(items) - # items with `translation` metadata will be used as translations… + # items with `translation` metadata will be used as translations... default_lang_items = list(filter( - lambda i: i.metadata.get('translation', 'false').lower() - == 'false', - items)) - # …unless all items with that slug are translations + lambda i: + i.metadata.get('translation', 'false').lower() == 'false', + items)) + # ...unless all items with that slug are translations if not default_lang_items: default_lang_items = items @@ -522,13 +525,14 @@ def process_translations(content_list, order_by=None): len_ = len(lang_items) if len_ > 1: logger.warning('There are %s variants of "%s" with lang %s', - len_, slug, lang) + len_, slug, lang) for x in lang_items: logger.warning('\t%s', x.source_path) # find items with default language - default_lang_items = list(filter(attrgetter('in_default_lang'), - default_lang_items)) + default_lang_items = list(filter( + attrgetter('in_default_lang'), + default_lang_items)) # if there is no article with default language, take an other one if not default_lang_items: @@ -536,10 +540,9 @@ def process_translations(content_list, order_by=None): if not slug: logger.warning( - 'empty slug for %s. ' - 'You can fix this by adding a title or a slug to your ' - 'content', - default_lang_items[0].source_path) + 'Empty slug for %s. 
You can fix this by ' + 'adding a title or a slug to your content', + default_lang_items[0].source_path) index.extend(default_lang_items) translations.extend([x for x in items if x not in default_lang_items]) for a in items: @@ -567,10 +570,12 @@ def process_translations(content_list, order_by=None): index.sort(key=attrgetter(order_by), reverse=order_reversed) except AttributeError: - logger.warning('There is no "%s" attribute in the item ' + logger.warning( + 'There is no "%s" attribute in the item ' 'metadata. Defaulting to slug order.', order_by) else: - logger.warning('Invalid *_ORDER_BY setting (%s).' + logger.warning( + 'Invalid *_ORDER_BY setting (%s).' 'Valid options are strings and functions.', order_by) return index, translations @@ -589,12 +594,12 @@ def file_times(path): dirs[:] = [x for x in dirs if not x.startswith(os.curdir)] for f in files: - if (f.endswith(tuple(extensions)) and - not any(fnmatch.fnmatch(f, ignore) for ignore in ignores)): - try: - yield os.stat(os.path.join(root, f)).st_mtime - except OSError as e: - logger.warning('Caught Exception: %s', e) + if f.endswith(tuple(extensions)) and \ + not any(fnmatch.fnmatch(f, ignore) for ignore in ignores): + try: + yield os.stat(os.path.join(root, f)).st_mtime + except OSError as e: + logger.warning('Caught Exception: %s', e) LAST_MTIME = 0 while True: diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -1,21 +1,23 @@ # -*- coding: utf-8 -*- -from __future__ import with_statement, unicode_literals, print_function -import six +from __future__ import print_function, unicode_literals, with_statement -import os import logging - -if not six.PY3: - from codecs import open +import os from feedgenerator import Atom1Feed, Rss201rev2Feed + from jinja2 import Markup + +import six from six.moves.urllib.parse import urlparse -from pelican.paginator import Paginator -from pelican.utils import (get_relative_path, path_to_url, set_date_tzinfo, - is_selected_for_writing) from pelican import signals +from pelican.paginator import Paginator +from pelican.utils import (get_relative_path, is_selected_for_writing, + path_to_url, set_date_tzinfo) + +if not six.PY3: + from codecs import open logger = logging.getLogger(__name__) @@ -119,10 +121,10 @@ def write_feed(self, elements, context, path=None, feed_type='atom'): feed.write(fp, 'utf-8') logger.info('Writing %s', complete_path) - signals.feed_written.send(complete_path, context=context, feed=feed) + signals.feed_written.send( + complete_path, context=context, feed=feed) return feed - def write_file(self, name, template, context, relative_urls=False, paginated=None, override_output=False, **kwargs): """Render the template and write the file. 
@@ -139,9 +141,10 @@ def write_file(self, name, template, context, relative_urls=False, :param **kwargs: additional variables to pass to the templates """ - if name is False or name == "" or\ - not is_selected_for_writing(self.settings,\ - os.path.join(self.output_path, name)): + if name is False or \ + name == "" or \ + not is_selected_for_writing(self.settings, + os.path.join(self.output_path, name)): return elif not name: # other stuff, just return for now @@ -169,7 +172,8 @@ def _write_file(template, localcontext, output_path, name, override): def _get_localcontext(context, name, kwargs, relative_urls): localcontext = context.copy() - localcontext['localsiteurl'] = localcontext.get('localsiteurl', None) + localcontext['localsiteurl'] = localcontext.get( + 'localsiteurl', None) if relative_urls: relative_url = path_to_url(get_relative_path(name)) localcontext['SITEURL'] = relative_url @@ -201,11 +205,13 @@ def _get_localcontext(context, name, kwargs, relative_urls): '%s_previous_page' % key: previous_page, '%s_next_page' % key: next_page}) - localcontext = _get_localcontext(context, page.save_as, paginated_kwargs, relative_urls) + localcontext = _get_localcontext( + context, page.save_as, paginated_kwargs, relative_urls) _write_file(template, localcontext, self.output_path, page.save_as, override_output) else: # no pagination - localcontext = _get_localcontext(context, name, kwargs, relative_urls) + localcontext = _get_localcontext( + context, name, kwargs, relative_urls) _write_file(template, localcontext, self.output_path, name, override_output)
diff --git a/pelican/tests/default_conf.py b/pelican/tests/default_conf.py --- a/pelican/tests/default_conf.py +++ b/pelican/tests/default_conf.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function +from __future__ import print_function, unicode_literals AUTHOR = 'Alexis Métaireau' SITENAME = "Alexis' log" SITEURL = 'http://blog.notmyidea.org' @@ -31,17 +31,16 @@ # path-specific metadata EXTRA_PATH_METADATA = { 'extra/robots.txt': {'path': 'robots.txt'}, - } +} # static paths will be copied without parsing their contents STATIC_PATHS = [ 'pictures', 'extra/robots.txt', - ] +] FORMATTED_FIELDS = ['summary', 'custom_formatted_field'] # foobar will not be used, because it's not in caps. All configuration keys # have to be in caps foobar = "barbaz" - diff --git a/pelican/tests/support.py b/pelican/tests/support.py --- a/pelican/tests/support.py +++ b/pelican/tests/support.py @@ -1,25 +1,26 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function -__all__ = ['get_article', 'unittest', ] +from __future__ import print_function, unicode_literals +import locale +import logging import os import re import subprocess import sys -from six import StringIO -import logging -from logging.handlers import BufferingHandler import unittest -import locale - -from functools import wraps from contextlib import contextmanager -from tempfile import mkdtemp +from functools import wraps +from logging.handlers import BufferingHandler from shutil import rmtree +from tempfile import mkdtemp + +from six import StringIO from pelican.contents import Article from pelican.settings import DEFAULT_CONFIG +__all__ = ['get_article', 'unittest', ] + @contextmanager def temporary_folder(): @@ -167,7 +168,7 @@ def get_settings(**kwargs): Set keyword arguments to override specific settings. 
""" settings = DEFAULT_CONFIG.copy() - for key,value in kwargs.items(): + for key, value in kwargs.items(): settings[key] = value return settings @@ -179,10 +180,13 @@ def __init__(self, capacity=1000): logging.handlers.BufferingHandler.__init__(self, capacity) def count_logs(self, msg=None, level=None): - return len([l for l in self.buffer - if (msg is None or re.match(msg, l.getMessage())) - and (level is None or l.levelno == level) - ]) + return len([ + l + for l + in self.buffer + if (msg is None or re.match(msg, l.getMessage())) and + (level is None or l.levelno == level) + ]) class LoggedTestCase(unittest.TestCase): diff --git a/pelican/tests/test_cache.py b/pelican/tests/test_cache.py --- a/pelican/tests/test_cache.py +++ b/pelican/tests/test_cache.py @@ -1,7 +1,14 @@ +# -*- coding: utf-8 -*- from __future__ import unicode_literals import os -from codecs import open + +from shutil import rmtree +from tempfile import mkdtemp + +from pelican.generators import ArticlesGenerator, PagesGenerator +from pelican.tests.support import get_settings, unittest + try: from unittest.mock import MagicMock except ImportError: @@ -10,12 +17,6 @@ except ImportError: MagicMock = False -from shutil import rmtree -from tempfile import mkdtemp - -from pelican.generators import ArticlesGenerator, PagesGenerator -from pelican.tests.support import unittest, get_settings - CUR_DIR = os.path.dirname(__file__) CONTENT_DIR = os.path.join(CUR_DIR, 'content') @@ -35,7 +36,6 @@ def _get_cache_enabled_settings(self): settings['CACHE_PATH'] = self.temp_cache return settings - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_article_object_caching(self): """Test Article objects caching at the generator level""" @@ -44,7 +44,6 @@ def test_article_object_caching(self): settings['DEFAULT_DATE'] = (1970, 1, 1) settings['READERS'] = {'asc': None} - generator = ArticlesGenerator( context=settings.copy(), settings=settings, path=CONTENT_DIR, theme=settings['THEME'], output_path=None) @@ -108,7 +107,9 @@ def test_article_ignore_cache(self): path=CONTENT_DIR, theme=settings['THEME'], output_path=None) generator.readers.read_file = MagicMock() generator.generate_context() - self.assertEqual(generator.readers.read_file.call_count, orig_call_count) + self.assertEqual( + generator.readers.read_file.call_count, + orig_call_count) @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_page_object_caching(self): @@ -181,5 +182,6 @@ def test_page_ignore_cache(self): path=CUR_DIR, theme=settings['THEME'], output_path=None) generator.readers.read_file = MagicMock() generator.generate_context() - self.assertEqual(generator.readers.read_file.call_count, orig_call_count) - + self.assertEqual( + generator.readers.read_file.call_count, + orig_call_count) diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -1,20 +1,21 @@ -from __future__ import unicode_literals, absolute_import +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals -import logging import locale +import logging import os.path -import six +from posixpath import join as posix_join +from sys import platform from jinja2.utils import generate_lorem_ipsum -from sys import platform -from pelican.contents import (Page, Article, Static, URLWrapper, - Author, Category) +import six + +from pelican.contents import Article, Author, Category, Page, Static from pelican.settings import DEFAULT_CONFIG from pelican.signals import 
content_object_init -from pelican.tests.support import LoggedTestCase, mute, unittest, get_settings -from pelican.utils import (path_to_url, truncate_html_words, SafeDatetime, - posix_join) +from pelican.tests.support import LoggedTestCase, get_settings, unittest +from pelican.utils import SafeDatetime, path_to_url, truncate_html_words # generate one paragraph, enclosed with <p> @@ -49,7 +50,7 @@ def test_use_args(self): # them to initialise object's attributes. metadata = {'foo': 'bar', 'foobar': 'baz', 'title': 'foobar', } page = Page(TEST_CONTENT, metadata=metadata, - context={'localsiteurl': ''}) + context={'localsiteurl': ''}) for key, value in metadata.items(): self.assertTrue(hasattr(page, key)) self.assertEqual(value, getattr(page, key)) @@ -139,14 +140,9 @@ def test_datetime(self): page = Page(**page_kwargs) # page.locale_date is a unicode string in both python2 and python3 - dt_date = dt.strftime(DEFAULT_CONFIG['DEFAULT_DATE_FORMAT']) - # dt_date is a byte string in python2, and a unicode string in python3 - # Let's make sure it is a unicode string (relies on python 3.3 supporting the u prefix) - if type(dt_date) != type(u''): - # python2: - dt_date = unicode(dt_date, 'utf8') - - self.assertEqual(page.locale_date, dt_date ) + dt_date = dt.strftime(DEFAULT_CONFIG['DEFAULT_DATE_FORMAT']) + + self.assertEqual(page.locale_date, dt_date) page_kwargs['settings'] = get_settings() # I doubt this can work on all platforms ... @@ -307,10 +303,14 @@ def test_intrasite_link_more(self): args['settings'] = get_settings() args['source_path'] = 'content' args['context']['filenames'] = { - 'images/poster.jpg': type(cls_name, (object,), {'url': 'images/poster.jpg'}), - 'assets/video.mp4': type(cls_name, (object,), {'url': 'assets/video.mp4'}), - 'images/graph.svg': type(cls_name, (object,), {'url': 'images/graph.svg'}), - 'reference.rst': type(cls_name, (object,), {'url': 'reference.html'}), + 'images/poster.jpg': type( + cls_name, (object,), {'url': 'images/poster.jpg'}), + 'assets/video.mp4': type( + cls_name, (object,), {'url': 'assets/video.mp4'}), + 'images/graph.svg': type( + cls_name, (object,), {'url': 'images/graph.svg'}), + 'reference.rst': type( + cls_name, (object,), {'url': 'reference.html'}), } # video.poster @@ -325,20 +325,25 @@ def test_intrasite_link_more(self): content, 'There is a video with poster ' '<video controls poster="http://notmyidea.org/images/poster.jpg">' - '<source src="http://notmyidea.org/assets/video.mp4" type="video/mp4">' + '<source src="http://notmyidea.org/assets/video.mp4"' + ' type="video/mp4">' '</video>' ) # object.data args['content'] = ( 'There is a svg object ' - '<object data="{filename}/images/graph.svg" type="image/svg+xml"></object>' + '<object data="{filename}/images/graph.svg"' + ' type="image/svg+xml">' + '</object>' ) content = Page(**args).get_content('http://notmyidea.org') self.assertEqual( content, 'There is a svg object ' - '<object data="http://notmyidea.org/images/graph.svg" type="image/svg+xml"></object>' + '<object data="http://notmyidea.org/images/graph.svg"' + ' type="image/svg+xml">' + '</object>' ) # blockquote.cite @@ -350,7 +355,9 @@ def test_intrasite_link_more(self): self.assertEqual( content, 'There is a blockquote with cite attribute ' - '<blockquote cite="http://notmyidea.org/reference.html">blah blah</blockquote>' + '<blockquote cite="http://notmyidea.org/reference.html">' + 'blah blah' + '</blockquote>' ) def test_intrasite_link_markdown_spaces(self): @@ -401,17 +408,19 @@ def test_template(self): def 
test_slugify_category_author(self): settings = get_settings() - settings['SLUG_SUBSTITUTIONS'] = [ ('C#', 'csharp') ] + settings['SLUG_SUBSTITUTIONS'] = [('C#', 'csharp')] settings['ARTICLE_URL'] = '{author}/{category}/{slug}/' settings['ARTICLE_SAVE_AS'] = '{author}/{category}/{slug}/index.html' article_kwargs = self._copy_page_kwargs() article_kwargs['metadata']['author'] = Author("O'Brien", settings) - article_kwargs['metadata']['category'] = Category('C# & stuff', settings) + article_kwargs['metadata']['category'] = Category( + 'C# & stuff', settings) article_kwargs['metadata']['title'] = 'fnord' article_kwargs['settings'] = settings article = Article(**article_kwargs) self.assertEqual(article.url, 'obrien/csharp-stuff/fnord/') - self.assertEqual(article.save_as, 'obrien/csharp-stuff/fnord/index.html') + self.assertEqual( + article.save_as, 'obrien/csharp-stuff/fnord/index.html') class TestStatic(LoggedTestCase): @@ -426,7 +435,8 @@ def setUp(self): self.context = self.settings.copy() self.static = Static(content=None, metadata={}, settings=self.settings, - source_path=posix_join('dir', 'foo.jpg'), context=self.context) + source_path=posix_join('dir', 'foo.jpg'), + context=self.context) self.context['filenames'] = {self.static.source_path: self.static} @@ -436,8 +446,10 @@ def tearDown(self): def test_attach_to_same_dir(self): """attach_to() overrides a static file's save_as and url. """ - page = Page(content="fake page", - metadata={'title': 'fakepage'}, settings=self.settings, + page = Page( + content="fake page", + metadata={'title': 'fakepage'}, + settings=self.settings, source_path=os.path.join('dir', 'fakepage.md')) self.static.attach_to(page) @@ -449,7 +461,7 @@ def test_attach_to_parent_dir(self): """attach_to() preserves dirs inside the linking document dir. """ page = Page(content="fake page", metadata={'title': 'fakepage'}, - settings=self.settings, source_path='fakepage.md') + settings=self.settings, source_path='fakepage.md') self.static.attach_to(page) expected_save_as = os.path.join('outpages', 'dir', 'foo.jpg') @@ -460,8 +472,8 @@ def test_attach_to_other_dir(self): """attach_to() ignores dirs outside the linking document dir. """ page = Page(content="fake page", - metadata={'title': 'fakepage'}, settings=self.settings, - source_path=os.path.join('dir', 'otherdir', 'fakepage.md')) + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'otherdir', 'fakepage.md')) self.static.attach_to(page) expected_save_as = os.path.join('outpages', 'foo.jpg') @@ -472,8 +484,8 @@ def test_attach_to_ignores_subsequent_calls(self): """attach_to() does nothing when called a second time. 
""" page = Page(content="fake page", - metadata={'title': 'fakepage'}, settings=self.settings, - source_path=os.path.join('dir', 'fakepage.md')) + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'fakepage.md')) self.static.attach_to(page) @@ -481,8 +493,10 @@ def test_attach_to_ignores_subsequent_calls(self): otherdir_settings.update(dict( PAGE_SAVE_AS=os.path.join('otherpages', '{slug}.html'), PAGE_URL='otherpages/{slug}.html')) - otherdir_page = Page(content="other page", - metadata={'title': 'otherpage'}, settings=otherdir_settings, + otherdir_page = Page( + content="other page", + metadata={'title': 'otherpage'}, + settings=otherdir_settings, source_path=os.path.join('dir', 'otherpage.md')) self.static.attach_to(otherdir_page) @@ -497,8 +511,10 @@ def test_attach_to_does_nothing_after_save_as_referenced(self): """ original_save_as = self.static.save_as - page = Page(content="fake page", - metadata={'title': 'fakepage'}, settings=self.settings, + page = Page( + content="fake page", + metadata={'title': 'fakepage'}, + settings=self.settings, source_path=os.path.join('dir', 'fakepage.md')) self.static.attach_to(page) @@ -511,8 +527,10 @@ def test_attach_to_does_nothing_after_url_referenced(self): """ original_url = self.static.url - page = Page(content="fake page", - metadata={'title': 'fakepage'}, settings=self.settings, + page = Page( + content="fake page", + metadata={'title': 'fakepage'}, + settings=self.settings, source_path=os.path.join('dir', 'fakepage.md')) self.static.attach_to(page) @@ -523,13 +541,15 @@ def test_attach_to_does_not_override_an_override(self): """attach_to() does not override paths that were overridden elsewhere. (For example, by the user with EXTRA_PATH_METADATA) """ - customstatic = Static(content=None, + customstatic = Static( + content=None, metadata=dict(save_as='customfoo.jpg', url='customfoo.jpg'), settings=self.settings, source_path=os.path.join('dir', 'foo.jpg'), context=self.settings.copy()) - page = Page(content="fake page", + page = Page( + content="fake page", metadata={'title': 'fakepage'}, settings=self.settings, source_path=os.path.join('dir', 'fakepage.md')) @@ -542,13 +562,16 @@ def test_attach_link_syntax(self): """{attach} link syntax triggers output path override & url replacement. """ html = '<a href="{attach}../foo.jpg">link</a>' - page = Page(content=html, - metadata={'title': 'fakepage'}, settings=self.settings, + page = Page( + content=html, + metadata={'title': 'fakepage'}, + settings=self.settings, source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), context=self.context) content = page.get_content('') - self.assertNotEqual(content, html, + self.assertNotEqual( + content, html, "{attach} link syntax did not trigger URL replacement.") expected_save_as = os.path.join('outpages', 'foo.jpg') @@ -561,7 +584,8 @@ def test_tag_link_syntax(self): html = '<a href="{tag}foo">link</a>' page = Page( content=html, - metadata={'title': 'fakepage'}, settings=self.settings, + metadata={'title': 'fakepage'}, + settings=self.settings, source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), context=self.context) content = page.get_content('') @@ -572,8 +596,10 @@ def test_category_link_syntax(self): "{category} link syntax triggers url replacement." 
html = '<a href="{category}foo">link</a>' - page = Page(content=html, - metadata={'title': 'fakepage'}, settings=self.settings, + page = Page( + content=html, + metadata={'title': 'fakepage'}, + settings=self.settings, source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), context=self.context) content = page.get_content('') @@ -588,11 +614,11 @@ def test_unknown_link_syntax(self): metadata={'title': 'fakepage'}, settings=self.settings, source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), context=self.context) - content = page.get_content('') + content = page.get_content('') self.assertEqual(content, html) self.assertLogCountEqual( - count=1, - msg="Replacement Indicator 'unknown' not recognized, " - "skipping replacement", - level=logging.WARNING) + count=1, + msg="Replacement Indicator 'unknown' not recognized, " + "skipping replacement", + level=logging.WARNING) diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -1,8 +1,18 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals +import locale import os + from codecs import open +from shutil import rmtree +from tempfile import mkdtemp + +from pelican.generators import (ArticlesGenerator, Generator, PagesGenerator, + StaticGenerator, TemplatePagesGenerator) +from pelican.tests.support import get_settings, unittest +from pelican.writers import Writer + try: from unittest.mock import MagicMock except ImportError: @@ -10,14 +20,7 @@ from mock import MagicMock except ImportError: MagicMock = False -from shutil import rmtree -from tempfile import mkdtemp -from pelican.generators import (Generator, ArticlesGenerator, PagesGenerator, - StaticGenerator, TemplatePagesGenerator) -from pelican.writers import Writer -from pelican.tests.support import unittest, get_settings -import locale CUR_DIR = os.path.dirname(__file__) CONTENT_DIR = os.path.join(CUR_DIR, 'content') @@ -35,7 +38,6 @@ def setUp(self): def tearDown(self): locale.setlocale(locale.LC_ALL, self.old_locale) - def test_include_path(self): self.settings['IGNORE_FILES'] = {'ignored1.rst', 'ignored2.rst'} @@ -52,7 +54,8 @@ def test_get_files_exclude(self): """Test that Generator.get_files() properly excludes directories. 
""" # We use our own Generator so we can give it our own content path - generator = Generator(context=self.settings.copy(), + generator = Generator( + context=self.settings.copy(), settings=self.settings, path=os.path.join(CUR_DIR, 'nested_content'), theme=self.settings['THEME'], output_path=None) @@ -60,34 +63,42 @@ def test_get_files_exclude(self): filepaths = generator.get_files(paths=['maindir']) found_files = {os.path.basename(f) for f in filepaths} expected_files = {'maindir.md', 'subdir.md'} - self.assertFalse(expected_files - found_files, + self.assertFalse( + expected_files - found_files, "get_files() failed to find one or more files") # Test string as `paths` argument rather than list filepaths = generator.get_files(paths='maindir') found_files = {os.path.basename(f) for f in filepaths} expected_files = {'maindir.md', 'subdir.md'} - self.assertFalse(expected_files - found_files, + self.assertFalse( + expected_files - found_files, "get_files() failed to find one or more files") filepaths = generator.get_files(paths=[''], exclude=['maindir']) found_files = {os.path.basename(f) for f in filepaths} - self.assertNotIn('maindir.md', found_files, + self.assertNotIn( + 'maindir.md', found_files, "get_files() failed to exclude a top-level directory") - self.assertNotIn('subdir.md', found_files, + self.assertNotIn( + 'subdir.md', found_files, "get_files() failed to exclude a subdir of an excluded directory") - filepaths = generator.get_files(paths=[''], + filepaths = generator.get_files( + paths=[''], exclude=[os.path.join('maindir', 'subdir')]) found_files = {os.path.basename(f) for f in filepaths} - self.assertNotIn('subdir.md', found_files, + self.assertNotIn( + 'subdir.md', found_files, "get_files() failed to exclude a subdirectory") filepaths = generator.get_files(paths=[''], exclude=['subdir']) found_files = {os.path.basename(f) for f in filepaths} - self.assertIn('subdir.md', found_files, + self.assertIn( + 'subdir.md', found_files, "get_files() excluded a subdirectory by name, ignoring its path") + class TestArticlesGenerator(unittest.TestCase): @classmethod @@ -96,7 +107,7 @@ def setUpClass(cls): settings['DEFAULT_CATEGORY'] = 'Default' settings['DEFAULT_DATE'] = (1970, 1, 1) settings['READERS'] = {'asc': None} - settings['CACHE_CONTENT'] = False # cache not needed for this logic tests + settings['CACHE_CONTENT'] = False cls.generator = ArticlesGenerator( context=settings.copy(), settings=settings, @@ -152,25 +163,30 @@ def test_generate_context(self): ['Test mkd File', 'published', 'test', 'article'], ['This is a super article !', 'published', 'Yeah', 'article'], ['This is a super article !', 'published', 'Yeah', 'article'], - ['Article with Nonconformant HTML meta tags', 'published', 'Default', 'article'], + ['Article with Nonconformant HTML meta tags', 'published', + 'Default', 'article'], ['This is a super article !', 'published', 'yeah', 'article'], ['This is a super article !', 'published', 'yeah', 'article'], ['This is a super article !', 'published', 'yeah', 'article'], ['This is a super article !', 'published', 'Default', 'article'], ['This is an article with category !', 'published', 'yeah', 'article'], - ['This is an article with multiple authors!', 'published', 'Default', 'article'], - ['This is an article with multiple authors!', 'published', 'Default', 'article'], - ['This is an article with multiple authors in list format!', 'published', 'Default', 'article'], - ['This is an article with multiple authors in lastname, firstname format!', 'published', 'Default', 
'article'], + ['This is an article with multiple authors!', 'published', + 'Default', 'article'], + ['This is an article with multiple authors!', 'published', + 'Default', 'article'], + ['This is an article with multiple authors in list format!', + 'published', 'Default', 'article'], + ['This is an article with multiple authors in lastname, ' + 'firstname format!', 'published', 'Default', 'article'], ['This is an article without category !', 'published', 'Default', - 'article'], + 'article'], ['This is an article without category !', 'published', 'TestCategory', 'article'], ['An Article With Code Block To Test Typogrify Ignore', - 'published', 'Default', 'article'], - ['マックOS X 10.8でパイソンとVirtualenvをインストールと設定', 'published', - '指導書', 'article'], + 'published', 'Default', 'article'], + ['マックOS X 10.8でパイソンとVirtualenvをインストールと設定', + 'published', '指導書', 'article'], ] self.assertEqual(sorted(articles_expected), sorted(self.articles)) @@ -292,7 +308,7 @@ def test_period_in_timeperiod_archive(self): generator.generate_period_archives(write) dates = [d for d in generator.dates if d.date.year == 1970] self.assertEqual(len(dates), 1) - #among other things it must have at least been called with this + # among other things it must have at least been called with this settings["period"] = (1970,) write.assert_called_with("posts/1970/index.html", generator.get_template("period_archives"), @@ -300,37 +316,42 @@ def test_period_in_timeperiod_archive(self): blog=True, dates=dates) del settings["period"] - settings['MONTH_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/{date:%b}/index.html' + settings['MONTH_ARCHIVE_SAVE_AS'] = \ + 'posts/{date:%Y}/{date:%b}/index.html' generator = ArticlesGenerator( context=settings, settings=settings, path=CONTENT_DIR, theme=settings['THEME'], output_path=None) generator.generate_context() write = MagicMock() generator.generate_period_archives(write) - dates = [d for d in generator.dates if d.date.year == 1970 - and d.date.month == 1] + dates = [d for d in generator.dates + if d.date.year == 1970 and d.date.month == 1] self.assertEqual(len(dates), 1) settings["period"] = (1970, "January") - #among other things it must have at least been called with this + # among other things it must have at least been called with this write.assert_called_with("posts/1970/Jan/index.html", generator.get_template("period_archives"), settings, blog=True, dates=dates) del settings["period"] - settings['DAY_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/{date:%b}/{date:%d}/index.html' + settings['DAY_ARCHIVE_SAVE_AS'] = \ + 'posts/{date:%Y}/{date:%b}/{date:%d}/index.html' generator = ArticlesGenerator( context=settings, settings=settings, path=CONTENT_DIR, theme=settings['THEME'], output_path=None) generator.generate_context() write = MagicMock() generator.generate_period_archives(write) - dates = [d for d in generator.dates if d.date.year == 1970 - and d.date.month == 1 - and d.date.day == 1] + dates = [ + d for d in generator.dates if + d.date.year == 1970 and + d.date.month == 1 and + d.date.day == 1 + ] self.assertEqual(len(dates), 1) settings["period"] = (1970, "January", 1) - #among other things it must have at least been called with this + # among other things it must have at least been called with this write.assert_called_with("posts/1970/Jan/01/index.html", generator.get_template("period_archives"), settings, @@ -347,11 +368,14 @@ def test_nonexistent_template(self): def test_generate_authors(self): """Check authors generation.""" authors = [author.name for author, _ in self.generator.authors] - 
authors_expected = sorted(['Alexis Métaireau', 'Author, First', 'Author, Second', 'First Author', 'Second Author']) + authors_expected = sorted( + ['Alexis Métaireau', 'Author, First', 'Author, Second', + 'First Author', 'Second Author']) self.assertEqual(sorted(authors), authors_expected) # test for slug authors = [author.slug for author, _ in self.generator.authors] - authors_expected = ['alexis-metaireau', 'author-first', 'author-second', 'first-author', 'second-author'] + authors_expected = ['alexis-metaireau', 'author-first', + 'author-second', 'first-author', 'second-author'] self.assertEqual(sorted(authors), sorted(authors_expected)) def test_standard_metadata_in_default_metadata(self): @@ -391,7 +415,6 @@ def test_article_order_by(self): settings = get_settings(filenames={}) settings['DEFAULT_CATEGORY'] = 'Default' settings['DEFAULT_DATE'] = (1970, 1, 1) - settings['CACHE_CONTENT'] = False # cache not needed for this logic tests settings['ARTICLE_ORDER_BY'] = 'title' generator = ArticlesGenerator( @@ -420,7 +443,8 @@ def test_article_order_by(self): 'This is a super article !', 'This is a super article !', 'This is an article with category !', - 'This is an article with multiple authors in lastname, firstname format!', + ('This is an article with multiple authors in lastname, ' + 'firstname format!'), 'This is an article with multiple authors in list format!', 'This is an article with multiple authors!', 'This is an article with multiple authors!', @@ -435,7 +459,6 @@ def test_article_order_by(self): settings = get_settings(filenames={}) settings['DEFAULT_CATEGORY'] = 'Default' settings['DEFAULT_DATE'] = (1970, 1, 1) - settings['CACHE_CONTENT'] = False # cache not needed for this logic tests settings['ARTICLE_ORDER_BY'] = 'reversed-title' generator = ArticlesGenerator( @@ -561,7 +584,7 @@ def test_tag_and_category_links_on_generated_pages(self): are generated correctly on pages """ settings = get_settings(filenames={}) - settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR + settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR settings['CACHE_PATH'] = self.temp_cache settings['DEFAULT_DATE'] = (1970, 1, 1) @@ -586,7 +609,6 @@ def setUp(self): self.old_locale = locale.setlocale(locale.LC_ALL) locale.setlocale(locale.LC_ALL, str('C')) - def tearDown(self): rmtree(self.temp_content) rmtree(self.temp_output) @@ -632,59 +654,67 @@ def setUp(self): def test_static_excludes(self): """Test that StaticGenerator respects STATIC_EXCLUDES. """ - settings = get_settings(STATIC_EXCLUDES=['subdir'], - PATH=self.content_path, STATIC_PATHS=['']) + settings = get_settings( + STATIC_EXCLUDES=['subdir'], + PATH=self.content_path, + STATIC_PATHS=[''], + filenames={}) context = settings.copy() - context['filenames'] = {} - StaticGenerator(context=context, settings=settings, + StaticGenerator( + context=context, settings=settings, path=settings['PATH'], output_path=None, theme=settings['THEME']).generate_context() staticnames = [os.path.basename(c.source_path) - for c in context['staticfiles']] + for c in context['staticfiles']] - self.assertNotIn('subdir_fake_image.jpg', staticnames, + self.assertNotIn( + 'subdir_fake_image.jpg', staticnames, "StaticGenerator processed a file in a STATIC_EXCLUDES directory") - self.assertIn('fake_image.jpg', staticnames, + self.assertIn( + 'fake_image.jpg', staticnames, "StaticGenerator skipped a file that it should have included") def test_static_exclude_sources(self): """Test that StaticGenerator respects STATIC_EXCLUDE_SOURCES. 
""" - # Test STATIC_EXCLUDE_SOURCES=True - settings = get_settings(STATIC_EXCLUDE_SOURCES=True, - PATH=self.content_path, PAGE_PATHS=[''], STATIC_PATHS=[''], - CACHE_CONTENT=False) + settings = get_settings( + STATIC_EXCLUDE_SOURCES=True, + PATH=self.content_path, + PAGE_PATHS=[''], + STATIC_PATHS=[''], + CACHE_CONTENT=False, + filenames={}) context = settings.copy() - context['filenames'] = {} for generator_class in (PagesGenerator, StaticGenerator): - generator_class(context=context, settings=settings, + generator_class( + context=context, settings=settings, path=settings['PATH'], output_path=None, theme=settings['THEME']).generate_context() staticnames = [os.path.basename(c.source_path) - for c in context['staticfiles']] + for c in context['staticfiles']] - self.assertFalse(any(name.endswith(".md") for name in staticnames), + self.assertFalse( + any(name.endswith(".md") for name in staticnames), "STATIC_EXCLUDE_SOURCES=True failed to exclude a markdown file") - # Test STATIC_EXCLUDE_SOURCES=False - settings.update(STATIC_EXCLUDE_SOURCES=False) context = settings.copy() context['filenames'] = {} for generator_class in (PagesGenerator, StaticGenerator): - generator_class(context=context, settings=settings, + generator_class( + context=context, settings=settings, path=settings['PATH'], output_path=None, theme=settings['THEME']).generate_context() staticnames = [os.path.basename(c.source_path) - for c in context['staticfiles']] + for c in context['staticfiles']] - self.assertTrue(any(name.endswith(".md") for name in staticnames), + self.assertTrue( + any(name.endswith(".md") for name in staticnames), "STATIC_EXCLUDE_SOURCES=False failed to include a markdown file") - diff --git a/pelican/tests/test_importer.py b/pelican/tests/test_importer.py --- a/pelican/tests/test_importer.py +++ b/pelican/tests/test_importer.py @@ -1,16 +1,19 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function +# -*- coding: utf-8 -*- +from __future__ import print_function, unicode_literals +import locale import os import re -import locale from codecs import open -from pelican.tools.pelican_import import wp2fields, fields2pelican, decode_wp_content, build_header, build_markdown_header, get_attachments, download_attachments -from pelican.tests.support import (unittest, temporary_folder, mute, - skipIfNoExecutable) -from pelican.utils import slugify, path_to_file_url +from pelican.tests.support import (mute, skipIfNoExecutable, temporary_folder, + unittest) +from pelican.tools.pelican_import import (build_header, build_markdown_header, + decode_wp_content, + download_attachments, fields2pelican, + get_attachments, wp2fields) +from pelican.utils import path_to_file_url, slugify CUR_DIR = os.path.abspath(os.path.dirname(__file__)) WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml') @@ -32,7 +35,6 @@ LXML = False - @skipIfNoExecutable(['pandoc', '--version']) @unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module') class TestWordpressXmlImporter(unittest.TestCase): @@ -48,17 +50,19 @@ def tearDown(self): def test_ignore_empty_posts(self): self.assertTrue(self.posts) - for title, content, fname, date, author, categ, tags, status, kind, format in self.posts: - self.assertTrue(title.strip()) + for (title, content, fname, date, author, + categ, tags, status, kind, format) in self.posts: + self.assertTrue(title.strip()) def test_recognise_page_kind(self): """ Check that we recognise pages in wordpress, as opposed to posts """ self.assertTrue(self.posts) # 
Collect (title, filename, kind) of non-empty posts recognised as page pages_data = [] - for title, content, fname, date, author, categ, tags, status, kind, format in self.posts: - if kind == 'page': - pages_data.append((title, fname)) + for (title, content, fname, date, author, + categ, tags, status, kind, format) in self.posts: + if kind == 'page': + pages_data.append((title, fname)) self.assertEqual(2, len(pages_data)) self.assertEqual(('Page', 'contact'), pages_data[0]) self.assertEqual(('Empty Page', 'empty'), pages_data[1]) @@ -67,7 +71,8 @@ def test_dirpage_directive_for_page_kind(self): silent_f2p = mute(True)(fields2pelican) test_post = filter(lambda p: p[0].startswith("Empty Page"), self.posts) with temporary_folder() as temp: - fname = list(silent_f2p(test_post, 'markdown', temp, dirpage=True))[0] + fname = list(silent_f2p(test_post, 'markdown', + temp, dirpage=True))[0] self.assertTrue(fname.endswith('pages%sempty.md' % os.path.sep)) def test_dircat(self): @@ -75,10 +80,11 @@ def test_dircat(self): test_posts = [] for post in self.posts: # check post kind - if len(post[5]) > 0: # Has a category + if len(post[5]) > 0: # Has a category test_posts.append(post) with temporary_folder() as temp: - fnames = list(silent_f2p(test_posts, 'markdown', temp, dircat=True)) + fnames = list(silent_f2p(test_posts, 'markdown', + temp, dircat=True)) index = 0 for post in test_posts: name = post[2] @@ -92,25 +98,33 @@ def test_dircat(self): def test_unless_custom_post_all_items_should_be_pages_or_posts(self): self.assertTrue(self.posts) pages_data = [] - for title, content, fname, date, author, categ, tags, status, kind, format in self.posts: - if kind == 'page' or kind == 'article': - pass - else: - pages_data.append((title, fname)) + for (title, content, fname, date, author, categ, + tags, status, kind, format) in self.posts: + if kind == 'page' or kind == 'article': + pass + else: + pages_data.append((title, fname)) self.assertEqual(0, len(pages_data)) def test_recognise_custom_post_type(self): self.assertTrue(self.custposts) cust_data = [] - for title, content, fname, date, author, categ, tags, status, kind, format in self.custposts: - if kind == 'article' or kind == 'page': - pass - else: - cust_data.append((title, kind)) + for (title, content, fname, date, author, categ, + tags, status, kind, format) in self.custposts: + if kind == 'article' or kind == 'page': + pass + else: + cust_data.append((title, kind)) self.assertEqual(3, len(cust_data)) - self.assertEqual(('A custom post in category 4', 'custom1'), cust_data[0]) - self.assertEqual(('A custom post in category 5', 'custom1'), cust_data[1]) - self.assertEqual(('A 2nd custom post type also in category 5', 'custom2'), cust_data[2]) + self.assertEqual( + ('A custom post in category 4', 'custom1'), + cust_data[0]) + self.assertEqual( + ('A custom post in category 5', 'custom1'), + cust_data[1]) + self.assertEqual( + ('A 2nd custom post type also in category 5', 'custom2'), + cust_data[2]) def test_custom_posts_put_in_own_dir(self): silent_f2p = mute(True)(fields2pelican) @@ -122,7 +136,8 @@ def test_custom_posts_put_in_own_dir(self): else: test_posts.append(post) with temporary_folder() as temp: - fnames = list(silent_f2p(test_posts, 'markdown', temp, wp_custpost = True)) + fnames = list(silent_f2p(test_posts, 'markdown', + temp, wp_custpost=True)) index = 0 for post in test_posts: name = post[2] @@ -144,7 +159,7 @@ def test_custom_posts_put_in_own_dir_and_catagory_sub_dir(self): test_posts.append(post) with temporary_folder() as temp: fnames 
= list(silent_f2p(test_posts, 'markdown', temp, - wp_custpost=True, dircat=True)) + wp_custpost=True, dircat=True)) index = 0 for post in test_posts: name = post[2] @@ -157,7 +172,7 @@ def test_custom_posts_put_in_own_dir_and_catagory_sub_dir(self): index += 1 def test_wp_custpost_true_dirpage_false(self): - #pages should only be put in their own directory when dirpage = True + # pages should only be put in their own directory when dirpage = True silent_f2p = mute(True)(fields2pelican) test_posts = [] for post in self.custposts: @@ -166,7 +181,7 @@ def test_wp_custpost_true_dirpage_false(self): test_posts.append(post) with temporary_folder() as temp: fnames = list(silent_f2p(test_posts, 'markdown', temp, - wp_custpost=True, dirpage=False)) + wp_custpost=True, dirpage=False)) index = 0 for post in test_posts: name = post[2] @@ -175,7 +190,6 @@ def test_wp_custpost_true_dirpage_false(self): out_name = fnames[index] self.assertFalse(out_name.endswith(filename)) - def test_can_toggle_raw_html_code_parsing(self): def r(f): with open(f, encoding='utf-8') as infile: @@ -184,10 +198,12 @@ def r(f): with temporary_folder() as temp: - rst_files = (r(f) for f in silent_f2p(self.posts, 'markdown', temp)) + rst_files = (r(f) for f + in silent_f2p(self.posts, 'markdown', temp)) self.assertTrue(any('<iframe' in rst for rst in rst_files)) - rst_files = (r(f) for f in silent_f2p(self.posts, 'markdown', temp, - strip_raw=True)) + rst_files = (r(f) for f + in silent_f2p(self.posts, 'markdown', + temp, strip_raw=True)) self.assertFalse(any('<iframe' in rst for rst in rst_files)) # no effect in rst rst_files = (r(f) for f in silent_f2p(self.posts, 'rst', temp)) @@ -197,13 +213,14 @@ def r(f): self.assertFalse(any('<iframe' in rst for rst in rst_files)) def test_decode_html_entities_in_titles(self): - test_posts = [post for post in self.posts if post[2] == 'html-entity-test'] + test_posts = [post for post + in self.posts if post[2] == 'html-entity-test'] self.assertEqual(len(test_posts), 1) post = test_posts[0] title = post[0] - self.assertTrue(title, "A normal post with some <html> entities in the" - " title. You can't miss them.") + self.assertTrue(title, "A normal post with some <html> entities in " + "the title. 
You can't miss them.") self.assertNotIn('&', title) def test_decode_wp_content_returns_empty(self): @@ -216,14 +233,18 @@ def test_decode_wp_content(self): encoded_content = encoded_file.read() with open(WORDPRESS_DECODED_CONTENT_SAMPLE, 'r') as decoded_file: decoded_content = decoded_file.read() - self.assertEqual(decode_wp_content(encoded_content, br=False), decoded_content) + self.assertEqual( + decode_wp_content(encoded_content, br=False), + decoded_content) def test_preserve_verbatim_formatting(self): def r(f): with open(f, encoding='utf-8') as infile: return infile.read() silent_f2p = mute(True)(fields2pelican) - test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts) + test_post = filter( + lambda p: p[0].startswith("Code in List"), + self.posts) with temporary_folder() as temp: md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0] self.assertTrue(re.search(r'\s+a = \[1, 2, 3\]', md)) @@ -231,14 +252,17 @@ def r(f): for_line = re.search(r'\s+for i in zip\(a, b\):', md).group(0) print_line = re.search(r'\s+print i', md).group(0) - self.assertTrue(for_line.rindex('for') < print_line.rindex('print')) + self.assertTrue( + for_line.rindex('for') < print_line.rindex('print')) def test_code_in_list(self): def r(f): with open(f, encoding='utf-8') as infile: return infile.read() silent_f2p = mute(True)(fields2pelican) - test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts) + test_post = filter( + lambda p: p[0].startswith("Code in List"), + self.posts) with temporary_folder() as temp: md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0] sample_line = re.search(r'- This is a code sample', md).group(0) @@ -285,26 +309,29 @@ def test_build_header_with_fields(self): self.assertEqual(build_header(*header_data), expected_docutils) self.assertEqual(build_markdown_header(*header_data), expected_md) - def test_build_header_with_east_asian_characters(self): header = build_header('これは広い幅の文字だけで構成されたタイトルです', - None, None, None, None, None) + None, None, None, None, None) self.assertEqual(header, - 'これは広い幅の文字だけで構成されたタイトルです\n' + - '##############################################\n\n') + ('これは広い幅の文字だけで構成されたタイトルです\n' + '##############################################' + '\n\n')) def test_galleries_added_to_header(self): - header = build_header('test', None, None, None, None, - None, attachments=['output/test1', 'output/test2']) - self.assertEqual(header, 'test\n####\n' + ':attachments: output/test1, ' - + 'output/test2\n\n') + header = build_header('test', None, None, None, None, None, + attachments=['output/test1', 'output/test2']) + self.assertEqual(header, ('test\n####\n' + ':attachments: output/test1, ' + 'output/test2\n\n')) def test_galleries_added_to_markdown_header(self): header = build_markdown_header('test', None, None, None, None, None, - attachments=['output/test1', 'output/test2']) - self.assertEqual(header, 'Title: test\n' + 'Attachments: output/test1, ' - + 'output/test2\n\n') + attachments=['output/test1', + 'output/test2']) + self.assertEqual( + header, + 'Title: test\nAttachments: output/test1, output/test2\n\n') @unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module') @@ -326,14 +353,24 @@ def test_attachments_associated_with_correct_post(self): self.assertTrue(self.attachments) for post in self.attachments.keys(): if post is None: - self.assertTrue(self.attachments[post][0] == 'https://upload.wikimedia.org/wikipedia/commons/thumb/2/2c/Pelican_lakes_entrance02.jpg/240px-Pelican_lakes_entrance02.jpg') + expected = 
('https://upload.wikimedia.org/wikipedia/commons/' + 'thumb/2/2c/Pelican_lakes_entrance02.jpg/' + '240px-Pelican_lakes_entrance02.jpg') + self.assertEqual(self.attachments[post][0], expected) elif post == 'with-excerpt': - self.assertTrue(self.attachments[post][0] == 'http://thisurlisinvalid.notarealdomain/not_an_image.jpg') - self.assertTrue(self.attachments[post][1] == 'http://en.wikipedia.org/wiki/File:Pelikan_Walvis_Bay.jpg') + expected_invalid = ('http://thisurlisinvalid.notarealdomain/' + 'not_an_image.jpg') + expected_pelikan = ('http://en.wikipedia.org/wiki/' + 'File:Pelikan_Walvis_Bay.jpg') + self.assertEqual(self.attachments[post][0], expected_invalid) + self.assertEqual(self.attachments[post][1], expected_pelikan) elif post == 'with-tags': - self.assertTrue(self.attachments[post][0] == 'http://thisurlisinvalid.notarealdomain') + expected_invalid = ('http://thisurlisinvalid.notarealdomain') + self.assertEqual(self.attachments[post][0], expected_invalid) else: - self.fail('all attachments should match to a filename or None, {}'.format(post)) + self.fail('all attachments should match to a ' + 'filename or None, {}' + .format(post)) def test_download_attachments(self): real_file = os.path.join(CUR_DIR, 'content/article.rst') @@ -344,4 +381,6 @@ def test_download_attachments(self): locations = list(silent_da(temp, [good_url, bad_url])) self.assertEqual(1, len(locations)) directory = locations[0] - self.assertTrue(directory.endswith(os.path.join('content', 'article.rst')), directory) + self.assertTrue( + directory.endswith(os.path.join('content', 'article.rst')), + directory) diff --git a/pelican/tests/test_paginator.py b/pelican/tests/test_paginator.py --- a/pelican/tests/test_paginator.py +++ b/pelican/tests/test_paginator.py @@ -1,18 +1,21 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, absolute_import +from __future__ import absolute_import, unicode_literals + import locale -from pelican.tests.support import unittest, get_settings +from jinja2.utils import generate_lorem_ipsum -from pelican.paginator import Paginator from pelican.contents import Article, Author +from pelican.paginator import Paginator from pelican.settings import DEFAULT_CONFIG -from jinja2.utils import generate_lorem_ipsum +from pelican.tests.support import get_settings, unittest + # generate one paragraph, enclosed with <p> TEST_CONTENT = str(generate_lorem_ipsum(n=1)) TEST_SUMMARY = generate_lorem_ipsum(n=1, html=False) + class TestPage(unittest.TestCase): def setUp(self): super(TestPage, self).setUp() @@ -49,7 +52,8 @@ def test_save_as_preservation(self): ) self.page_kwargs['metadata']['author'] = Author('Blogger', settings) - object_list = [Article(**self.page_kwargs), Article(**self.page_kwargs)] + object_list = [Article(**self.page_kwargs), + Article(**self.page_kwargs)] paginator = Paginator('foobar.foo', object_list, settings) page = paginator.page(1) self.assertEqual(page.save_as, 'foobar.foo') diff --git a/pelican/tests/test_pelican.py b/pelican/tests/test_pelican.py --- a/pelican/tests/test_pelican.py +++ b/pelican/tests/test_pelican.py @@ -1,23 +1,25 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function +from __future__ import print_function, unicode_literals import collections -import os -import sys -from tempfile import mkdtemp -from shutil import rmtree import locale import logging +import os import subprocess +import sys + +from shutil import rmtree +from tempfile import mkdtemp from pelican import Pelican from pelican.generators import 
StaticGenerator from pelican.settings import read_settings -from pelican.tests.support import LoggedTestCase, mute, locale_available, unittest +from pelican.tests.support import (LoggedTestCase, locale_available, + mute, unittest) CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) SAMPLES_PATH = os.path.abspath(os.path.join( - CURRENT_DIR, os.pardir, os.pardir, 'samples')) + CURRENT_DIR, os.pardir, os.pardir, 'samples')) OUTPUT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, 'output')) INPUT_PATH = os.path.join(SAMPLES_PATH, "content") @@ -27,13 +29,10 @@ def recursiveDiff(dcmp): diff = { - 'diff_files': [os.path.join(dcmp.right, f) - for f in dcmp.diff_files], - 'left_only': [os.path.join(dcmp.right, f) - for f in dcmp.left_only], - 'right_only': [os.path.join(dcmp.right, f) - for f in dcmp.right_only], - } + 'diff_files': [os.path.join(dcmp.right, f) for f in dcmp.diff_files], + 'left_only': [os.path.join(dcmp.right, f) for f in dcmp.left_only], + 'right_only': [os.path.join(dcmp.right, f) for f in dcmp.right_only], + } for sub_dcmp in dcmp.subdirs.values(): for k, v in recursiveDiff(sub_dcmp).items(): diff[k] += v @@ -60,9 +59,13 @@ def tearDown(self): def assertDirsEqual(self, left_path, right_path): out, err = subprocess.Popen( - ['git', 'diff', '--no-ext-diff', '--exit-code', '-w', left_path, right_path], - env={str('PAGER'): str('')}, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ['git', 'diff', '--no-ext-diff', '--exit-code', + '-w', left_path, right_path], + env={str('PAGER'): str('')}, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE ).communicate() + def ignorable_git_crlf_errors(line): # Work around for running tests on Windows for msg in [ @@ -85,9 +88,11 @@ def test_order_of_generators(self): pelican = Pelican(settings=read_settings(path=None)) generator_classes = pelican.get_generator_classes() - self.assertTrue(generator_classes[-1] is StaticGenerator, + self.assertTrue( + generator_classes[-1] is StaticGenerator, "StaticGenerator must be the last generator, but it isn't!") - self.assertIsInstance(generator_classes, collections.Sequence, + self.assertIsInstance( + generator_classes, collections.Sequence, "get_generator_classes() must return a Sequence to preserve order") def test_basic_generation_works(self): @@ -98,10 +103,11 @@ def test_basic_generation_works(self): 'OUTPUT_PATH': self.temp_path, 'CACHE_PATH': self.temp_cache, 'LOCALE': locale.normalize('en_US'), - }) + }) pelican = Pelican(settings=settings) mute(True)(pelican.run)() - self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'basic')) + self.assertDirsEqual( + self.temp_path, os.path.join(OUTPUT_PATH, 'basic')) self.assertLogCountEqual( count=3, msg="Unable to find.*skipping url replacement", @@ -114,10 +120,11 @@ def test_custom_generation_works(self): 'OUTPUT_PATH': self.temp_path, 'CACHE_PATH': self.temp_cache, 'LOCALE': locale.normalize('en_US'), - }) + }) pelican = Pelican(settings=settings) mute(True)(pelican.run)() - self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'custom')) + self.assertDirsEqual( + self.temp_path, os.path.join(OUTPUT_PATH, 'custom')) @unittest.skipUnless(locale_available('fr_FR.UTF-8') or locale_available('French'), 'French locale needed') @@ -133,10 +140,11 @@ def test_custom_locale_generation_works(self): 'OUTPUT_PATH': self.temp_path, 'CACHE_PATH': self.temp_cache, 'LOCALE': our_locale, - }) + }) pelican = Pelican(settings=settings) mute(True)(pelican.run)() - self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'custom_locale')) + 
self.assertDirsEqual( + self.temp_path, os.path.join(OUTPUT_PATH, 'custom_locale')) def test_theme_static_paths_copy(self): # the same thing with a specified set of settings should work @@ -146,8 +154,9 @@ def test_theme_static_paths_copy(self): 'CACHE_PATH': self.temp_cache, 'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'very'), os.path.join(SAMPLES_PATH, 'kinda'), - os.path.join(SAMPLES_PATH, 'theme_standard')] - }) + os.path.join(SAMPLES_PATH, + 'theme_standard')] + }) pelican = Pelican(settings=settings) mute(True)(pelican.run)() theme_output = os.path.join(self.temp_path, 'theme') @@ -165,8 +174,9 @@ def test_theme_static_paths_copy_single_file(self): 'PATH': INPUT_PATH, 'OUTPUT_PATH': self.temp_path, 'CACHE_PATH': self.temp_cache, - 'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'theme_standard')] - }) + 'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, + 'theme_standard')] + }) pelican = Pelican(settings=settings) mute(True)(pelican.run)() @@ -184,9 +194,9 @@ def test_write_only_selected(self): 'WRITE_SELECTED': [ os.path.join(self.temp_path, 'oh-yeah.html'), os.path.join(self.temp_path, 'categories.html'), - ], + ], 'LOCALE': locale.normalize('en_US'), - }) + }) pelican = Pelican(settings=settings) logger = logging.getLogger() orig_level = logger.getEffectiveLevel() diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -1,11 +1,12 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function +from __future__ import print_function, unicode_literals import os from pelican import readers +from pelican.tests.support import get_settings, unittest from pelican.utils import SafeDatetime -from pelican.tests.support import unittest, get_settings + CUR_DIR = os.path.dirname(__file__) CONTENT_PATH = os.path.join(CUR_DIR, 'content') @@ -29,22 +30,26 @@ def assertDictHasSubset(self, dictionary, subset): self.assertEqual( value, real_value, - 'Expected %s to have value %s, but was %s' % (key, value, real_value)) + 'Expected %s to have value %s, but was %s' % + (key, value, real_value)) else: self.fail( - 'Expected %s to have value %s, but was not in Dict' % (key, value)) + 'Expected %s to have value %s, but was not in Dict' % + (key, value)) + class TestAssertDictHasSubset(ReaderTest): def setUp(self): self.dictionary = { - 'key-a' : 'val-a', - 'key-b' : 'val-b'} + 'key-a': 'val-a', + 'key-b': 'val-b' + } def tearDown(self): self.dictionary = None def test_subset(self): - self.assertDictHasSubset(self.dictionary, {'key-a':'val-a'}) + self.assertDictHasSubset(self.dictionary, {'key-a': 'val-a'}) def test_equal(self): self.assertDictHasSubset(self.dictionary, self.dictionary) @@ -54,18 +59,17 @@ def test_fail_not_set(self): AssertionError, 'Expected.*key-c.*to have value.*val-c.*but was not in Dict', self.assertDictHasSubset, - self.dictionary, - {'key-c':'val-c'} - ) + self.dictionary, + {'key-c': 'val-c'}) def test_fail_wrong_val(self): self.assertRaisesRegexp( AssertionError, 'Expected .*key-a.* to have value .*val-b.* but was .*val-a.*', self.assertDictHasSubset, - self.dictionary, - {'key-a':'val-b'} - ) + self.dictionary, + {'key-a': 'val-b'}) + class DefaultReaderTest(ReaderTest): @@ -153,17 +157,17 @@ def test_article_extra_path_metadata(self): '(?P<date>\d{4}-\d{2}-\d{2})' '_(?P<Slug>.*)' '#(?P<MyMeta>.*)-(?P<author>.*)' - ), + ), EXTRA_PATH_METADATA={ input_with_metadata: { 'key-1a': 'value-1a', 'key-1b': 'value-1b' - } } - ) + } + ) expected_metadata = { 
'category': 'yeah', - 'author' : 'Alexis Métaireau', + 'author': 'Alexis Métaireau', 'title': 'Rst with filename metadata', 'date': SafeDatetime(2012, 11, 29), 'slug': 'rst_w_filename_meta', @@ -179,38 +183,41 @@ def test_article_extra_path_metadata(self): path=input_file_path_without_metadata, EXTRA_PATH_METADATA={ input_file_path_without_metadata: { - 'author': 'Charlès Overwrite'} + 'author': 'Charlès Overwrite' } - ) + } + ) expected_without_metadata = { - 'category' : 'misc', - 'author' : 'Charlès Overwrite', - 'title' : 'Article title', - 'reader' : 'rst', + 'category': 'misc', + 'author': 'Charlès Overwrite', + 'title': 'Article title', + 'reader': 'rst', } self.assertDictHasSubset( page_without_metadata.metadata, expected_without_metadata) def test_article_extra_path_metadata_dont_overwrite(self): - #EXTRA_PATH_METADATA['author'] should get ignored - #since we don't overwrite already set values + # EXTRA_PATH_METADATA['author'] should get ignored + # since we don't overwrite already set values input_file_path = '2012-11-29_rst_w_filename_meta#foo-bar.rst' page = self.read_file( path=input_file_path, FILENAME_METADATA=( '(?P<date>\d{4}-\d{2}-\d{2})' '_(?P<Slug>.*)' - '#(?P<MyMeta>.*)-(?P<orginalauthor>.*)'), + '#(?P<MyMeta>.*)-(?P<orginalauthor>.*)' + ), EXTRA_PATH_METADATA={ input_file_path: { 'author': 'Charlès Overwrite', - 'key-1b': 'value-1b'} + 'key-1b': 'value-1b' } - ) + } + ) expected = { 'category': 'yeah', - 'author' : 'Alexis Métaireau', + 'author': 'Alexis Métaireau', 'title': 'Rst with filename metadata', 'date': SafeDatetime(2012, 11, 29), 'slug': 'rst_w_filename_meta', @@ -273,7 +280,7 @@ def test_typogrify_ignore_tags(self): # typogrify should be able to ignore user specified tags, # but tries to be clever with widont extension page = self.read_file(path='article.rst', TYPOGRIFY=True, - TYPOGRIFY_IGNORE_TAGS = ['p']) + TYPOGRIFY_IGNORE_TAGS=['p']) expected = ('<p>THIS is some content. 
With some stuff to&nbsp;' '&quot;typogrify&quot;...</p>\n<p>Now with added ' 'support for <abbr title="three letter acronym">' @@ -284,7 +291,7 @@ def test_typogrify_ignore_tags(self): # typogrify should ignore code blocks by default because # code blocks are composed inside the pre tag page = self.read_file(path='article_with_code_block.rst', - TYPOGRIFY=True) + TYPOGRIFY=True) expected = ('<p>An article with some&nbsp;code</p>\n' '<div class="highlight"><pre><span class="n">x</span>' @@ -292,13 +299,17 @@ def test_typogrify_ignore_tags(self): ' <span class="n">y</span>\n</pre></div>\n' '<p>A block&nbsp;quote:</p>\n<blockquote>\nx ' '<span class="amp">&amp;</span> y</blockquote>\n' - '<p>Normal:\nx <span class="amp">&amp;</span>&nbsp;y</p>\n') + '<p>Normal:\nx' + ' <span class="amp">&amp;</span>' + '&nbsp;y' + '</p>\n') self.assertEqual(page.content, expected) # instruct typogrify to also ignore blockquotes page = self.read_file(path='article_with_code_block.rst', - TYPOGRIFY=True, TYPOGRIFY_IGNORE_TAGS = ['blockquote']) + TYPOGRIFY=True, + TYPOGRIFY_IGNORE_TAGS=['blockquote']) expected = ('<p>An article with some&nbsp;code</p>\n' '<div class="highlight"><pre><span class="n">x</span>' @@ -306,7 +317,10 @@ def test_typogrify_ignore_tags(self): ' <span class="n">y</span>\n</pre></div>\n' '<p>A block&nbsp;quote:</p>\n<blockquote>\nx ' '&amp; y</blockquote>\n' - '<p>Normal:\nx <span class="amp">&amp;</span>&nbsp;y</p>\n') + '<p>Normal:\nx' + ' <span class="amp">&amp;</span>' + '&nbsp;y' + '</p>\n') self.assertEqual(page.content, expected) except ImportError: @@ -339,6 +353,7 @@ def test_article_with_multiple_authors_list(self): self.assertDictHasSubset(page.metadata, expected) + @unittest.skipUnless(readers.Markdown, "markdown isn't installed") class MdReaderTest(ReaderTest): @@ -400,7 +415,8 @@ def test_article_with_footnote(self): 'modified': SafeDatetime(2012, 11, 1), 'multiline': [ 'Line Metadata should be handle properly.', - 'See syntax of Meta-Data extension of Python Markdown package:', + 'See syntax of Meta-Data extension of ' + 'Python Markdown package:', 'If a line is indented by 4 or more spaces,', 'that line is assumed to be an additional line of the value', 'for the previous keyword.', diff --git a/pelican/tests/test_rstdirectives.py b/pelican/tests/test_rstdirectives.py --- a/pelican/tests/test_rstdirectives.py +++ b/pelican/tests/test_rstdirectives.py @@ -1,5 +1,8 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function +from __future__ import print_function, unicode_literals + +from pelican.tests.support import unittest + try: from unittest.mock import Mock except ImportError: @@ -7,7 +10,7 @@ from mock import Mock except ImportError: Mock = False -from pelican.tests.support import unittest + @unittest.skipUnless(Mock, 'Needs Mock module') class Test_abbr_role(unittest.TestCase): diff --git a/pelican/tests/test_settings.py b/pelican/tests/test_settings.py --- a/pelican/tests/test_settings.py +++ b/pelican/tests/test_settings.py @@ -1,13 +1,15 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function +from __future__ import print_function, unicode_literals + import copy -import os import locale +import os +from os.path import abspath, dirname, join from sys import platform -from os.path import dirname, abspath, join -from pelican.settings import (read_settings, configure_settings, - DEFAULT_CONFIG, DEFAULT_THEME) + +from pelican.settings import (DEFAULT_CONFIG, DEFAULT_THEME, + configure_settings, read_settings) from 
pelican.tests.support import unittest @@ -28,12 +30,14 @@ def tearDown(self): def test_overwrite_existing_settings(self): self.assertEqual(self.settings.get('SITENAME'), "Alexis' log") - self.assertEqual(self.settings.get('SITEURL'), - 'http://blog.notmyidea.org') + self.assertEqual( + self.settings.get('SITEURL'), + 'http://blog.notmyidea.org') def test_keep_default_settings(self): # Keep default settings if not defined. - self.assertEqual(self.settings.get('DEFAULT_CATEGORY'), + self.assertEqual( + self.settings.get('DEFAULT_CATEGORY'), DEFAULT_CONFIG['DEFAULT_CATEGORY']) def test_dont_copy_small_keys(self): @@ -69,28 +73,31 @@ def test_defaults_not_overwritten(self): def test_static_path_settings_safety(self): # Disallow static paths from being strings - settings = {'STATIC_PATHS': 'foo/bar', - 'THEME_STATIC_PATHS': 'bar/baz', - # These 4 settings are required to run configure_settings - 'PATH': '.', - 'THEME': DEFAULT_THEME, - 'SITEURL': 'http://blog.notmyidea.org/', - 'LOCALE': '', - } + settings = { + 'STATIC_PATHS': 'foo/bar', + 'THEME_STATIC_PATHS': 'bar/baz', + # These 4 settings are required to run configure_settings + 'PATH': '.', + 'THEME': DEFAULT_THEME, + 'SITEURL': 'http://blog.notmyidea.org/', + 'LOCALE': '', + } configure_settings(settings) - self.assertEqual(settings['STATIC_PATHS'], - DEFAULT_CONFIG['STATIC_PATHS']) - self.assertEqual(settings['THEME_STATIC_PATHS'], - DEFAULT_CONFIG['THEME_STATIC_PATHS']) + self.assertEqual( + settings['STATIC_PATHS'], + DEFAULT_CONFIG['STATIC_PATHS']) + self.assertEqual( + settings['THEME_STATIC_PATHS'], + DEFAULT_CONFIG['THEME_STATIC_PATHS']) def test_configure_settings(self): # Manipulations to settings should be applied correctly. settings = { - 'SITEURL': 'http://blog.notmyidea.org/', - 'LOCALE': '', - 'PATH': os.curdir, - 'THEME': DEFAULT_THEME, - } + 'SITEURL': 'http://blog.notmyidea.org/', + 'LOCALE': '', + 'PATH': os.curdir, + 'THEME': DEFAULT_THEME, + } configure_settings(settings) # SITEURL should not have a trailing slash @@ -154,7 +161,7 @@ def test_invalid_settings_throw_exception(self): settings['PATH'] = '' self.assertRaises(Exception, configure_settings, settings) - # Test nonexistent THEME + # Test nonexistent THEME settings['PATH'] = os.curdir settings['THEME'] = 'foo' diff --git a/pelican/tests/test_urlwrappers.py b/pelican/tests/test_urlwrappers.py --- a/pelican/tests/test_urlwrappers.py +++ b/pelican/tests/test_urlwrappers.py @@ -1,8 +1,9 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals -from pelican.urlwrappers import URLWrapper, Tag, Category from pelican.tests.support import unittest +from pelican.urlwrappers import Category, Tag, URLWrapper + class TestURLWrapper(unittest.TestCase): def test_ordering(self): diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -1,20 +1,22 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals, print_function, absolute_import +from __future__ import absolute_import, print_function, unicode_literals + +import locale import logging -import shutil import os +import shutil import time -import locale from sys import platform from tempfile import mkdtemp import pytz +from pelican import utils from pelican.generators import TemplatePagesGenerator -from pelican.writers import Writer from pelican.settings import read_settings -from pelican import utils -from pelican.tests.support import get_article, LoggedTestCase, locale_available, unittest +from pelican.tests.support 
import (LoggedTestCase, get_article, + locale_available, unittest) +from pelican.writers import Writer class TestUtils(LoggedTestCase): @@ -72,7 +74,7 @@ def test_get_date(self): '2012-11-22T22:11:10Z': date_hour_sec_z, '2012-11-22T22:11:10-0500': date_hour_sec_est, '2012-11-22T22:11:10.123Z': date_hour_sec_frac_z, - } + } # examples from http://www.w3.org/TR/NOTE-datetime iso_8601_date = utils.SafeDatetime(year=1997, month=7, day=16) @@ -95,7 +97,6 @@ def test_get_date(self): # invalid ones invalid_dates = ['2010-110-12', 'yay'] - for value, expected in dates.items(): self.assertEqual(utils.get_date(value), expected, value) @@ -290,7 +291,9 @@ def test_strftime(self): self.assertEqual(utils.strftime(d, '%d/%m/%Y'), '29/08/2012') # RFC 3339 - self.assertEqual(utils.strftime(d, '%Y-%m-%dT%H:%M:%SZ'),'2012-08-29T00:00:00Z') + self.assertEqual( + utils.strftime(d, '%Y-%m-%dT%H:%M:%SZ'), + '2012-08-29T00:00:00Z') # % escaped self.assertEqual(utils.strftime(d, '%d%%%m%%%y'), '29%08%12') @@ -306,8 +309,9 @@ def test_strftime(self): 'Published in 29-08-2012') # with non-ascii text - self.assertEqual(utils.strftime(d, '%d/%m/%Y Øl trinken beim Besäufnis'), - '29/08/2012 Øl trinken beim Besäufnis') + self.assertEqual( + utils.strftime(d, '%d/%m/%Y Øl trinken beim Besäufnis'), + '29/08/2012 Øl trinken beim Besäufnis') # alternative formatting options self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '29/8/12') @@ -316,7 +320,6 @@ def test_strftime(self): d = utils.SafeDatetime(2012, 8, 9) self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '9/8/12') - # test the output of utils.strftime in a different locale # Turkish locale @unittest.skipUnless(locale_available('tr_TR.UTF-8') or @@ -339,17 +342,18 @@ def test_strftime_locale_dependent_turkish(self): 'Çarşamba, 29 Ağustos 2012') # with text - self.assertEqual(utils.strftime(d, 'Yayınlanma tarihi: %A, %d %B %Y'), + self.assertEqual( + utils.strftime(d, 'Yayınlanma tarihi: %A, %d %B %Y'), 'Yayınlanma tarihi: Çarşamba, 29 Ağustos 2012') # non-ascii format candidate (someone might pass it... for some reason) - self.assertEqual(utils.strftime(d, '%Y yılında %üretim artışı'), + self.assertEqual( + utils.strftime(d, '%Y yılında %üretim artışı'), '2012 yılında %üretim artışı') # restore locale back locale.setlocale(locale.LC_ALL, old_locale) - # test the output of utils.strftime in a different locale # French locale @unittest.skipUnless(locale_available('fr_FR.UTF-8') or @@ -373,21 +377,28 @@ def test_strftime_locale_dependent_french(self): self.assertTrue(utils.strftime(d, '%A') in ('mercredi', 'Mercredi')) # with text - self.assertEqual(utils.strftime(d, 'Écrit le %d %B %Y'), + self.assertEqual( + utils.strftime(d, 'Écrit le %d %B %Y'), 'Écrit le 29 août 2012') # non-ascii format candidate (someone might pass it... 
for some reason) - self.assertEqual(utils.strftime(d, '%écrits en %Y'), + self.assertEqual( + utils.strftime(d, '%écrits en %Y'), '%écrits en 2012') # restore locale back locale.setlocale(locale.LC_ALL, old_locale) - def test_maybe_pluralize(self): - self.assertEqual(utils.maybe_pluralize(0, 'Article', 'Articles'), '0 Articles') - self.assertEqual(utils.maybe_pluralize(1, 'Article', 'Articles'), '1 Article') - self.assertEqual(utils.maybe_pluralize(2, 'Article', 'Articles'), '2 Articles') + self.assertEqual( + utils.maybe_pluralize(0, 'Article', 'Articles'), + '0 Articles') + self.assertEqual( + utils.maybe_pluralize(1, 'Article', 'Articles'), + '1 Article') + self.assertEqual( + utils.maybe_pluralize(2, 'Article', 'Articles'), + '2 Articles') class TestCopy(unittest.TestCase): @@ -435,8 +446,9 @@ def test_copy_file_different_path(self): def test_copy_file_create_dirs(self): self._create_file('a.txt') - utils.copy(os.path.join(self.root_dir, 'a.txt'), - os.path.join(self.root_dir, 'b0', 'b1', 'b2', 'b3', 'b.txt')) + utils.copy( + os.path.join(self.root_dir, 'a.txt'), + os.path.join(self.root_dir, 'b0', 'b1', 'b2', 'b3', 'b.txt')) self._exist_dir('b0') self._exist_dir('b0', 'b1') self._exist_dir('b0', 'b1', 'b2') @@ -491,35 +503,39 @@ def setUp(self): template_file.write('date = {{ date|strftime("%A, %d %B %Y") }}') self.date = utils.SafeDatetime(2012, 8, 29) - def tearDown(self): shutil.rmtree(self.temp_content) shutil.rmtree(self.temp_output) # reset locale to default locale.setlocale(locale.LC_ALL, '') - @unittest.skipUnless(locale_available('fr_FR.UTF-8') or locale_available('French'), 'French locale needed') def test_french_strftime(self): - # This test tries to reproduce an issue that occurred with python3.3 under macos10 only + # This test tries to reproduce an issue that + # occurred with python3.3 under macos10 only if platform == 'win32': locale.setlocale(locale.LC_ALL, str('French')) else: locale.setlocale(locale.LC_ALL, str('fr_FR.UTF-8')) - date = utils.SafeDatetime(2014,8,14) - # we compare the lower() dates since macos10 returns "Jeudi" for %A whereas linux reports "jeudi" - self.assertEqual( u'jeudi, 14 août 2014', utils.strftime(date, date_format="%A, %d %B %Y").lower() ) + date = utils.SafeDatetime(2014, 8, 14) + # we compare the lower() dates since macos10 returns + # "Jeudi" for %A whereas linux reports "jeudi" + self.assertEqual( + u'jeudi, 14 août 2014', + utils.strftime(date, date_format="%A, %d %B %Y").lower()) df = utils.DateFormatter() - self.assertEqual( u'jeudi, 14 août 2014', df(date, date_format="%A, %d %B %Y").lower() ) + self.assertEqual( + u'jeudi, 14 août 2014', + df(date, date_format="%A, %d %B %Y").lower()) # Let us now set the global locale to C: locale.setlocale(locale.LC_ALL, str('C')) - # DateFormatter should still work as expected since it is the whole point of DateFormatter + # DateFormatter should still work as expected + # since it is the whole point of DateFormatter # (This is where pre-2014/4/15 code fails on macos10) df_date = df(date, date_format="%A, %d %B %Y").lower() - self.assertEqual( u'jeudi, 14 août 2014', df_date ) - + self.assertEqual(u'jeudi, 14 août 2014', df_date) @unittest.skipUnless(locale_available('fr_FR.UTF-8') or locale_available('French'), @@ -530,9 +546,12 @@ def test_french_locale(self): else: locale_string = 'fr_FR.UTF-8' settings = read_settings( - override = {'LOCALE': locale_string, - 'TEMPLATE_PAGES': {'template/source.html': - 'generated/file.html'}}) + override={ + 'LOCALE': locale_string, + 'TEMPLATE_PAGES': { + 
'template/source.html': 'generated/file.html' + } + }) generator = TemplatePagesGenerator( {'date': self.date}, settings, @@ -543,7 +562,7 @@ def test_french_locale(self): generator.generate_output(writer) output_path = os.path.join( - self.temp_output, 'generated', 'file.html') + self.temp_output, 'generated', 'file.html') # output file has been generated self.assertTrue(os.path.exists(output_path)) @@ -553,7 +572,6 @@ def test_french_locale(self): self.assertEqual(output_file, utils.strftime(self.date, 'date = %A, %d %B %Y')) - @unittest.skipUnless(locale_available('tr_TR.UTF-8') or locale_available('Turkish'), 'Turkish locale needed') @@ -563,9 +581,12 @@ def test_turkish_locale(self): else: locale_string = 'tr_TR.UTF-8' settings = read_settings( - override = {'LOCALE': locale_string, - 'TEMPLATE_PAGES': {'template/source.html': - 'generated/file.html'}}) + override={ + 'LOCALE': locale_string, + 'TEMPLATE_PAGES': { + 'template/source.html': 'generated/file.html' + } + }) generator = TemplatePagesGenerator( {'date': self.date}, settings, @@ -576,7 +597,7 @@ def test_turkish_locale(self): generator.generate_output(writer) output_path = os.path.join( - self.temp_output, 'generated', 'file.html') + self.temp_output, 'generated', 'file.html') # output file has been generated self.assertTrue(os.path.exists(output_path))
make source code pretty - docstrings use `'''` in some places and `"""` in others - lots of PEP 8 violations - add a tox environment that runs flake8 checks ``` [testenv:flake8] basepython = python2.7 deps = flake8 commands = flake8 pelican ```
2015-06-09T19:59:21Z
[]
[]
getpelican/pelican
1756
getpelican__pelican-1756
[ "1647" ]
33d91579293507f9560f292a87ea998eb381e085
diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -561,8 +561,7 @@ def generate_context(self): self.tags[tag].append(article) for author in getattr(article, 'authors', []): self.authors[author].append(article) - # sort the articles by date - self.articles.sort(key=attrgetter('date'), reverse=True) + self.dates = list(self.articles) self.dates.sort(key=attrgetter('date'), reverse=self.context['NEWEST_FIRST_ARCHIVES']) diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -65,7 +65,7 @@ 'OUTPUT_RETENTION': [], 'ARTICLE_URL': '{slug}.html', 'ARTICLE_SAVE_AS': '{slug}.html', - 'ARTICLE_ORDER_BY': 'slug', + 'ARTICLE_ORDER_BY': 'reversed-date', 'ARTICLE_LANG_URL': '{slug}-{lang}.html', 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html', 'DRAFT_URL': 'drafts/{slug}.html', diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -541,16 +541,28 @@ def process_translations(content_list, order_by=None): try: index.sort(key=order_by) except Exception: - logger.error('Error sorting with function {}'.format(order_by)) - elif order_by == 'basename': - index.sort(key=lambda x: os.path.basename(x.source_path or '')) - elif order_by != 'slug': - try: - index.sort(key=attrgetter(order_by)) - except AttributeError: - error_msg = ('There is no "{}" attribute in the item metadata.' - 'Defaulting to slug order.') - logger.warning(error_msg.format(order_by)) + logger.error('Error sorting with function %s', order_by) + elif isinstance(order_by, six.string_types): + if order_by.startswith('reversed-'): + order_reversed = True + order_by = order_by.replace('reversed-', '', 1) + else: + order_reversed = False + + if order_by == 'basename': + index.sort(key=lambda x: os.path.basename(x.source_path or ''), + reverse=order_reversed) + # already sorted by slug, no need to sort again + elif not (order_by == 'slug' and not order_reversed): + try: + index.sort(key=attrgetter(order_by), + reverse=order_reversed) + except AttributeError: + logger.warning('There is no "%s" attribute in the item ' + 'metadata. Defaulting to slug order.', order_by) + else: + logger.warning('Invalid *_ORDER_BY setting (%s).' + 'Valid options are strings and functions.', order_by) return index, translations
diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -387,6 +387,65 @@ def test_standard_metadata_in_default_metadata(self): 'パイソン', 'マック']) self.assertEqual(tags, tags_expected) + def test_article_order_by(self): + settings = get_settings(filenames={}) + settings['DEFAULT_CATEGORY'] = 'Default' + settings['DEFAULT_DATE'] = (1970, 1, 1) + settings['CACHE_CONTENT'] = False # cache not needed for this logic tests + settings['ARTICLE_ORDER_BY'] = 'title' + + generator = ArticlesGenerator( + context=settings.copy(), settings=settings, + path=CONTENT_DIR, theme=settings['THEME'], output_path=None) + generator.generate_context() + + expected = [ + 'An Article With Code Block To Test Typogrify Ignore', + 'Article title', + 'Article with Nonconformant HTML meta tags', + 'Article with markdown and summary metadata multi', + 'Article with markdown and summary metadata single', + 'Article with markdown containing footnotes', + 'Article with template', + 'Rst with filename metadata', + 'Test Markdown extensions', + 'Test markdown File', + 'Test md File', + 'Test mdown File', + 'Test mkd File', + 'This is a super article !', + 'This is a super article !', + 'This is a super article !', + 'This is a super article !', + 'This is a super article !', + 'This is a super article !', + 'This is an article with category !', + 'This is an article with multiple authors in lastname, firstname format!', + 'This is an article with multiple authors in list format!', + 'This is an article with multiple authors!', + 'This is an article with multiple authors!', + 'This is an article without category !', + 'This is an article without category !', + 'マックOS X 10.8でパイソンとVirtualenvをインストールと設定'] + + articles = [article.title for article in generator.articles] + self.assertEqual(articles, expected) + + # reversed title + settings = get_settings(filenames={}) + settings['DEFAULT_CATEGORY'] = 'Default' + settings['DEFAULT_DATE'] = (1970, 1, 1) + settings['CACHE_CONTENT'] = False # cache not needed for this logic tests + settings['ARTICLE_ORDER_BY'] = 'reversed-title' + + generator = ArticlesGenerator( + context=settings.copy(), settings=settings, + path=CONTENT_DIR, theme=settings['THEME'], output_path=None) + generator.generate_context() + + articles = [article.title for article in generator.articles] + self.assertEqual(articles, list(reversed(expected))) + class TestPageGenerator(unittest.TestCase): # Note: Every time you want to test for a new field; Make sure the test @@ -473,6 +532,23 @@ def test_generate_sorted(self): pages = self.distill_pages(generator.pages) self.assertEqual(pages_expected_sorted_by_title, pages) + # sort by title reversed + pages_expected_sorted_by_title = [ + ['This is a test page with a preset template', 'published', + 'custom'], + ['This is a test page', 'published', 'page'], + ['This is a markdown test page', 'published', 'page'], + ['Page with a bunch of links', 'published', 'page'], + ['A Page (Test) for sorting', 'published', 'page'], + ] + settings['PAGE_ORDER_BY'] = 'reversed-title' + generator = PagesGenerator( + context=settings.copy(), settings=settings, + path=CUR_DIR, theme=settings['THEME'], output_path=None) + generator.generate_context() + pages = self.distill_pages(generator.pages) + self.assertEqual(pages_expected_sorted_by_title, pages) + def test_tag_and_category_links_on_generated_pages(self): """ Test to ensure links of the form {tag}tagname and {category}catname
Metadata field not recognized. I can't seem to get ARTICLE_ORDER_BY working. I see this message in the log irrespective of the custom metadata field I try to sort by: WARNING: There is no "date_parsed" attribute in the item metadata. Defaulting to slug order. I know the metadata is present because I can use it in Jinja templates like so: {{ article.date_parsed }} I'm using Pelican 3.5.0 and generating RST files that contain the metadata. Am I missing something obvious?
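For context, a minimal sketch of the setup the report describes (sorting articles by a custom metadata field). The `date_parsed` field name comes from the quoted warning; the file contents below are illustrative, not the reporter's actual configuration.

```python
# pelicanconf.py (illustrative excerpt)
# Each article declares the custom field in its reST header, e.g.
#   :date_parsed: 2015-01-31
# and the article index is then sorted by that attribute:
ARTICLE_ORDER_BY = 'date_parsed'
```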
I managed to get the metadata warning to go away, but still can't seem to get ARTICLE_ORDER_BY working. Just seems to ignore it. Any ideas to help figure out why would be greatly appreciated! For what it's worth, the relevant code is in `utils.process_translations`, lines 507-521: https://github.com/getpelican/pelican/blob/30b8a2069ee610b581c4120c40ebdbb6ec93a09d/pelican/utils.py#L507-521 Hmm… same problem here `ARTICLE_ORDER_BY = 'modified'` there is no error nor warning but index is as before. This is because `ArticlesGenerator` [sorts articles](https://github.com/getpelican/pelican/blob/master/pelican/generators.py#L552) after `process_translations` is performed. I'm guessing, when this feature was added (#1348) main concern was `PAGE_ORDER_BY`. `ARTICLE_ORDER_BY` was an afterthought and wasn't tested properly. As a reviewer of that PR, I should share the blame :). Basically, the sorting done afterwards should be moved into `process_translations` as a default for `ARTICLE_ORDER_BY`. That means, `*_ORDER_BY` has to be able to manage reversing and the way to do that right now is a custom function. I'll try to come up with a reasonable solution. Preferably something that'll handle reversing the order easier than a custom function. Maybe something like `'reversed-date'` vs `'date'`. Alternatively, the `*_ORDER_BY` settings can be changed to a `tuple`/`list` in the form of `(key, reversed)` pairs (e.g. `('date', True)`) Just use `*_ORDER_DIRECTION`, pretty standard stuff... we need this but going to have to hack it I guess. I have `reversed-date`/`date` pretty much implemented. I just need to add tests before submitting a PR. It should be ready in a day or two if my time permits. I don't want to add additional settings. For one, settings is pretty overcrowded as is. And secondly, `ARTICLE_ORDER_DIRECTION` (or `ARTICLE_ORDER_REVERSED` as I'd prefer) would be `True` by default and may trip up some people that don't pay attention.
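A hedged sketch of how the `reversed-date` / `date` idea discussed above looks from a user's point of view, alongside the custom-callable form that the `*_ORDER_BY` settings already accept. Treat this as an illustration of the proposal rather than a definitive description of the merged behaviour.

```python
# pelicanconf.py -- two ways to control article ordering discussed here

# string form with the proposed 'reversed-' prefix: newest articles first
ARTICLE_ORDER_BY = 'reversed-date'

# callable form (already supported): sort ascending by an arbitrary key
# ARTICLE_ORDER_BY = lambda article: article.date
```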
2015-06-12T21:22:41Z
[]
[]
getpelican/pelican
1764
getpelican__pelican-1764
[ "1758" ]
3f69a1fb62218975116e3fdd6d3ec6b1c1e33ae5
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -25,6 +25,7 @@ logger = logging.getLogger(__name__) +@python_2_unicode_compatible class Content(object): """Represents a content. @@ -148,12 +149,7 @@ def __init__(self, content, metadata=None, settings=None, signals.content_object_init.send(self) def __str__(self): - if self.source_path is None: - return repr(self) - elif six.PY3: - return self.source_path or repr(self) - else: - return str(self.source_path.encode('utf-8', 'replace')) + return self.source_path or repr(self) def check_properties(self): """Test mandatory properties are set.""" diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py --- a/pelican/urlwrappers.py +++ b/pelican/urlwrappers.py @@ -1,7 +1,9 @@ -import os +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + import functools import logging - +import os import six from pelican.utils import (slugify, python_2_unicode_compatible) @@ -52,27 +54,36 @@ def as_dict(self): def __hash__(self): return hash(self.slug) - def _key(self): - return self.slug - def _normalize_key(self, key): subs = self.settings.get('SLUG_SUBSTITUTIONS', ()) return six.text_type(slugify(key, subs)) def __eq__(self, other): - return self._key() == self._normalize_key(other) + if isinstance(other, self.__class__): + return self.slug == other.slug + if isinstance(other, six.text_type): + return self.slug == self._normalize_key(other) + return False def __ne__(self, other): - return self._key() != self._normalize_key(other) + if isinstance(other, self.__class__): + return self.slug != other.slug + if isinstance(other, six.text_type): + return self.slug != self._normalize_key(other) + return True def __lt__(self, other): - return self._key() < self._normalize_key(other) + if isinstance(other, self.__class__): + return self.slug < other.slug + if isinstance(other, six.text_type): + return self.slug < self._normalize_key(other) + return False def __str__(self): return self.name def __repr__(self): - return '<{} {}>'.format(type(self).__name__, str(self)) + return '<{} {}>'.format(type(self).__name__, repr(self._name)) def _from_settings(self, key, get_page_name=False): """Returns URL information as defined in settings.
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -578,30 +578,3 @@ def test_category_link_syntax(self): content = page.get_content('') self.assertNotEqual(content, html) - - -class TestURLWrapper(unittest.TestCase): - def test_comparisons(self): - # URLWrappers are sorted by name - wrapper_a = URLWrapper(name='first', settings={}) - wrapper_b = URLWrapper(name='last', settings={}) - self.assertFalse(wrapper_a > wrapper_b) - self.assertFalse(wrapper_a >= wrapper_b) - self.assertFalse(wrapper_a == wrapper_b) - self.assertTrue(wrapper_a != wrapper_b) - self.assertTrue(wrapper_a <= wrapper_b) - self.assertTrue(wrapper_a < wrapper_b) - wrapper_b.name = 'first' - self.assertFalse(wrapper_a > wrapper_b) - self.assertTrue(wrapper_a >= wrapper_b) - self.assertTrue(wrapper_a == wrapper_b) - self.assertFalse(wrapper_a != wrapper_b) - self.assertTrue(wrapper_a <= wrapper_b) - self.assertFalse(wrapper_a < wrapper_b) - wrapper_a.name = 'last' - self.assertTrue(wrapper_a > wrapper_b) - self.assertTrue(wrapper_a >= wrapper_b) - self.assertFalse(wrapper_a == wrapper_b) - self.assertTrue(wrapper_a != wrapper_b) - self.assertFalse(wrapper_a <= wrapper_b) - self.assertFalse(wrapper_a < wrapper_b) diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -29,12 +29,10 @@ def assertDictHasSubset(self, dictionary, subset): self.assertEqual( value, real_value, - str('Expected %r to have value %r, but was %r') - % (key, value, real_value)) + 'Expected %s to have value %s, but was %s' % (key, value, real_value)) else: self.fail( - str('Expected %r to have value %r, but was not in Dict') - % (key, value)) + 'Expected %s to have value %s, but was not in Dict' % (key, value)) class TestAssertDictHasSubset(ReaderTest): def setUp(self): @@ -566,9 +564,12 @@ def test_article_with_null_attributes(self): def test_article_metadata_key_lowercase(self): # Keys of metadata should be lowercase. 
page = self.read_file(path='article_with_uppercase_metadata.html') + + # Key should be lowercase self.assertIn('category', page.metadata, 'Key should be lowercase.') - self.assertEqual('Yeah', page.metadata.get('category'), - 'Value keeps cases.') + + # Value should keep cases + self.assertEqual('Yeah', page.metadata.get('category')) def test_article_with_nonconformant_meta_tags(self): page = self.read_file(path='article_with_nonconformant_meta_tags.html') diff --git a/pelican/tests/test_urlwrappers.py b/pelican/tests/test_urlwrappers.py new file mode 100644 --- /dev/null +++ b/pelican/tests/test_urlwrappers.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pelican.urlwrappers import URLWrapper, Tag, Category +from pelican.tests.support import unittest + +class TestURLWrapper(unittest.TestCase): + def test_ordering(self): + # URLWrappers are sorted by name + wrapper_a = URLWrapper(name='first', settings={}) + wrapper_b = URLWrapper(name='last', settings={}) + self.assertFalse(wrapper_a > wrapper_b) + self.assertFalse(wrapper_a >= wrapper_b) + self.assertFalse(wrapper_a == wrapper_b) + self.assertTrue(wrapper_a != wrapper_b) + self.assertTrue(wrapper_a <= wrapper_b) + self.assertTrue(wrapper_a < wrapper_b) + wrapper_b.name = 'first' + self.assertFalse(wrapper_a > wrapper_b) + self.assertTrue(wrapper_a >= wrapper_b) + self.assertTrue(wrapper_a == wrapper_b) + self.assertFalse(wrapper_a != wrapper_b) + self.assertTrue(wrapper_a <= wrapper_b) + self.assertFalse(wrapper_a < wrapper_b) + wrapper_a.name = 'last' + self.assertTrue(wrapper_a > wrapper_b) + self.assertTrue(wrapper_a >= wrapper_b) + self.assertFalse(wrapper_a == wrapper_b) + self.assertTrue(wrapper_a != wrapper_b) + self.assertFalse(wrapper_a <= wrapper_b) + self.assertFalse(wrapper_a < wrapper_b) + + def test_equality(self): + tag = Tag('test', settings={}) + cat = Category('test', settings={}) + + # same name, but different class + self.assertNotEqual(tag, cat) + + # should be equal vs text representing the same name + self.assertEqual(tag, u'test') + + # should not be equal vs binary + self.assertNotEqual(tag, b'test') + + # Tags describing the same should be equal + tag_equal = Tag('Test', settings={}) + self.assertEqual(tag, tag_equal) + + cat_ascii = Category('指導書', settings={}) + self.assertEqual(cat_ascii, u'zhi-dao-shu')
improve URLWrappers comparison. Current URLWrappers slugify a lot, resulting in a lot of processing time lost (#1493). We need to check whether we can avoid the always-occurring slugify call (which is wrong anyway) and speed up comparisons to other URLWrappers via: source: @avaris ``` try: return other.slug except AttributeError: return slugify(blabla) ``` and maybe to strings as well, since those comparisons are often done in themes: `{% if cat == "bla" %}`
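The fragment quoted above is only the idea; below is a fuller, hedged sketch of the comparison it points toward: compare precomputed slugs when the other side is another wrapper, and slugify only when it is plain text coming from a theme. Class and helper names mirror `pelican.urlwrappers` and `pelican.utils`, but the snippet is an illustration, not the merged code.

```python
import six

from pelican.utils import slugify


class URLWrapper(object):
    def __init__(self, name, settings):
        self.settings = settings
        self.name = name
        self.slug = slugify(name, settings.get('SLUG_SUBSTITUTIONS', ()))

    def __eq__(self, other):
        # another wrapper: compare slugs directly, no extra slugify call
        if isinstance(other, URLWrapper):
            return self.slug == other.slug
        # plain text, e.g. {% if category == "misc" %} in a theme template
        if isinstance(other, six.text_type):
            return self.slug == slugify(
                other, self.settings.get('SLUG_SUBSTITUTIONS', ()))
        return False

    def __hash__(self):
        return hash(self.slug)
```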
2015-06-19T09:38:51Z
[]
[]
getpelican/pelican
1795
getpelican__pelican-1795
[ "1794" ]
ed71ad0d3a54d825e73778b6964a2f47785d3740
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -248,6 +248,11 @@ def replacer(m): origin = '/'.join((siteurl, Category(path, self.settings).url)) elif what == 'tag': origin = '/'.join((siteurl, Tag(path, self.settings).url)) + else: + logger.warning( + "Replacement Indicator '%s' not recognized, " + "skipping replacement", + what) # keep all other parts, such as query, fragment, etc. parts = list(value)
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals, absolute_import +import logging import locale import os.path import six @@ -11,7 +12,7 @@ Author, Category) from pelican.settings import DEFAULT_CONFIG from pelican.signals import content_object_init -from pelican.tests.support import unittest, get_settings +from pelican.tests.support import LoggedTestCase, mute, unittest, get_settings from pelican.utils import (path_to_url, truncate_html_words, SafeDatetime, posix_join) @@ -413,10 +414,10 @@ def test_slugify_category_author(self): self.assertEqual(article.save_as, 'obrien/csharp-stuff/fnord/index.html') -class TestStatic(unittest.TestCase): +class TestStatic(LoggedTestCase): def setUp(self): - + super(TestStatic, self).setUp() self.settings = get_settings( STATIC_SAVE_AS='{path}', STATIC_URL='{path}', @@ -578,3 +579,20 @@ def test_category_link_syntax(self): content = page.get_content('') self.assertNotEqual(content, html) + + def test_unknown_link_syntax(self): + "{unknown} link syntax should trigger warning." + + html = '<a href="{unknown}foo">link</a>' + page = Page(content=html, + metadata={'title': 'fakepage'}, settings=self.settings, + source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), + context=self.context) + content = page.get_content('') + + self.assertEqual(content, html) + self.assertLogCountEqual( + count=1, + msg="Replacement Indicator 'unknown' not recognized, " + "skipping replacement", + level=logging.WARNING)
output warning during unknown content replacement. At https://github.com/getpelican/pelican/blob/master/pelican/contents.py#L215, output a warning if `what` is invalid and no substitution occurs because of it.
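A hedged sketch of the requested behaviour. The warning text matches the one added in the patch above; `find_content_url` is a hypothetical stand-in for the real source-file lookup, and the function as a whole is an illustration rather than Pelican's actual content-replacement code.

```python
import logging

from pelican.urlwrappers import Category, Tag

logger = logging.getLogger(__name__)


def find_content_url(path):
    """Hypothetical stand-in for Pelican's source-file lookup."""
    return path


def resolve_origin(what, path, siteurl, settings):
    """Map a {filename}/{category}/{tag} link prefix to a URL."""
    if what in ('filename', 'attach'):
        return find_content_url(path)
    if what == 'category':
        return '/'.join((siteurl, Category(path, settings).url))
    if what == 'tag':
        return '/'.join((siteurl, Tag(path, settings).url))
    # unknown placeholder such as {unknown}foo: warn, keep link unchanged
    logger.warning(
        "Replacement Indicator '%s' not recognized, skipping replacement",
        what)
    return None
```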
2015-08-08T12:43:33Z
[]
[]
getpelican/pelican
1837
getpelican__pelican-1837
[ "1260", "1260" ]
661ee49edae0bfbb0641f4f9bc223e6f6feb50f2
diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -18,18 +18,14 @@ from pelican import signals from pelican.cache import FileStampDataCacher from pelican.contents import Author, Category, Page, Tag -from pelican.utils import SafeDatetime, get_date, pelican_open, posixize_path +from pelican.utils import SafeDatetime, escape_html, get_date, pelican_open, \ + posixize_path try: from markdown import Markdown except ImportError: Markdown = False # NOQA -try: - from html import escape -except ImportError: - from cgi import escape - # Metadata processors have no way to discard an unwanted value, so we have # them return this value instead to signal that it should be discarded later. # This means that _filter_discardable_metadata() must be called on processed @@ -354,7 +350,7 @@ def handle_endtag(self, tag): self._in_body = False self._in_top_level = True elif self._in_body: - self._data_buffer += '</{}>'.format(escape(tag)) + self._data_buffer += '</{}>'.format(escape_html(tag)) def handle_startendtag(self, tag, attrs): if tag == 'meta' and self._in_head: @@ -375,11 +371,16 @@ def handle_charref(self, data): self._data_buffer += '&#{};'.format(data) def build_tag(self, tag, attrs, close_tag): - result = '<{}'.format(escape(tag)) + result = '<{}'.format(escape_html(tag)) for k, v in attrs: - result += ' ' + escape(k) + result += ' ' + escape_html(k) if v is not None: - result += '="{}"'.format(escape(v)) + # If the attribute value contains a double quote, surround + # with single quotes, otherwise use double quotes. + if '"' in v: + result += "='{}'".format(escape_html(v, quote=False)) + else: + result += '="{}"'.format(escape_html(v, quote=False)) if close_tag: return result + ' />' return result + '>' diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -28,6 +28,11 @@ from six.moves import html_entities from six.moves.html_parser import HTMLParser +try: + from html import escape +except ImportError: + from cgi import escape + logger = logging.getLogger(__name__) @@ -548,6 +553,14 @@ def truncate_html_words(s, num, end_text='...'): return out +def escape_html(text, quote=True): + """Escape '&', '<' and '>' to HTML-safe sequences. + + In Python 2 this uses cgi.escape and in Python 3 this uses html.escape. We + wrap here to ensure the quote argument has an identical default.""" + return escape(text, quote=quote) + + def process_translations(content_list, order_by=None): """ Finds translation and returns them.
diff --git a/pelican/tests/content/article_with_attributes_containing_double_quotes.html b/pelican/tests/content/article_with_attributes_containing_double_quotes.html new file mode 100644 --- /dev/null +++ b/pelican/tests/content/article_with_attributes_containing_double_quotes.html @@ -0,0 +1,11 @@ +<html> + <head> + </head> + <body> + Ensure that if an attribute value contains a double quote, it is + surrounded with single quotes, otherwise with double quotes. + <span data-test="'single quoted string'">Span content</span> + <span data-test='"double quoted string"'>Span content</span> + <span data-test="string without quotes">Span content</span> + </body> +</html> diff --git a/pelican/tests/test_cache.py b/pelican/tests/test_cache.py --- a/pelican/tests/test_cache.py +++ b/pelican/tests/test_cache.py @@ -61,7 +61,7 @@ def test_article_object_caching(self): - article_with_null_attributes.html - 2012-11-30_md_w_filename_meta#foo-bar.md """ - self.assertEqual(generator.readers.read_file.call_count, 3) + self.assertEqual(generator.readers.read_file.call_count, 4) @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_article_reader_content_caching(self): diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -587,6 +587,17 @@ def test_article_with_null_attributes(self): <input name="test" disabled style="" /> ''', page.content) + def test_article_with_attributes_containing_double_quotes(self): + page = self.read_file(path='article_with_attributes_containing_' + + 'double_quotes.html') + self.assertEqual(''' + Ensure that if an attribute value contains a double quote, it is + surrounded with single quotes, otherwise with double quotes. + <span data-test="'single quoted string'">Span content</span> + <span data-test='"double quoted string"'>Span content</span> + <span data-test="string without quotes">Span content</span> + ''', page.content) + def test_article_metadata_key_lowercase(self): # Keys of metadata should be lowercase. page = self.read_file(path='article_with_uppercase_metadata.html')
Quote escapes improperly handled when generating HTML articles. Say I want to include a JSON dataset in an HTML element, for example ``` html <section id="my-section" data-my-data='{"myJSON": "object"}'> ... </section> ``` the output will be ``` html <section id="my-section" data-my-data="{"myJSON": "object"}"> ... </section> ``` which is terrible. Note the significant change from single to double quotes. Under most circumstances this isn't a problem, since reversing the quotes (double quotes surrounding single quotes) will retain the order, but that is not a valid JSON object. Its keys and strings _have_ to be surrounded by double quotes. I tried looking through the code base to fix it myself, but I have no idea where the transition happens. Best regards,
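A hedged sketch of the quote-aware attribute rendering the report asks for, mirroring the `build_tag` change in the patch above: escape `&`, `<`, `>` but leave quotes alone, then pick the surrounding quote character based on the value, so a JSON payload in a `data-*` attribute survives intact. Illustrative only, not Pelican's exact reader code.

```python
try:
    from html import escape   # Python 3
except ImportError:
    from cgi import escape    # Python 2


def build_attr(name, value):
    """Render one HTML attribute without mangling embedded quotes."""
    if value is None:
        return ' ' + escape(name)
    body = escape(value, quote=False)
    if '"' in value:
        # value already uses double quotes (e.g. JSON): wrap in single quotes
        return " {}='{}'".format(escape(name), body)
    return ' {}="{}"'.format(escape(name), body)

# Example: build_attr('data-my-data', '{"myJSON": "object"}')
# renders as  data-my-data='{"myJSON": "object"}'  (double quotes preserved)
```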
2015-10-04T18:35:02Z
[]
[]
getpelican/pelican
1850
getpelican__pelican-1850
[ "1847" ]
e16ca36fe0ff8873eb7582f6c9fdaf8b9a8a8bee
diff --git a/pelican/log.py b/pelican/log.py --- a/pelican/log.py +++ b/pelican/log.py @@ -172,21 +172,49 @@ def __init__(self, *args, **kwargs): logging.setLoggerClass(LimitLogger) -def init(level=None, handler=logging.StreamHandler()): +def supports_color(): + """ + Returns True if the running system's terminal supports color, + and False otherwise. + + from django.core.management.color + """ + plat = sys.platform + supported_platform = plat != 'Pocket PC' and \ + (plat != 'win32' or 'ANSICON' in os.environ) - logger = logging.getLogger() + # isatty is not always implemented, #6223. + is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() + if not supported_platform or not is_a_tty: + return False + return True - if os.isatty(sys.stdout.fileno()) and not sys.platform.startswith('win'): - fmt = ANSIFormatter() + +def get_formatter(): + if supports_color(): + return ANSIFormatter() else: - fmt = TextFormatter() - handler.setFormatter(fmt) + return TextFormatter() + + +def init(level=None, handler=logging.StreamHandler(), name=None): + + logger = logging.getLogger(name) + + handler.setFormatter(get_formatter()) logger.addHandler(handler) if level: logger.setLevel(level) +def log_warnings(): + import warnings + logging.captureWarnings(True) + warnings.simplefilter("default", DeprecationWarning) + init(logging.DEBUG, name='py.warnings') + + if __name__ == '__main__': init(level=logging.DEBUG) diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -424,7 +424,11 @@ def __init__(self, truncate_at): def __init__(self, max_words): # In Python 2, HTMLParser is not a new-style class, # hence super() cannot be used. - HTMLParser.__init__(self) + try: + HTMLParser.__init__(self, convert_charrefs=False) + except TypeError: + # pre Python 3.3 + HTMLParser.__init__(self) self.max_words = max_words self.words_found = 0 diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -55,6 +55,7 @@ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Software Development :: Libraries :: Python Modules', ],
diff --git a/pelican/tests/__init__.py b/pelican/tests/__init__.py --- a/pelican/tests/__init__.py +++ b/pelican/tests/__init__.py @@ -1,2 +1,15 @@ import logging +import warnings + +from pelican.log import log_warnings + +# redirect warnings modulole to use logging instead +log_warnings() + +# setup warnings to log DeprecationWarning's and error on +# warnings in pelican's codebase +warnings.simplefilter("default", DeprecationWarning) +warnings.filterwarnings("error", ".*", Warning, "pelican") + +# Add a NullHandler to silence warning about no available handlers logging.getLogger().addHandler(logging.NullHandler()) diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -3,6 +3,8 @@ import os +import six + from pelican import readers from pelican.tests.support import get_settings, unittest from pelican.utils import SafeDatetime @@ -55,7 +57,8 @@ def test_equal(self): self.assertDictHasSubset(self.dictionary, self.dictionary) def test_fail_not_set(self): - self.assertRaisesRegexp( + six.assertRaisesRegex( + self, AssertionError, 'Expected.*key-c.*to have value.*val-c.*but was not in Dict', self.assertDictHasSubset, @@ -63,7 +66,8 @@ def test_fail_not_set(self): {'key-c': 'val-c'}) def test_fail_wrong_val(self): - self.assertRaisesRegexp( + six.assertRaisesRegex( + self, AssertionError, 'Expected .*key-a.* to have value .*val-b.* but was .*val-a.*', self.assertDictHasSubset, diff --git a/pelican/tests/test_testsuite.py b/pelican/tests/test_testsuite.py new file mode 100644 --- /dev/null +++ b/pelican/tests/test_testsuite.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, unicode_literals + +import sys +import warnings + +from pelican.tests.support import unittest + + +class TestSuiteTest(unittest.TestCase): + + @unittest.skipIf(sys.version_info[:2] == (3, 3), + "does not throw an exception on python 3.3") + def test_error_on_warning(self): + with self.assertRaises(UserWarning): + warnings.warn('test warning')
truncate_html_words not working with Python 3.5. While preparing a PR to add py35 to tox and Travis, I discovered that `pelican.utils.truncate_html_words` is no longer working with Python 3.5: no shortening takes place. It is possibly a good idea to avoid Python 3.5 for now until we have made sure it is working properly. If you are suffering from this issue, please have a look at the docs, which explain how to use virtualenv to create a custom virtual environment that contains a supported Python version (2.7, 3.3, 3.4).
Changed in version 3.5: The default value for argument convert_charrefs is now True. If we override it with False, it works again; thoughts on the matter (nudge @avaris)?
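A minimal sketch of the workaround discussed above, mirroring the approach taken in the patch: pass `convert_charrefs=False` where the running `HTMLParser` accepts the keyword, and fall back to the plain constructor otherwise. The `_WordCounter` class name is hypothetical and used only for illustration.

```python
# Sketch: restore the pre-3.5 HTMLParser behaviour that truncate_html_words
# relies on by passing convert_charrefs=False where the keyword exists.
try:
    from html.parser import HTMLParser   # Python 3
except ImportError:
    from HTMLParser import HTMLParser    # Python 2


class _WordCounter(HTMLParser):
    def __init__(self, max_words):
        try:
            # Python >= 3.4 accepts convert_charrefs; passing False keeps
            # character references intact so word counting still works.
            HTMLParser.__init__(self, convert_charrefs=False)
        except TypeError:
            # Older interpreters do not know the keyword at all.
            HTMLParser.__init__(self)
        self.max_words = max_words
        self.words_found = 0
```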
2015-10-20T08:33:55Z
[]
[]
getpelican/pelican
1,926
getpelican__pelican-1926
[ "1873" ]
70665ea0fa659c01b8dd0124b0ea82696d2bf91d
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -172,6 +172,7 @@ def url_format(self): 'lang': getattr(self, 'lang', 'en'), 'date': getattr(self, 'date', SafeDatetime.now()), 'author': self.author.slug if hasattr(self, 'author') else '', + 'tag': self.tag.slug if hasattr(self, 'tag') else '', 'category': self.category.slug if hasattr(self, 'category') else '' }) return metadata diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py --- a/pelican/urlwrappers.py +++ b/pelican/urlwrappers.py @@ -112,13 +112,33 @@ def _from_settings(self, key, get_page_name=False): class Category(URLWrapper): - pass + @property + def slug(self): + if self._slug is None: + substitutions = self.settings.get('SLUG_SUBSTITUTIONS', ()) + substitutions += tuple(self.settings.get('CATEGORY_SUBSTITUTIONS', + ())) + self._slug = slugify(self.name, substitutions) + return self._slug class Tag(URLWrapper): def __init__(self, name, *args, **kwargs): super(Tag, self).__init__(name.strip(), *args, **kwargs) + @property + def slug(self): + if self._slug is None: + substitutions = self.settings.get('SLUG_SUBSTITUTIONS', ()) + substitutions += tuple(self.settings.get('TAG_SUBSTITUTIONS', ())) + self._slug = slugify(self.name, substitutions) + return self._slug + class Author(URLWrapper): - pass + @property + def slug(self): + if self._slug is None: + self._slug = slugify(self.name, + self.settings.get('AUTHOR_SUBSTITUTIONS', ())) + return self._slug diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -270,10 +270,34 @@ def slugify(value, substitutions=()): value = value.decode('ascii') # still unicode value = unicodedata.normalize('NFKD', value).lower() - for src, dst in substitutions: + + # backward compatible covert from 2-tuples to 3-tuples + new_subs = [] + for tpl in substitutions: + try: + src, dst, skip = tpl + except ValueError: + src, dst = tpl + skip = False + new_subs.append((src, dst, skip)) + substitutions = tuple(new_subs) + + # by default will replace non-alphanum characters + replace = True + for src, dst, skip in substitutions: + orig_value = value value = value.replace(src.lower(), dst.lower()) - value = re.sub('[^\w\s-]', '', value).strip() - value = re.sub('[-\s]+', '-', value) + # if replacement was made then skip non-alphanum + # replacement if instructed to do so + if value != orig_value: + replace = replace and not skip + + if replace: + value = re.sub('[^\w\s-]', '', value).strip() + value = re.sub('[-\s]+', '-', value) + else: + value = value.strip() + # we want only ASCII chars value = value.encode('ascii', 'ignore') # but Pelican should generally use only unicode
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -11,7 +11,7 @@ import six -from pelican.contents import Article, Author, Category, Page, Static +from pelican.contents import Article, Author, Category, Page, Static, Tag from pelican.settings import DEFAULT_CONFIG from pelican.signals import content_object_init from pelican.tests.support import LoggedTestCase, get_settings, unittest @@ -457,6 +457,46 @@ def test_slugify_category_author(self): self.assertEqual( article.save_as, 'obrien/csharp-stuff/fnord/index.html') + def test_slugify_with_author_substitutions(self): + settings = get_settings() + settings['AUTHOR_SUBSTITUTIONS'] = [ + ('Alexander Todorov', 'atodorov', False), + ('Krasimir Tsonev', 'krasimir', False), + ] + settings['ARTICLE_URL'] = 'blog/{author}/{slug}/' + settings['ARTICLE_SAVE_AS'] = 'blog/{author}/{slug}/index.html' + article_kwargs = self._copy_page_kwargs() + article_kwargs['metadata']['author'] = Author('Alexander Todorov', + settings) + article_kwargs['metadata']['title'] = 'fnord' + article_kwargs['settings'] = settings + article = Article(**article_kwargs) + self.assertEqual(article.url, 'blog/atodorov/fnord/') + self.assertEqual(article.save_as, 'blog/atodorov/fnord/index.html') + + def test_slugify_category_with_dots(self): + settings = get_settings() + settings['CATEGORY_SUBSTITUTIONS'] = [('Fedora QA', 'fedora.qa', True)] + settings['ARTICLE_URL'] = '{category}/{slug}/' + article_kwargs = self._copy_page_kwargs() + article_kwargs['metadata']['category'] = Category('Fedora QA', + settings) + article_kwargs['metadata']['title'] = 'This Week in Fedora QA' + article_kwargs['settings'] = settings + article = Article(**article_kwargs) + self.assertEqual(article.url, 'fedora.qa/this-week-in-fedora-qa/') + + def test_slugify_tags_with_dots(self): + settings = get_settings() + settings['TAG_SUBSTITUTIONS'] = [('Fedora QA', 'fedora.qa', True)] + settings['ARTICLE_URL'] = '{tag}/{slug}/' + article_kwargs = self._copy_page_kwargs() + article_kwargs['metadata']['tag'] = Tag('Fedora QA', settings) + article_kwargs['metadata']['title'] = 'This Week in Fedora QA' + article_kwargs['settings'] = settings + article = Article(**article_kwargs) + self.assertEqual(article.url, 'fedora.qa/this-week-in-fedora-qa/') + class TestStatic(LoggedTestCase): diff --git a/pelican/tests/test_urlwrappers.py b/pelican/tests/test_urlwrappers.py --- a/pelican/tests/test_urlwrappers.py +++ b/pelican/tests/test_urlwrappers.py @@ -56,3 +56,34 @@ def test_equality(self): cat_ascii = Category('指導書', settings={}) self.assertEqual(cat_ascii, u'zhi-dao-shu') + + def test_slugify_with_substitutions_and_dots(self): + tag = Tag('Tag Dot', + settings={ + 'TAG_SUBSTITUTIONS': [('Tag Dot', 'tag.dot', True)] + }) + cat = Category('Category Dot', + settings={ + 'CATEGORY_SUBSTITUTIONS': (('Category Dot', + 'cat.dot', + True),) + }) + + self.assertEqual(tag.slug, 'tag.dot') + self.assertEqual(cat.slug, 'cat.dot') + + def test_author_slug_substitutions(self): + settings = { + 'AUTHOR_SUBSTITUTIONS': [ + ('Alexander Todorov', 'atodorov', False), + ('Krasimir Tsonev', 'krasimir', False), + ] + } + + author1 = Author('Mr. 
Senko', settings=settings) + author2 = Author('Alexander Todorov', settings=settings) + author3 = Author('Krasimir Tsonev', settings=settings) + + self.assertEqual(author1.slug, 'mr-senko') + self.assertEqual(author2.slug, 'atodorov') + self.assertEqual(author3.slug, 'krasimir') diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -131,6 +131,18 @@ def test_slugify_substitute(self): for value, expected in samples: self.assertEqual(utils.slugify(value, subs), expected) + def test_slugify_substitute_and_keeping_non_alphanum(self): + + samples = (('Fedora QA', 'fedora.qa'), + ('C++ is used by Fedora QA', 'cpp is used by fedora.qa'), + ('C++ is based on C', 'cpp-is-based-on-c'), + ('C+++ test C+ test', 'cpp-test-c-test'),) + + subs = (('Fedora QA', 'fedora.qa', True), + ('c++', 'cpp'),) + for value, expected in samples: + self.assertEqual(utils.slugify(value, subs), expected) + def test_get_relative_path(self): samples = ((os.path.join('test', 'test.html'), os.pardir),
RFE: Add slugs for tags for backward compatibility with existing URLs Hi guys, I've migrated to Pelican from Octopress and managed to keep all article URLs the same, but had trouble with some tags. If your tag contains a dot (e.g. fedora.planet) then its slug removes the dot (it becomes fedoraplanet) and I don't see any way to keep my existing tag URL.
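For illustration, the accompanying patch addresses this by letting substitution tuples carry an optional third element that skips the usual stripping of non-alphanumeric characters. The settings below are a sketch based on the values used in the patch's tests, not official documentation.

```python
# pelicanconf.py (sketch, values taken from the patch's tests):
# a third True element keeps characters such as dots in the slug.
TAG_SUBSTITUTIONS = [('Fedora QA', 'fedora.qa', True)]
CATEGORY_SUBSTITUTIONS = [('Fedora QA', 'fedora.qa', True)]
AUTHOR_SUBSTITUTIONS = [('Alexander Todorov', 'atodorov', False)]

# With this URL pattern, an article tagged "Fedora QA" is saved under
# fedora.qa/<slug>/ with the dot preserved, matching the pre-migration URL.
ARTICLE_URL = '{tag}/{slug}/'
```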
2016-03-13T22:19:35Z
[]
[]
getpelican/pelican
1,927
getpelican__pelican-1927
[ "1024", "1024" ]
594b9c963390b77ae6e8c9eface6566ebefa4219
diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -246,8 +246,16 @@ class MarkdownReader(BaseReader): def __init__(self, *args, **kwargs): super(MarkdownReader, self).__init__(*args, **kwargs) - self.extensions = self.settings['MD_EXTENSIONS'] - self.extensions.setdefault('markdown.extensions.meta', {}) + # make sure 'extension_configs' exists and + # and either way 'markdown.extensions.meta' must be in there + settings = self.settings['MARKDOWN'] + settings.setdefault('extension_configs', {}) + settings['extension_configs'].setdefault( + 'markdown.extensions.meta', {}) + settings.setdefault('extensions', []) + settings['extensions'].extend( + list(settings['extension_configs'].keys())) + settings['extensions'] = list(set(settings['extensions'])) self._source_path = None def _parse_metadata(self, meta): @@ -283,8 +291,7 @@ def read(self, source_path): """Parse content and metadata of markdown files""" self._source_path = source_path - self._md = Markdown(extensions=self.extensions.keys(), - extension_configs=self.extensions) + self._md = Markdown(**self.settings['MARKDOWN']) with pelican_open(source_path) as text: content = self._md.convert(text) diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -101,10 +101,13 @@ def load_source(name, path): 'PELICAN_CLASS': 'pelican.Pelican', 'DEFAULT_DATE_FORMAT': '%a %d %B %Y', 'DATE_FORMATS': {}, - 'MD_EXTENSIONS': { - 'markdown.extensions.codehilite': {'css_class': 'highlight'}, - 'markdown.extensions.extra': {}, - 'markdown.extensions.meta': {}, + 'MARKDOWN': { + 'extension_configs': { + 'markdown.extensions.codehilite': {'css_class': 'highlight'}, + 'markdown.extensions.extra': {}, + 'markdown.extensions.meta': {}, + }, + 'output_format': 'html5', }, 'JINJA_EXTENSIONS': [], 'JINJA_FILTERS': {}, @@ -368,13 +371,11 @@ def configure_settings(settings): PATH_KEY) settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY] - # Save people from declaring MD_EXTENSIONS as a list rather than a dict - if not isinstance(settings.get('MD_EXTENSIONS', {}), dict): - logger.warning('The format of the MD_EXTENSIONS setting has ' - 'changed. It should now be a dict mapping ' - 'fully-qualified extension names to their ' - 'configurations. Falling back to the default.') - settings['MD_EXTENSIONS'] = DEFAULT_CONFIG['MD_EXTENSIONS'] + # Deprecated warning of MD_EXTENSIONS + if 'MD_EXTENSIONS' in settings: + logger.warning('MD_EXTENSIONS is deprecated use MARKDOWN ' + 'instead. Falling back to the default.') + settings['MARKDOWN'] = DEFAULT_CONFIG['MARKDOWN'] # Add {PAGE,ARTICLE}_PATHS to {ARTICLE,PAGE}_EXCLUDES mutually_exclusive = ('ARTICLE', 'PAGE')
diff --git a/pelican/tests/test_pelican.py b/pelican/tests/test_pelican.py --- a/pelican/tests/test_pelican.py +++ b/pelican/tests/test_pelican.py @@ -208,18 +208,17 @@ def test_write_only_selected(self): msg="Writing .*", level=logging.INFO) - def test_md_extensions_list_deprecation(self): - """Test that a warning is issued if MD_EXTENSIONS is a list""" + def test_md_extensions_deprecation(self): + """Test that a warning is issued if MD_EXTENSIONS is used""" settings = read_settings(path=None, override={ 'PATH': INPUT_PATH, 'OUTPUT_PATH': self.temp_path, 'CACHE_PATH': self.temp_cache, - 'MD_EXTENSIONS': ['meta'], + 'MD_EXTENSIONS': {}, }) pelican = Pelican(settings=settings) mute(True)(pelican.run)() - self.assertIsInstance(pelican.settings['MD_EXTENSIONS'], dict) self.assertLogCountEqual( count=1, - msg="The format of the MD_EXTENSIONS setting has changed", + msg="MD_EXTENSIONS is deprecated use MARKDOWN instead.", level=logging.WARNING) diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -405,20 +405,20 @@ def test_article_with_footnote(self): _path('article_with_markdown_and_footnote.md')) expected_content = ( '<p>This is some content' - '<sup id="fnref:1"><a class="footnote-ref" href="#fn:1" ' - 'rel="footnote">1</a></sup>' + '<sup id="fnref-1"><a class="footnote-ref" href="#fn-1"' + '>1</a></sup>' ' with some footnotes' - '<sup id="fnref:footnote"><a class="footnote-ref" ' - 'href="#fn:footnote" rel="footnote">2</a></sup></p>\n' + '<sup id="fnref-footnote"><a class="footnote-ref" ' + 'href="#fn-footnote">2</a></sup></p>\n' '<div class="footnote">\n' - '<hr />\n<ol>\n<li id="fn:1">\n' + '<hr>\n<ol>\n<li id="fn-1">\n' '<p>Numbered footnote&#160;' - '<a class="footnote-backref" href="#fnref:1" rev="footnote" ' + '<a class="footnote-backref" href="#fnref-1" ' 'title="Jump back to footnote 1 in the text">&#8617;</a></p>\n' - '</li>\n<li id="fn:footnote">\n' + '</li>\n<li id="fn-footnote">\n' '<p>Named footnote&#160;' - '<a class="footnote-backref" href="#fnref:footnote" rev="footnote"' + '<a class="footnote-backref" href="#fnref-footnote"' ' title="Jump back to footnote 2 in the text">&#8617;</a></p>\n' '</li>\n</ol>\n</div>') expected_metadata = { @@ -482,10 +482,12 @@ def test_article_with_markdown_markup_extension(self): # expected page = self.read_file( path='article_with_markdown_markup_extensions.md', - MD_EXTENSIONS={ - 'markdown.extensions.toc': {}, - 'markdown.extensions.codehilite': {}, - 'markdown.extensions.extra': {} + MARKDOWN={ + 'extension_configs': { + 'markdown.extensions.toc': {}, + 'markdown.extensions.codehilite': {}, + 'markdown.extensions.extra': {} + } } ) expected = ('<div class="toc">\n'
Footnote processing returns non-valid html rel="footnote" When validating pelican-created HTML5, validation fails on the markup of footnotes. It would appear that the Markdown processor automatically includes the attribute _rel="footnote"_, which attribute is not included in html specification.
You should add `output_format='html5'` to the Markdown class, then the footnote extension will check for this : http://pythonhosted.org/Markdown/reference.html#output_format The problem is that there is no way to do this currently. Maybe we should add a `MD_SETTINGS` like the `DOCUTILS_SETTINGS` ? The `MD_EXTENSIONS` is just a subset of the Markdown settings, it could be replaced with: ``` MD_SETTINGS = { 'extensions': [], # same as MD_EXTENSIONS 'output_format': 'html5', ... } ``` @saimn: Sounds like a good idea to me! @shabob: Might you be interested in implementing a solution based on Simon's suggestion? @Mihara: Since you seem to have run into this issue recently, what do you think of @saimn's suggestion? I think it's the way to go. Since I don't currently have the time to pick apart pelican's own internals, I hacked up the [markdown_thirdparty plugin](https://github.com/FuzzyWuzzie/markdown_thirdparty) to force the output_format to html5, but a solution for actually configuring markdown settings in a generic way is obviously better. As a side note, amending that suggestion to also make pelican look for markdown extensions in the same locations it loads plugins from, rather than just site-packages _(as far as I can tell it doesn't, but correct me if I'm wrong)_ would be even better. Just because it fits with the problem domain "invalid HTML": I've found invalid HTML being generated using reStructuredText (_not_ Markdown) too, and footnote is a matching problem there. I've mentioned the specific validation error messages in a PR of a Pelican theme: [cid (PR #3)](https://github.com/hdra/Pelican-Cid/pull/3) Can these problems be fixed using a specific configuration, or do I have to suggest a different HTML output being generated in general at the [Sphinx project](http://sphinx-doc.org/)? Ideas anyone?
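The patch above replaces `MD_EXTENSIONS` with a broader `MARKDOWN` setting whose contents are passed straight to the `Markdown` constructor, so `output_format` becomes configurable and defaults to `html5`, which drops the invalid `rel="footnote"` attribute. A sketch of the new default, as introduced in pelican/settings.py by the patch:

```python
# New default introduced by the patch; everything here is forwarded to
# markdown.Markdown(**MARKDOWN) by the reader, so any Markdown keyword
# argument (extensions, output_format, ...) can be configured.
MARKDOWN = {
    'extension_configs': {
        'markdown.extensions.codehilite': {'css_class': 'highlight'},
        'markdown.extensions.extra': {},
        'markdown.extensions.meta': {},
    },
    'output_format': 'html5',
}
```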
2016-03-14T18:21:41Z
[]
[]
getpelican/pelican
1,937
getpelican__pelican-1937
[ "1933" ]
132fe9a4fa370706cccbc4f06c79cfee2031a009
diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -55,8 +55,10 @@ def _add_item_to_the_feed(self, feed, item): categories=item.tags if hasattr(item, 'tags') else None, author_name=getattr(item, 'author', ''), pubdate=set_date_tzinfo( - item.modified if hasattr(item, 'modified') else item.date, - self.settings.get('TIMEZONE', None))) + item.date, self.settings.get('TIMEZONE', None)), + updateddate=set_date_tzinfo( + item.modified, self.settings.get('TIMEZONE', None) + ) if hasattr(item, 'modified') else None) def _open_w(self, filename, encoding, override=False): """Open a file to write some content to it. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup -requires = ['feedgenerator >= 1.6', 'jinja2 >= 2.7', 'pygments', 'docutils', +requires = ['feedgenerator >= 1.8', 'jinja2 >= 2.7', 'pygments', 'docutils', 'pytz >= 0a', 'blinker', 'unidecode', 'six >= 1.4', 'python-dateutil']
diff --git a/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml b/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml --- a/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml +++ b/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+00:00</published><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -11,7 +11,7 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><author><name>Alexis Métaireau</name></author><id>tag:,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; diff --git a/pelican/tests/output/basic/feeds/alexis-metaireau.rss.xml b/pelican/tests/output/basic/feeds/alexis-metaireau.rss.xml --- a/pelican/tests/output/basic/feeds/alexis-metaireau.rss.xml +++ b/pelican/tests/output/basic/feeds/alexis-metaireau.rss.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>A Pelican Blog</title><link>/</link><description></description><atom:link href="/feeds/alexis-metaireau.rss.xml" rel="self"></atom:link><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0000</lastBuildDate><item><title>This is a super article !</title><link>/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +<rss version="2.0"><channel><title>A Pelican Blog</title><link>/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0000</lastBuildDate><item><title>This is a super article !</title><link>/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -11,10 +11,10 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 17 Nov 2013 23:29:00 +0000</pubDate><guid>tag:,2010-12-02:this-is-a-super-article.html</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>/oh-yeah.html</link><description>&lt;div class="section" id="why-not"&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 02 Dec 2010 10:14:00 +0000</pubDate><guid isPermaLink="false">tag:,2010-12-02:this-is-a-super-article.html</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>/oh-yeah.html</link><description>&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="|filename|/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0000</pubDate><guid>tag:,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0000</pubDate><guid isPermaLink="false">tag:,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/all-en.atom.xml b/pelican/tests/output/basic/feeds/all-en.atom.xml --- a/pelican/tests/output/basic/feeds/all-en.atom.xml +++ b/pelican/tests/output/basic/feeds/all-en.atom.xml @@ -1,12 +1,12 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all-en.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><updated>2012-11-30T00:00:00+00:00</updated><author><name></name></author><id>tag:,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><updated>2011-04-20T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all-en.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+00:00</published><author><name></name></author><id>tag:,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><author><name></name></author><id>tag:,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're 
mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary></entry><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+00:00</published><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -18,13 +18,13 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" 
rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><author><name>Alexis Métaireau</name></author><id>tag:,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="|filename|/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+00:00</updated><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+00:00</published><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -57,5 +57,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+00:00</updated><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+00:00</published><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/all-fr.atom.xml b/pelican/tests/output/basic/feeds/all-fr.atom.xml --- a/pelican/tests/output/basic/feeds/all-fr.atom.xml +++ b/pelican/tests/output/basic/feeds/all-fr.atom.xml @@ -1,3 +1,3 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all-fr.atom.xml" rel="self"></link><id>/</id><updated>2012-02-29T00:00:00+00:00</updated><entry><title>Deuxième article</title><link href="/second-article-fr.html" rel="alternate"></link><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all-fr.atom.xml" 
rel="self"></link><id>/</id><updated>2012-02-29T00:00:00+00:00</updated><entry><title>Deuxième article</title><link href="/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><author><name></name></author><id>tag:,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/all.atom.xml b/pelican/tests/output/basic/feeds/all.atom.xml --- a/pelican/tests/output/basic/feeds/all.atom.xml +++ b/pelican/tests/output/basic/feeds/all.atom.xml @@ -1,13 +1,13 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><updated>2012-11-30T00:00:00+00:00</updated><author><name></name></author><id>tag:,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="/second-article-fr.html" rel="alternate"></link><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><updated>2011-04-20T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+00:00</published><author><name></name></author><id>tag:,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><author><name></name></author><id>tag:,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="/second-article-fr.html" 
rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><author><name></name></author><id>tag:,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary></entry><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+00:00</published><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -19,13 +19,13 @@ &lt;/pre&gt; &lt;p&gt;→ And 
now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><author><name>Alexis Métaireau</name></author><id>tag:,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="|filename|/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+00:00</updated><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+00:00</published><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -58,5 +58,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+00:00</updated><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+00:00</published><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/bar.atom.xml b/pelican/tests/output/basic/feeds/bar.atom.xml --- a/pelican/tests/output/basic/feeds/bar.atom.xml +++ b/pelican/tests/output/basic/feeds/bar.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/bar.atom.xml" rel="self"></link><id>/</id><updated>2010-10-20T10:14:00+00:00</updated><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/bar.atom.xml" rel="self"></link><id>/</id><updated>2010-10-20T10:14:00+00:00</updated><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><author><name>Alexis Métaireau</name></author><id>tag:,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; diff --git a/pelican/tests/output/basic/feeds/cat1.atom.xml b/pelican/tests/output/basic/feeds/cat1.atom.xml --- a/pelican/tests/output/basic/feeds/cat1.atom.xml +++ b/pelican/tests/output/basic/feeds/cat1.atom.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/cat1.atom.xml" rel="self"></link><id>/</id><updated>2011-04-20T00:00:00+00:00</updated><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><updated>2011-04-20T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/cat1.atom.xml" rel="self"></link><id>/</id><updated>2011-04-20T00:00:00+00:00</updated><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/misc.atom.xml b/pelican/tests/output/basic/feeds/misc.atom.xml --- a/pelican/tests/output/basic/feeds/misc.atom.xml +++ b/pelican/tests/output/basic/feeds/misc.atom.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> 
-<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/misc.atom.xml" rel="self"></link><id>/</id><updated>2012-11-30T00:00:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><updated>2012-11-30T00:00:00+00:00</updated><author><name></name></author><id>tag:,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+00:00</updated><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/misc.atom.xml" rel="self"></link><id>/</id><updated>2012-11-30T00:00:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+00:00</published><author><name></name></author><id>tag:,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><author><name></name></author><id>tag:,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+00:00</published><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -34,5 +34,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+00:00</updated><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+00:00</published><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/yeah.atom.xml b/pelican/tests/output/basic/feeds/yeah.atom.xml --- a/pelican/tests/output/basic/feeds/yeah.atom.xml +++ b/pelican/tests/output/basic/feeds/yeah.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/yeah.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/yeah.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+00:00</published><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; diff --git a/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml b/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml --- a/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml +++ b/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml @@ -1,12 +1,12 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA 
example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-2.html</id><summary 
type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -18,13 +18,13 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not 
? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -57,5 +57,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/alexis-metaireau.rss.xml b/pelican/tests/output/custom/feeds/alexis-metaireau.rss.xml --- a/pelican/tests/output/custom/feeds/alexis-metaireau.rss.xml +++ b/pelican/tests/output/custom/feeds/alexis-metaireau.rss.xml @@ -1,12 +1,12 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/alexis-metaireau.rss.xml" rel="self"></atom:link><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/filename_metadata-example.html</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 
+0100</pubDate><guid>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/second-article.html</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:second-article.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/a-markdown-powered-article.html</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/filename_metadata-example.html</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/second-article.html</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:second-article.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/a-markdown-powered-article.html</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/article-1.html</link><description>&lt;p&gt;Article 1&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:article-1.html</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/article-2.html</link><description>&lt;p&gt;Article 2&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:article-2.html</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/article-3.html</link><description>&lt;p&gt;Article 3&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:article-3.html</guid></item><item><title>This is a super article 
!</title><link>http://blog.notmyidea.org/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/article-1.html</link><description>&lt;p&gt;Article 1&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-1.html</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/article-2.html</link><description>&lt;p&gt;Article 2&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-2.html</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/article-3.html</link><description>&lt;p&gt;Article 3&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-3.html</guid></item><item><title>This is a super article !</title><link>http://blog.notmyidea.org/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -18,13 +18,13 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 17 Nov 2013 23:29:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/oh-yeah.html</link><description>&lt;div class="section" id="why-not"&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 02 Dec 2010 10:14:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/oh-yeah.html</link><description>&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -57,5 +57,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all-en.atom.xml b/pelican/tests/output/custom/feeds/all-en.atom.xml --- a/pelican/tests/output/custom/feeds/all-en.atom.xml +++ b/pelican/tests/output/custom/feeds/all-en.atom.xml @@ -1,12 +1,12 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-en.atom.xml" 
rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-en.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" 
rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -18,13 +18,13 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -57,5 +57,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all-fr.atom.xml b/pelican/tests/output/custom/feeds/all-fr.atom.xml --- a/pelican/tests/output/custom/feeds/all-fr.atom.xml +++ b/pelican/tests/output/custom/feeds/all-fr.atom.xml @@ -1,4 +1,4 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-03-02T14:01:01+01:00</updated><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</summary></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-03-02T14:01:01+01:00</updated><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</summary></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all.atom.xml b/pelican/tests/output/custom/feeds/all.atom.xml --- a/pelican/tests/output/custom/feeds/all.atom.xml +++ b/pelican/tests/output/custom/feeds/all.atom.xml @@ -1,14 +1,14 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" 
rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -20,13 +20,13 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; +</summary><category term="foo"></category><category 
term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -59,5 +59,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all.rss.xml b/pelican/tests/output/custom/feeds/all.rss.xml --- a/pelican/tests/output/custom/feeds/all.rss.xml +++ b/pelican/tests/output/custom/feeds/all.rss.xml @@ -1,14 +1,14 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/all.rss.xml" rel="self"></atom:link><lastBuildDate>Sun, 17 Nov 2013 
23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/filename_metadata-example.html</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</guid></item><item><title>Trop bien !</title><link>http://blog.notmyidea.org/oh-yeah-fr.html</link><description>&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 02 Mar 2012 14:01:01 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/second-article.html</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:second-article.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Deuxième article</title><link>http://blog.notmyidea.org/second-article-fr.html</link><description>&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/a-markdown-powered-article.html</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/filename_metadata-example.html</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</guid></item><item><title>Trop bien !</title><link>http://blog.notmyidea.org/oh-yeah-fr.html</link><description>&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 02 Mar 2012 14:01:01 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/second-article.html</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:second-article.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Deuxième article</title><link>http://blog.notmyidea.org/second-article-fr.html</link><description>&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</description><dc:creator 
xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/a-markdown-powered-article.html</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/article-1.html</link><description>&lt;p&gt;Article 1&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:article-1.html</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/article-2.html</link><description>&lt;p&gt;Article 2&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:article-2.html</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/article-3.html</link><description>&lt;p&gt;Article 3&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:article-3.html</guid></item><item><title>This is a super article !</title><link>http://blog.notmyidea.org/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/article-1.html</link><description>&lt;p&gt;Article 1&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-1.html</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/article-2.html</link><description>&lt;p&gt;Article 2&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-2.html</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/article-3.html</link><description>&lt;p&gt;Article 3&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid 
isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-3.html</guid></item><item><title>This is a super article !</title><link>http://blog.notmyidea.org/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -20,13 +20,13 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 17 Nov 2013 23:29:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/oh-yeah.html</link><description>&lt;div class="section" id="why-not"&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 02 Dec 2010 10:14:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/oh-yeah.html</link><description>&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -59,5 +59,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/bar.atom.xml b/pelican/tests/output/custom/feeds/bar.atom.xml --- a/pelican/tests/output/custom/feeds/bar.atom.xml +++ b/pelican/tests/output/custom/feeds/bar.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? 
It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; diff --git a/pelican/tests/output/custom/feeds/bar.rss.xml b/pelican/tests/output/custom/feeds/bar.rss.xml --- a/pelican/tests/output/custom/feeds/bar.rss.xml +++ b/pelican/tests/output/custom/feeds/bar.rss.xml @@ -1,8 +1,8 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/bar.rss.xml" rel="self"></atom:link><lastBuildDate>Wed, 20 Oct 2010 10:14:00 +0200</lastBuildDate><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/oh-yeah.html</link><description>&lt;div class="section" id="why-not"&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Oct 2010 10:14:00 +0200</lastBuildDate><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/oh-yeah.html</link><description>&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/cat1.atom.xml b/pelican/tests/output/custom/feeds/cat1.atom.xml --- a/pelican/tests/output/custom/feeds/cat1.atom.xml +++ b/pelican/tests/output/custom/feeds/cat1.atom.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/cat1.rss.xml b/pelican/tests/output/custom/feeds/cat1.rss.xml --- a/pelican/tests/output/custom/feeds/cat1.rss.xml +++ b/pelican/tests/output/custom/feeds/cat1.rss.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/cat1.rss.xml" rel="self"></atom:link><lastBuildDate>Wed, 20 Apr 2011 00:00:00 +0200</lastBuildDate><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/a-markdown-powered-article.html</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Apr 2011 00:00:00 +0200</lastBuildDate><item><title>A markdown powered 
article</title><link>http://blog.notmyidea.org/a-markdown-powered-article.html</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/article-1.html</link><description>&lt;p&gt;Article 1&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:article-1.html</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/article-2.html</link><description>&lt;p&gt;Article 2&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:article-2.html</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/article-3.html</link><description>&lt;p&gt;Article 3&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:article-3.html</guid></item></channel></rss> \ No newline at end of file +&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/article-1.html</link><description>&lt;p&gt;Article 1&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-1.html</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/article-2.html</link><description>&lt;p&gt;Article 2&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-2.html</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/article-3.html</link><description>&lt;p&gt;Article 3&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-3.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/misc.atom.xml b/pelican/tests/output/custom/feeds/misc.atom.xml --- a/pelican/tests/output/custom/feeds/misc.atom.xml +++ b/pelican/tests/output/custom/feeds/misc.atom.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link 
href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -34,5 +34,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/misc.rss.xml b/pelican/tests/output/custom/feeds/misc.rss.xml --- a/pelican/tests/output/custom/feeds/misc.rss.xml +++ b/pelican/tests/output/custom/feeds/misc.rss.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/misc.rss.xml" rel="self"></atom:link><lastBuildDate>Fri, 30 Nov 2012 00:00:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/filename_metadata-example.html</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/second-article.html</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:second-article.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Fri, 30 Nov 2012 00:00:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/filename_metadata-example.html</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/second-article.html</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:second-article.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -34,5 +34,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/yeah.atom.xml b/pelican/tests/output/custom/feeds/yeah.atom.xml --- a/pelican/tests/output/custom/feeds/yeah.atom.xml +++ b/pelican/tests/output/custom/feeds/yeah.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' 
log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/yeah.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/yeah.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; diff --git a/pelican/tests/output/custom/feeds/yeah.rss.xml b/pelican/tests/output/custom/feeds/yeah.rss.xml --- a/pelican/tests/output/custom/feeds/yeah.rss.xml +++ b/pelican/tests/output/custom/feeds/yeah.rss.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/yeah.rss.xml" rel="self"></atom:link><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>This is a super article !</title><link>http://blog.notmyidea.org/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>This is a super article !</title><link>http://blog.notmyidea.org/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -11,4 +11,4 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 17 Nov 2013 23:29:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</guid><category>foo</category><category>bar</category><category>foobar</category></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 02 Dec 2010 10:14:00 +0100</pubDate><guid 
isPermaLink="false">tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</guid><category>foo</category><category>bar</category><category>foobar</category></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml --- a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml @@ -1,12 +1,12 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" 
rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" 
rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -18,13 +18,13 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -57,5 +57,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.rss.xml b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.rss.xml --- a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.rss.xml @@ -1,12 +1,12 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/alexis-metaireau.rss.xml" rel="self"></atom:link><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA 
example</title><link>http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/</link><description>&lt;p&gt;Article 1&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/</link><description>&lt;p&gt;Article 2&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/</link><description>&lt;p&gt;Article 3&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</guid></item><item><title>This is a super article !</title><link>http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/</link><description>&lt;p&gt;Article 1&lt;/p&gt; 
+</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/</link><description>&lt;p&gt;Article 2&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/</link><description>&lt;p&gt;Article 3&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</guid></item><item><title>This is a super article !</title><link>http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/</link><description>&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -18,13 +18,13 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 17 Nov 2013 23:29:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/</link><description>&lt;div class="section" id="why-not"&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 02 Dec 2010 10:14:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/</link><description>&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -57,5 +57,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all-en.atom.xml b/pelican/tests/output/custom_locale/feeds/all-en.atom.xml --- a/pelican/tests/output/custom_locale/feeds/all-en.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/all-en.atom.xml @@ -1,12 +1,12 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-en.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; 
-</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-en.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; 
-</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -18,13 +18,13 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" 
rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -57,5 +57,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml b/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml --- a/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml @@ -1,4 +1,4 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" 
rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-03-02T14:01:01+01:00</updated><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</summary></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-03-02T14:01:01+01:00</updated><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</summary></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all.atom.xml b/pelican/tests/output/custom_locale/feeds/all.atom.xml --- a/pelican/tests/output/custom_locale/feeds/all.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/all.atom.xml @@ -1,14 +1,14 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</id><summary 
type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +</summary></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; 
&lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -20,13 +20,13 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -59,5 +59,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all.rss.xml b/pelican/tests/output/custom_locale/feeds/all.rss.xml --- a/pelican/tests/output/custom_locale/feeds/all.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/all.rss.xml @@ -1,14 +1,14 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/all.rss.xml" rel="self"></atom:link><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</guid></item><item><title>Trop bien !</title><link>http://blog.notmyidea.org/oh-yeah-fr.html</link><description>&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 02 Mar 2012 14:01:01 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Deuxième article</title><link>http://blog.notmyidea.org/second-article-fr.html</link><description>&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</description><dc:creator 
xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</guid></item><item><title>Trop bien !</title><link>http://blog.notmyidea.org/oh-yeah-fr.html</link><description>&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 02 Mar 2012 14:01:01 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-03-02:oh-yeah-fr.html</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Deuxième article</title><link>http://blog.notmyidea.org/second-article-fr.html</link><description>&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:second-article-fr.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/</link><description>&lt;p&gt;Article 1&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 
+0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/</link><description>&lt;p&gt;Article 2&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/</link><description>&lt;p&gt;Article 3&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</guid></item><item><title>This is a super article !</title><link>http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/</link><description>&lt;p&gt;Article 1&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/</link><description>&lt;p&gt;Article 2&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/</link><description>&lt;p&gt;Article 3&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</guid></item><item><title>This is a super article !</title><link>http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/</link><description>&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -20,13 +20,13 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 17 Nov 2013 23:29:00 
+0100</pubDate><guid>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/</link><description>&lt;div class="section" id="why-not"&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 02 Dec 2010 10:14:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</guid><category>foo</category><category>bar</category><category>foobar</category></item><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/</link><description>&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</guid><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -59,5 +59,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/bar.atom.xml b/pelican/tests/output/custom_locale/feeds/bar.atom.xml --- a/pelican/tests/output/custom_locale/feeds/bar.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/bar.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; diff --git a/pelican/tests/output/custom_locale/feeds/bar.rss.xml b/pelican/tests/output/custom_locale/feeds/bar.rss.xml --- a/pelican/tests/output/custom_locale/feeds/bar.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/bar.rss.xml @@ -1,8 +1,8 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/bar.rss.xml" rel="self"></atom:link><lastBuildDate>Wed, 20 Oct 2010 10:14:00 +0200</lastBuildDate><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/</link><description>&lt;div class="section" id="why-not"&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Oct 2010 10:14:00 +0200</lastBuildDate><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/</link><description>&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/cat1.atom.xml b/pelican/tests/output/custom_locale/feeds/cat1.atom.xml --- a/pelican/tests/output/custom_locale/feeds/cat1.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/cat1.atom.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" 
rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; +</summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; +</summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/cat1.rss.xml b/pelican/tests/output/custom_locale/feeds/cat1.rss.xml --- a/pelican/tests/output/custom_locale/feeds/cat1.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/cat1.rss.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<rss 
version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/cat1.rss.xml" rel="self"></atom:link><lastBuildDate>Wed, 20 Apr 2011 00:00:00 +0200</lastBuildDate><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Apr 2011 00:00:00 +0200</lastBuildDate><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/</link><description>&lt;p&gt;Article 1&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/</link><description>&lt;p&gt;Article 2&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/</link><description>&lt;p&gt;Article 3&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</guid></item></channel></rss> \ No newline at end of file +&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/</link><description>&lt;p&gt;Article 1&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/</link><description>&lt;p&gt;Article 2&lt;/p&gt; 
+</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/</link><description>&lt;p&gt;Article 3&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/misc.atom.xml b/pelican/tests/output/custom_locale/feeds/misc.atom.xml --- a/pelican/tests/output/custom_locale/feeds/misc.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/misc.atom.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -34,5 +34,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </summary></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/misc.rss.xml b/pelican/tests/output/custom_locale/feeds/misc.rss.xml --- a/pelican/tests/output/custom_locale/feeds/misc.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/misc.rss.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link 
href="http://blog.notmyidea.org/feeds/misc.rss.xml" rel="self"></atom:link><lastBuildDate>Fri, 30 Nov 2012 00:00:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Fri, 30 Nov 2012 00:00:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; @@ -34,5 +34,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/yeah.atom.xml b/pelican/tests/output/custom_locale/feeds/yeah.atom.xml --- a/pelican/tests/output/custom_locale/feeds/yeah.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/yeah.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/yeah.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" rel="alternate"></link><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/yeah.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" 
rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; diff --git a/pelican/tests/output/custom_locale/feeds/yeah.rss.xml b/pelican/tests/output/custom_locale/feeds/yeah.rss.xml --- a/pelican/tests/output/custom_locale/feeds/yeah.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/yeah.rss.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><atom:link href="http://blog.notmyidea.org/feeds/yeah.rss.xml" rel="self"></atom:link><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>This is a super article !</title><link>http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>This is a super article !</title><link>http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/</link><description>&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -11,4 +11,4 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 17 Nov 2013 23:29:00 +0100</pubDate><guid>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</guid><category>foo</category><category>bar</category><category>foobar</category></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 02 Dec 2010 10:14:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</guid><category>foo</category><category>bar</category><category>foobar</category></item></channel></rss> \ No newline at end of file
Atom feed could/should use <published> date

I've noticed that my atom feed will use `:date:` in the `<updated>` field in feed.xml, and will override that with `:modified:` if modified is specified. This results in an unwanted email re-send when using MailChimp's RSS --> Email feature. Going by this page, I think they would use `<published>` first if it existed: http://kb.mailchimp.com/campaigns/rss-in-campaigns/troubleshooting-rss-in-campaigns

And I think in general it would be nice to call out both `<published>` to mark the original date, and `<updated>` for modifications. (And it aligns better with what I expect with `:date:` and `:modified:` meta in my .rst files.)

Not a big problem -- I'll just have to refrain from using `:modified:` to avoid this behavior, but I figured it wouldn't hurt to ask. :-)
feedgenerator would need to be updated to the current Django implementation, which would offer [updateddate](https://github.com/django/django/blob/master/django/utils/feedgenerator.py#L119). After that it is trivial to adapt Pelican's code to make use of it.

The [current code](https://github.com/getpelican/pelican/blob/72ee73f886d05cea10c39bddec1ba99a3aae9504/pelican/writers.py#L57-L59) for an individual item is:

``` python
pubdate=set_date_tzinfo(
    item.modified if hasattr(item, 'modified') else item.date,
    self.settings.get('TIMEZONE', None)))
```

In the current [feedgenerator.py](https://github.com/django/django/blob/60586dd7379b295b72d8af4e03423c286913b5e8/django/utils/feedgenerator.py) in Django, the root element has (https://github.com/django/django/blob/60586dd7379b295b72d8af4e03423c286913b5e8/django/utils/feedgenerator.py#L378):

``` python
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
```

And for individual items (https://github.com/django/django/blob/60586dd7379b295b72d8af4e03423c286913b5e8/django/utils/feedgenerator.py#L404):

``` python
if item['pubdate'] is not None:
    handler.addQuickElement('published', rfc3339_date(item['pubdate']))
if item['updateddate'] is not None:
    handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
```

The Atom specification for the [root feed element](http://tools.ietf.org/html/rfc4287#section-4.1.1) has:

```
atomFeed =
   element atom:feed {
      atomCommonAttributes,
      (atomAuthor*
       & atomCategory*
       & atomContributor*
       & atomGenerator?
       & atomIcon?
       & atomId
       & atomLink*
       & atomLogo?
       & atomRights?
       & atomSubtitle?
       & atomTitle
       & atomUpdated
       & extensionElement*),
      atomEntry*
   }
```

There is no `published` element for the `feed` root element.

So feedgenerator already has the capability to publish both `updated` and `published` for each `item`. So I guess we just need a patch for Pelican.
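To make that last point concrete, here is a small, self-contained sketch (illustration only, not Pelican's code) showing that Django's feedgenerator already emits both elements once it is handed two separate dates. It assumes a Django version whose `Atom1Feed.add_item()` accepts `updateddate`, i.e. the implementation linked above rather than Pelican's bundled copy; the article values are made up for the example.

``` python
from datetime import datetime

from django.utils import feedgenerator

feed = feedgenerator.Atom1Feed(
    title="Example feed",
    link="http://example.com/",
    description="Example feed for the published/updated distinction",
)
feed.add_item(
    title="Example article",
    link="http://example.com/example-article.html",
    description="Some content here !",
    pubdate=datetime(2010, 12, 2, 10, 14),       # would come from :date:
    updateddate=datetime(2013, 11, 17, 23, 29),  # would come from :modified:
)
# The generated entry now carries both <published> and <updated>.
print(feed.writeString("utf-8"))
```

On the Pelican side, the patch would then boil down to passing `item.date` as `pubdate` and, only when present, `item.modified` as `updateddate`, instead of collapsing both into a single `pubdate` value.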
2016-04-04T08:08:22Z
[]
[]
getpelican/pelican
1945
getpelican__pelican-1945
[ "1291", "1291", "1465", "1291" ]
9cff2efb62ccecc3ab262d5a28b62f9a6d851c56
diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -309,24 +309,26 @@ def generate_feeds(self, writer): if self.settings.get('CATEGORY_FEED_ATOM'): writer.write_feed(arts, self.context, self.settings['CATEGORY_FEED_ATOM'] - % cat.slug) + % cat.slug, feed_title=cat.name) if self.settings.get('CATEGORY_FEED_RSS'): writer.write_feed(arts, self.context, self.settings['CATEGORY_FEED_RSS'] - % cat.slug, feed_type='rss') + % cat.slug, feed_title=cat.name, + feed_type='rss') for auth, arts in self.authors: arts.sort(key=attrgetter('date'), reverse=True) if self.settings.get('AUTHOR_FEED_ATOM'): writer.write_feed(arts, self.context, self.settings['AUTHOR_FEED_ATOM'] - % auth.slug) + % auth.slug, feed_title=auth.name) if self.settings.get('AUTHOR_FEED_RSS'): writer.write_feed(arts, self.context, self.settings['AUTHOR_FEED_RSS'] - % auth.slug, feed_type='rss') + % auth.slug, feed_title=auth.name, + feed_type='rss') if (self.settings.get('TAG_FEED_ATOM') or self.settings.get('TAG_FEED_RSS')): @@ -335,12 +337,12 @@ def generate_feeds(self, writer): if self.settings.get('TAG_FEED_ATOM'): writer.write_feed(arts, self.context, self.settings['TAG_FEED_ATOM'] - % tag.slug) + % tag.slug, feed_title=tag.name) if self.settings.get('TAG_FEED_RSS'): writer.write_feed(arts, self.context, self.settings['TAG_FEED_RSS'] % tag.slug, - feed_type='rss') + feed_title=tag.name, feed_type='rss') if (self.settings.get('TRANSLATION_FEED_ATOM') or self.settings.get('TRANSLATION_FEED_RSS')): diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -31,11 +31,14 @@ def __init__(self, output_path, settings=None): self._written_files = set() self._overridden_files = set() - def _create_new_feed(self, feed_type, context): + def _create_new_feed(self, feed_type, feed_title, context): feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed - sitename = Markup(context['SITENAME']).striptags() + if feed_title: + feed_title = context['SITENAME'] + ' - ' + feed_title + else: + feed_title = context['SITENAME'] feed = feed_class( - title=sitename, + title=Markup(feed_title).striptags(), link=(self.site_url + '/'), feed_url=self.feed_url, description=context.get('SITESUBTITLE', '')) @@ -84,7 +87,7 @@ def _open_w(self, filename, encoding, override=False): return open(filename, 'w', encoding=encoding) def write_feed(self, elements, context, path=None, feed_type='atom', - override_output=False): + override_output=False, feed_title=None): """Generate a feed with the list of articles provided Return the feed. If no path or output_path is specified, just @@ -97,6 +100,7 @@ def write_feed(self, elements, context, path=None, feed_type='atom', :param override_output: boolean telling if we can override previous output with the same name (and if next files written with the same name should be skipped to keep that one) + :param feed_title: the title of the feed.o """ if not is_selected_for_writing(self.settings, path): return @@ -107,7 +111,7 @@ def write_feed(self, elements, context, path=None, feed_type='atom', self.feed_domain = context.get('FEED_DOMAIN') self.feed_url = '{}/{}'.format(self.feed_domain, path) - feed = self._create_new_feed(feed_type, context) + feed = self._create_new_feed(feed_type, feed_title, context) max_items = len(elements) if self.settings['FEED_MAX_ITEMS']:
diff --git a/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml b/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml --- a/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml +++ b/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+00:00</published><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog - Alexis Métaireau</title><link href="/" rel="alternate"></link><link href="/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+00:00</published><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -17,4 +17,4 @@ YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="|filename|/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry></feed> \ No newline at end of file +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry></feed> diff --git a/pelican/tests/output/basic/feeds/alexis-metaireau.rss.xml b/pelican/tests/output/basic/feeds/alexis-metaireau.rss.xml --- a/pelican/tests/output/basic/feeds/alexis-metaireau.rss.xml +++ b/pelican/tests/output/basic/feeds/alexis-metaireau.rss.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0"><channel><title>A Pelican Blog</title><link>/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0000</lastBuildDate><item><title>This is a super article !</title><link>/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +<rss version="2.0"><channel><title>A Pelican Blog - Alexis Métaireau</title><link>/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0000</lastBuildDate><item><title>This is a super article !</title><link>/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -17,4 +17,4 @@ YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="|filename|/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; 
&lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0000</pubDate><guid isPermaLink="false">tag:,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0000</pubDate><guid isPermaLink="false">tag:,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> diff --git a/pelican/tests/output/basic/feeds/bar.atom.xml b/pelican/tests/output/basic/feeds/bar.atom.xml --- a/pelican/tests/output/basic/feeds/bar.atom.xml +++ b/pelican/tests/output/basic/feeds/bar.atom.xml @@ -1,8 +1,8 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/bar.atom.xml" rel="self"></link><id>/</id><updated>2010-10-20T10:14:00+00:00</updated><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><author><name>Alexis Métaireau</name></author><id>tag:,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog - bar</title><link href="/" rel="alternate"></link><link href="/feeds/bar.atom.xml" rel="self"></link><id>/</id><updated>2010-10-20T10:14:00+00:00</updated><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><author><name>Alexis Métaireau</name></author><id>tag:,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="|filename|/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry></feed> \ No newline at end of file +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry></feed> diff --git a/pelican/tests/output/basic/feeds/cat1.atom.xml b/pelican/tests/output/basic/feeds/cat1.atom.xml --- a/pelican/tests/output/basic/feeds/cat1.atom.xml +++ b/pelican/tests/output/basic/feeds/cat1.atom.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/cat1.atom.xml" rel="self"></link><id>/</id><updated>2011-04-20T00:00:00+00:00</updated><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog - cat1</title><link href="/" rel="alternate"></link><link href="/feeds/cat1.atom.xml" rel="self"></link><id>/</id><updated>2011-04-20T00:00:00+00:00</updated><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; &lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; </summary></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; </summary></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><author><name></name></author><id>tag:,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary></entry></feed> \ No newline at end of file +</summary></entry></feed> diff --git a/pelican/tests/output/basic/feeds/misc.atom.xml b/pelican/tests/output/basic/feeds/misc.atom.xml --- a/pelican/tests/output/basic/feeds/misc.atom.xml +++ b/pelican/tests/output/basic/feeds/misc.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/misc.atom.xml" rel="self"></link><id>/</id><updated>2012-11-30T00:00:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" 
rel="alternate"></link><published>2012-11-30T00:00:00+00:00</published><author><name></name></author><id>tag:,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog - misc</title><link href="/" rel="alternate"></link><link href="/feeds/misc.atom.xml" rel="self"></link><id>/</id><updated>2012-11-30T00:00:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+00:00</published><author><name></name></author><id>tag:,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; </summary></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><author><name></name></author><id>tag:,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+00:00</published><author><name></name></author><id>tag:,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; @@ -35,4 +35,4 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; </summary></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+00:00</published><author><name></name></author><id>tag:,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary></entry></feed> \ No newline at end of file +</summary></entry></feed> diff --git a/pelican/tests/output/basic/feeds/yeah.atom.xml b/pelican/tests/output/basic/feeds/yeah.atom.xml --- a/pelican/tests/output/basic/feeds/yeah.atom.xml +++ b/pelican/tests/output/basic/feeds/yeah.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/yeah.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+00:00</published><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog - yeah</title><link href="/" rel="alternate"></link><link href="/feeds/yeah.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+00:00</published><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some 
content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -11,4 +11,4 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry></feed> \ No newline at end of file +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry></feed> diff --git a/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml b/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml --- a/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml +++ b/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - Alexis Métaireau</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; </summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; @@ -58,4 +58,4 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; </summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" 
rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary></entry></feed> \ No newline at end of file +</summary></entry></feed> diff --git a/pelican/tests/output/custom/feeds/alexis-metaireau.rss.xml b/pelican/tests/output/custom/feeds/alexis-metaireau.rss.xml --- a/pelican/tests/output/custom/feeds/alexis-metaireau.rss.xml +++ b/pelican/tests/output/custom/feeds/alexis-metaireau.rss.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/filename_metadata-example.html</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log - Alexis Métaireau</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/filename_metadata-example.html</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/second-article.html</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:second-article.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/a-markdown-powered-article.html</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; @@ -58,4 +58,4 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> diff --git 
a/pelican/tests/output/custom/feeds/bar.atom.xml b/pelican/tests/output/custom/feeds/bar.atom.xml --- a/pelican/tests/output/custom/feeds/bar.atom.xml +++ b/pelican/tests/output/custom/feeds/bar.atom.xml @@ -1,8 +1,8 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - bar</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry></feed> \ No newline at end of file +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry></feed> diff --git a/pelican/tests/output/custom/feeds/bar.rss.xml b/pelican/tests/output/custom/feeds/bar.rss.xml --- a/pelican/tests/output/custom/feeds/bar.rss.xml +++ b/pelican/tests/output/custom/feeds/bar.rss.xml @@ -1,8 +1,8 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Oct 2010 10:14:00 +0200</lastBuildDate><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/oh-yeah.html</link><description>&lt;div class="section" id="why-not"&gt; +<rss version="2.0"><channel><title>Alexis' log - bar</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Oct 2010 10:14:00 +0200</lastBuildDate><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/oh-yeah.html</link><description>&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:oh-yeah.html</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> diff --git a/pelican/tests/output/custom/feeds/cat1.atom.xml b/pelican/tests/output/custom/feeds/cat1.atom.xml --- a/pelican/tests/output/custom/feeds/cat1.atom.xml +++ b/pelican/tests/output/custom/feeds/cat1.atom.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - cat1</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; &lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; </summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; </summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary></entry></feed> \ No newline at end of file +</summary></entry></feed> diff --git a/pelican/tests/output/custom/feeds/cat1.rss.xml b/pelican/tests/output/custom/feeds/cat1.rss.xml --- a/pelican/tests/output/custom/feeds/cat1.rss.xml +++ b/pelican/tests/output/custom/feeds/cat1.rss.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Apr 2011 00:00:00 +0200</lastBuildDate><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/a-markdown-powered-article.html</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log - cat1</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Apr 2011 00:00:00 +0200</lastBuildDate><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/a-markdown-powered-article.html</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; &lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-04-20:a-markdown-powered-article.html</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/article-1.html</link><description>&lt;p&gt;Article 1&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-1.html</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/article-2.html</link><description>&lt;p&gt;Article 2&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-2.html</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/article-3.html</link><description>&lt;p&gt;Article 3&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-3.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:article-3.html</guid></item></channel></rss> diff --git a/pelican/tests/output/custom/feeds/misc.atom.xml b/pelican/tests/output/custom/feeds/misc.atom.xml --- a/pelican/tests/output/custom/feeds/misc.atom.xml +++ b/pelican/tests/output/custom/feeds/misc.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" 
rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - misc</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; </summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; @@ -35,4 +35,4 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; </summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary></entry></feed> \ No newline at end of file +</summary></entry></feed> diff --git a/pelican/tests/output/custom/feeds/misc.rss.xml b/pelican/tests/output/custom/feeds/misc.rss.xml --- a/pelican/tests/output/custom/feeds/misc.rss.xml +++ b/pelican/tests/output/custom/feeds/misc.rss.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Fri, 30 Nov 2012 00:00:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/filename_metadata-example.html</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log - misc</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Fri, 30 Nov 2012 00:00:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/filename_metadata-example.html</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:filename_metadata-example.html</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/second-article.html</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:second-article.html</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; @@ -35,4 +35,4 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-15:unbelievable.html</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> diff --git a/pelican/tests/output/custom/feeds/yeah.atom.xml b/pelican/tests/output/custom/feeds/yeah.atom.xml --- a/pelican/tests/output/custom/feeds/yeah.atom.xml +++ b/pelican/tests/output/custom/feeds/yeah.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/yeah.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - yeah</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/yeah.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -11,4 +11,4 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry></feed> \ No newline at end of file +</summary><category term="foo"></category><category 
term="bar"></category><category term="foobar"></category></entry></feed> diff --git a/pelican/tests/output/custom/feeds/yeah.rss.xml b/pelican/tests/output/custom/feeds/yeah.rss.xml --- a/pelican/tests/output/custom/feeds/yeah.rss.xml +++ b/pelican/tests/output/custom/feeds/yeah.rss.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>This is a super article !</title><link>http://blog.notmyidea.org/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log - yeah</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>This is a super article !</title><link>http://blog.notmyidea.org/this-is-a-super-article.html</link><description>&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -11,4 +11,4 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 02 Dec 2010 10:14:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</guid><category>foo</category><category>bar</category><category>foobar</category></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 02 Dec 2010 10:14:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-12-02:this-is-a-super-article.html</guid><category>foo</category><category>bar</category><category>foobar</category></item></channel></rss> diff --git a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml --- a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - Alexis Métaireau</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link 
href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; </summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; @@ -58,4 +58,4 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; </summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary></entry></feed> \ No newline at end of file +</summary></entry></feed> diff --git a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.rss.xml b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.rss.xml --- a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.rss.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log - Alexis Métaireau</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/</link><description>&lt;p&gt;This is some article, in 
english&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; @@ -58,4 +58,4 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> diff --git a/pelican/tests/output/custom_locale/feeds/bar.atom.xml b/pelican/tests/output/custom_locale/feeds/bar.atom.xml --- a/pelican/tests/output/custom_locale/feeds/bar.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/bar.atom.xml @@ -1,8 +1,8 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - bar</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry></feed> \ No newline at end of file +</summary><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry></feed> diff --git a/pelican/tests/output/custom_locale/feeds/bar.rss.xml b/pelican/tests/output/custom_locale/feeds/bar.rss.xml --- a/pelican/tests/output/custom_locale/feeds/bar.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/bar.rss.xml @@ -1,8 +1,8 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Oct 2010 10:14:00 +0200</lastBuildDate><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/</link><description>&lt;div class="section" id="why-not"&gt; +<rss version="2.0"><channel><title>Alexis' log - bar</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Oct 2010 10:14:00 +0200</lastBuildDate><item><title>Oh yeah !</title><link>http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/</link><description>&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:posts/2010/octobre/20/oh-yeah/</guid><category>oh</category><category>bar</category><category>yeah</category></item></channel></rss> diff --git a/pelican/tests/output/custom_locale/feeds/cat1.atom.xml b/pelican/tests/output/custom_locale/feeds/cat1.atom.xml --- a/pelican/tests/output/custom_locale/feeds/cat1.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/cat1.atom.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - cat1</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link 
href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; &lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; </summary></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; </summary></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary></entry></feed> \ No newline at end of file +</summary></entry></feed> diff --git a/pelican/tests/output/custom_locale/feeds/cat1.rss.xml b/pelican/tests/output/custom_locale/feeds/cat1.rss.xml --- a/pelican/tests/output/custom_locale/feeds/cat1.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/cat1.rss.xml @@ -1,7 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Apr 2011 00:00:00 +0200</lastBuildDate><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log - cat1</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Wed, 20 Apr 2011 00:00:00 +0200</lastBuildDate><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; &lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Apr 2011 00:00:00 +0200</pubDate><guid 
isPermaLink="false">tag:blog.notmyidea.org,2011-04-20:posts/2011/avril/20/a-markdown-powered-article/</guid></item><item><title>Article 1</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/</link><description>&lt;p&gt;Article 1&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-1/</guid></item><item><title>Article 2</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/</link><description>&lt;p&gt;Article 2&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-2/</guid></item><item><title>Article 3</title><link>http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/</link><description>&lt;p&gt;Article 3&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 17 Feb 2011 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2011-02-17:posts/2011/février/17/article-3/</guid></item></channel></rss> diff --git a/pelican/tests/output/custom_locale/feeds/misc.atom.xml b/pelican/tests/output/custom_locale/feeds/misc.atom.xml --- a/pelican/tests/output/custom_locale/feeds/misc.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/misc.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - misc</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; </summary></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" 
rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; </summary><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; @@ -35,4 +35,4 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; </summary></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary></entry></feed> \ No newline at end of file +</summary></entry></feed> diff --git a/pelican/tests/output/custom_locale/feeds/misc.rss.xml b/pelican/tests/output/custom_locale/feeds/misc.rss.xml --- a/pelican/tests/output/custom_locale/feeds/misc.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/misc.rss.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Fri, 30 Nov 2012 00:00:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log - misc</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Fri, 30 Nov 2012 00:00:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:posts/2012/novembre/30/filename_metadata-example/</guid></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:posts/2012/février/29/second-article/</guid><category>foo</category><category>bar</category><category>baz</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; @@ -35,4 +35,4 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 15 Oct 2010 20:30:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-15:posts/2010/octobre/15/unbelievable/</guid></item><item><title>The baz tag</title><link>http://blog.notmyidea.org/tag/baz.html</link><description>&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Sun, 14 Mar 2010 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-03-14:tag/baz.html</guid></item></channel></rss> diff --git a/pelican/tests/output/custom_locale/feeds/yeah.atom.xml b/pelican/tests/output/custom_locale/feeds/yeah.atom.xml --- a/pelican/tests/output/custom_locale/feeds/yeah.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/yeah.atom.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/yeah.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - yeah</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/yeah.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -11,4 +11,4 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</summary><category term="foo"></category><category term="bar"></category><category 
term="foobar"></category></entry></feed> \ No newline at end of file +</summary><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry></feed> diff --git a/pelican/tests/output/custom_locale/feeds/yeah.rss.xml b/pelican/tests/output/custom_locale/feeds/yeah.rss.xml --- a/pelican/tests/output/custom_locale/feeds/yeah.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/yeah.rss.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>This is a super article !</title><link>http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/</link><description>&lt;p&gt;Some content here !&lt;/p&gt; +<rss version="2.0"><channel><title>Alexis' log - yeah</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>This is a super article !</title><link>http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/</link><description>&lt;p&gt;Some content here !&lt;/p&gt; &lt;div class="section" id="this-is-a-simple-title"&gt; &lt;h2&gt;This is a simple title&lt;/h2&gt; &lt;p&gt;And here comes the cool &lt;a class="reference external" href="http://books.couchdb.org/relax/design-documents/views"&gt;stuff&lt;/a&gt;.&lt;/p&gt; @@ -11,4 +11,4 @@ &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 02 Dec 2010 10:14:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</guid><category>foo</category><category>bar</category><category>foobar</category></item></channel></rss> \ No newline at end of file +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Thu, 02 Dec 2010 10:14:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-12-02:posts/2010/décembre/02/this-is-a-super-article/</guid><category>foo</category><category>bar</category><category>foobar</category></item></channel></rss>
Ambiguous feed titles on sites with multiple categories If you have multiple categories in your pelican site, the feeds for all of them will have the same title. On feed aggregation sites like feedly, this can be confusing when a user searches for your web site only to see a list of feeds with the same name (your site name) and different content. I think the solution to this should be to include the category name in the feed title. I couldn't figure out a way to customize the feed titles with configuration or templating. Fix #1291: Feeds have ambiguous titles on sites with multiple categories... This fixes #1291 by generating proper feed titles in the form "[sitename] - [category]". This applies to category, tag, and author feeds. Other feeds use just the sitename as title. Not sure what to do with the titles of translation feeds (I'll gladly take advice on that).
To be clear, I'm talking about the title _in_ the generated feed (e.g. atom xml content) itself, not in the <link> tag that links to the feed.
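The pull request description above proposes feed titles of the form "[sitename] - [category]" for category, tag, and author feeds, while other feeds keep just the site name. As a rough illustration only (not the actual Pelican writer code), a minimal sketch of that naming rule might look like the following, where `sitename` and `feed_name` are hypothetical inputs:

```python
def feed_title(sitename, feed_name=None):
    """Compose a feed title of the form '<sitename> - <feed name>'.

    `feed_name` stands in for a category, tag, or author name; the
    all-articles feed keeps just the site name.
    """
    if feed_name:
        return "{} - {}".format(sitename, feed_name)
    return sitename


# Category feeds get distinct titles, the main feed does not:
print(feed_title("Alexis' log", "cat1"))  # Alexis' log - cat1
print(feed_title("Alexis' log"))          # Alexis' log
```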
2016-04-17T15:15:03Z
[]
[]
getpelican/pelican
1959
getpelican__pelican-1959
[ "1464", "1464" ]
2ceeb88c631b731b05877026cd3c7c26f34e5124
diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -607,7 +607,10 @@ def default_metadata(settings=None, process=None): metadata['category'] = value if settings.get('DEFAULT_DATE', None) and \ settings['DEFAULT_DATE'] != 'fs': - metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE']) + if isinstance(settings['DEFAULT_DATE'], six.string_types): + metadata['date'] = get_date(settings['DEFAULT_DATE']) + else: + metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE']) return metadata
diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -359,6 +359,15 @@ def test_article_with_multiple_authors_list(self): self.assertDictHasSubset(page.metadata, expected) + def test_default_date_formats(self): + tuple_date = self.read_file(path='article.rst', + DEFAULT_DATE=(2012, 5, 1)) + string_date = self.read_file(path='article.rst', + DEFAULT_DATE='2012-05-01') + + self.assertEqual(tuple_date.metadata['date'], + string_date.metadata['date']) + @unittest.skipUnless(readers.Markdown, "markdown isn't installed") class MdReaderTest(ReaderTest):
Support string formatted dates for DEFAULT_DATE Currently the documentation implies that it supports various formats, but does not explain which. The most intuitive one however is not supported. It would make sense that this option would support the format of the meta tag that it seeks to provide a default value for, as this would be the most intuitive value type.
I'm not sure what you mean by "string formatted dates", but there is `DEFAULT_DATE_FORMAT` and `DATE_FORMATS` to control the output format of dates. `DEFAULT_DATE` is supposed to fill in the `date` field for articles that doesn't specify it. I mean I should be able to define it as: ``` DEFAULT_DATE = "2014-09-16" ``` Currently this does not work. Well, you can do: ``` DEFAULT_DATE = (2014, 9, 16) ``` I know, I've gotten around the problem already, I'm giving a suggestion on how to improve Pelican and make it more intuitive for newcomers. Your suggestion, while valid, is not intuitive. I don't think it's a question about whether it's intuitive — it's a question of documentation clarity. I've assigned this issue to myself and will add an example to the Docs > Settings > DEFAULT_DATE entry in order to make this clearer. Personally I disagree, yes the documentation should cover it but there is no reason the software cannot facilitate the most intuitive value for this variable. Especially considering it is meant to cover a meta field which holds the value as a string format. Not everyone reads the manual. Personally I don't read the manual unless I got stuck somewhere. Good software should not "need" a manual (not saying you shouldn't have a manual, I'm saying you shouldn't "need" it). If you would like to propose, and subsequently implement, a solution that you feel would be superior to the current method, you are of course completely welcome to do so. As it says in our documentation, improvements are encouraged — that is what open source is all about. (^_^) Regarding documentation, I do not agree with your stance on that topic. We try to make Pelican as easy to use as we can, and indeed it is — in my opinion — relatively easy to get started. But saying that you shouldn't need to read the documentation in order to _configure_ Pelican, with all of the switches and knobs that provide its flexibility, is simply madness. I'll leave the documentation topic alone as that's a whole different conversation. Of course it's up to you how you take my suggestion. I will say though that my suggestion is not meant to be "superior", it's simply meant to make things easier for newcomers. I don't see any technical reason why you wouldnt want to support string formatted dates. I personally do not really care for my own use whether they are supported or not because I by now know what format to use. My only point in creating this ticket was to communicate to you "hey, I'm new, and I got stuck on this. You could avoid other users getting stuck on the same thing by adding support for string formatted dates". Principle of least surprise would lead me to anticipate that `DEFAULT_DATE` accepts the same format as an article's `date` metadata field. The simplest change I can imagine is to take this in `readers.py`: ``` metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE']) ``` And instead call `get_date` from `utils` if we have a string: ``` if isinstance(settings['DEFAULT_DATE'], basestring): metadata['date'] = get_date(settings['DEFAULT_DATE']) else: metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE']) ``` This would preserve the current handling of a tuple argument, while also accepting any string that's currently acceptable for article dates. I'm new to Python development, so it's possible that's a silly suggestion. If no one objects, I'll see if I can get as far as setting up tests and docs for that? @Naatan: Good point, and well taken. Thanks for the feedback. Certainly didn't mean to discourage that! 
@areyoutoo: Sounds fine to me. Anyone else have any thoughts on this proposal?
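The merged patch above normalizes `DEFAULT_DATE` so that a string and a tuple produce the same date, exactly as proposed in this thread. A self-contained sketch of that branching logic, using the standard `datetime` type and the `dateutil` parser as stand-ins for Pelican's own `SafeDatetime` and `get_date` helpers (and assuming `python-dateutil` is installed), might look like this:

```python
from datetime import datetime

from dateutil.parser import parse  # stand-in for Pelican's get_date()


def default_date_to_datetime(default_date):
    """Accept DEFAULT_DATE as either a (year, month, day) tuple or a date string."""
    if isinstance(default_date, str):
        return parse(default_date)
    return datetime(*default_date)


# Both settings styles should yield the same value:
assert default_date_to_datetime("2012-05-01") == default_date_to_datetime((2012, 5, 1))
```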
2016-05-24T14:23:22Z
[]
[]
getpelican/pelican
1982
getpelican__pelican-1982
[ "1982" ]
98d1d4e3387dc70a062f1897fa5d8f71d5431e2c
diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -2,6 +2,7 @@ from __future__ import print_function, unicode_literals import calendar +import errno import fnmatch import logging import os @@ -20,9 +21,8 @@ from pelican.cache import FileStampDataCacher from pelican.contents import Article, Draft, Page, Static, is_valid_content from pelican.readers import Readers -from pelican.utils import (DateFormatter, copy, copy_file_metadata, mkdir_p, - posixize_path, process_translations, - python_2_unicode_compatible) +from pelican.utils import (DateFormatter, copy, mkdir_p, posixize_path, + process_translations, python_2_unicode_compatible) logger = logging.getLogger(__name__) @@ -682,21 +682,9 @@ class StaticGenerator(Generator): def __init__(self, *args, **kwargs): super(StaticGenerator, self).__init__(*args, **kwargs) + self.fallback_to_symlinks = False signals.static_generator_init.send(self) - def _copy_paths(self, paths, source, destination, output_path, - final_path=None): - """Copy all the paths from source to destination""" - for path in paths: - if final_path: - copy(os.path.join(source, path), - os.path.join(output_path, destination, final_path), - self.settings['IGNORE_FILES']) - else: - copy(os.path.join(source, path), - os.path.join(output_path, destination, path), - self.settings['IGNORE_FILES']) - def generate_context(self): self.staticfiles = [] for f in self.get_files(self.settings['STATIC_PATHS'], @@ -724,13 +712,88 @@ def generate_output(self, writer): self._copy_paths(self.settings['THEME_STATIC_PATHS'], self.theme, self.settings['THEME_STATIC_DIR'], self.output_path, os.curdir) - # copy all Static files for sc in self.context['staticfiles']: - source_path = os.path.join(self.path, sc.source_path) - save_as = os.path.join(self.output_path, sc.save_as) - mkdir_p(os.path.dirname(save_as)) - logger.info('Copying %s to %s', sc.source_path, sc.save_as) - copy_file_metadata(source_path, save_as) + if self._file_update_required(sc): + self._link_or_copy_staticfile(sc) + else: + logger.debug('%s is up to date, not copying', sc.source_path) + + def _copy_paths(self, paths, source, destination, output_path, + final_path=None): + """Copy all the paths from source to destination""" + for path in paths: + if final_path: + copy(os.path.join(source, path), + os.path.join(output_path, destination, final_path), + self.settings['IGNORE_FILES']) + else: + copy(os.path.join(source, path), + os.path.join(output_path, destination, path), + self.settings['IGNORE_FILES']) + + def _file_update_required(self, staticfile): + source_path = os.path.join(self.path, staticfile.source_path) + save_as = os.path.join(self.output_path, staticfile.save_as) + if not os.path.exists(save_as): + return True + elif (self.settings['STATIC_CREATE_LINKS'] and + os.path.samefile(source_path, save_as)): + return False + elif (self.settings['STATIC_CREATE_LINKS'] and + os.path.realpath(save_as) == source_path): + return False + elif not self.settings['STATIC_CHECK_IF_MODIFIED']: + return True + else: + return self._source_is_newer(staticfile) + + def _source_is_newer(self, staticfile): + source_path = os.path.join(self.path, staticfile.source_path) + save_as = os.path.join(self.output_path, staticfile.save_as) + s_mtime = os.path.getmtime(source_path) + d_mtime = os.path.getmtime(save_as) + return s_mtime > d_mtime + + def _link_or_copy_staticfile(self, sc): + if self.settings['STATIC_CREATE_LINKS']: + self._link_staticfile(sc) + else: + 
self._copy_staticfile(sc) + + def _copy_staticfile(self, sc): + source_path = os.path.join(self.path, sc.source_path) + save_as = os.path.join(self.output_path, sc.save_as) + self._mkdir(os.path.dirname(save_as)) + copy(source_path, save_as) + logger.info('Copying %s to %s', sc.source_path, sc.save_as) + + def _link_staticfile(self, sc): + source_path = os.path.join(self.path, sc.source_path) + save_as = os.path.join(self.output_path, sc.save_as) + self._mkdir(os.path.dirname(save_as)) + try: + if os.path.lexists(save_as): + os.unlink(save_as) + logger.info('Linking %s and %s', sc.source_path, sc.save_as) + if self.fallback_to_symlinks: + os.symlink(source_path, save_as) + else: + os.link(source_path, save_as) + except OSError as err: + if err.errno == errno.EXDEV: # 18: Invalid cross-device link + logger.debug( + "Cross-device links not valid. " + "Creating symbolic links instead." + ) + self.fallback_to_symlinks = True + self._link_staticfile(sc) + else: + raise err + + def _mkdir(self, path): + if os.path.lexists(path) and not os.path.isdir(path): + os.unlink(path) + mkdir_p(path) class SourceFileGenerator(Generator): diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -82,6 +82,8 @@ def load_source(name, path): 'PAGE_LANG_SAVE_AS': posix_join('pages', '{slug}-{lang}.html'), 'STATIC_URL': '{path}', 'STATIC_SAVE_AS': '{path}', + 'STATIC_CREATE_LINKS': False, + 'STATIC_CHECK_IF_MODIFIED': False, 'CATEGORY_URL': 'category/{slug}.html', 'CATEGORY_SAVE_AS': posix_join('category', '{slug}.html'), 'TAG_URL': 'tag/{slug}.html',
diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -5,7 +5,7 @@ import os from codecs import open -from shutil import rmtree +from shutil import copy, rmtree from tempfile import mkdtemp from pelican.generators import (ArticlesGenerator, Generator, PagesGenerator, @@ -674,6 +674,30 @@ class TestStaticGenerator(unittest.TestCase): def setUp(self): self.content_path = os.path.join(CUR_DIR, 'mixed_content') + self.temp_content = mkdtemp(prefix='testcontent.') + self.temp_output = mkdtemp(prefix='testoutput.') + self.settings = get_settings() + self.settings['PATH'] = self.temp_content + self.settings['STATIC_PATHS'] = ["static"] + self.settings['OUTPUT_PATH'] = self.temp_output + os.mkdir(os.path.join(self.temp_content, "static")) + self.startfile = os.path.join(self.temp_content, + "static", "staticfile") + self.endfile = os.path.join(self.temp_output, "static", "staticfile") + self.generator = StaticGenerator( + context={'filenames': {}}, + settings=self.settings, + path=self.temp_content, + theme="", + output_path=self.temp_output, + ) + + def tearDown(self): + rmtree(self.temp_content) + rmtree(self.temp_output) + + def set_ancient_mtime(self, path, timestamp=1): + os.utime(path, (timestamp, timestamp)) def test_static_excludes(self): """Test that StaticGenerator respects STATIC_EXCLUDES. @@ -687,7 +711,7 @@ def test_static_excludes(self): StaticGenerator( context=context, settings=settings, - path=settings['PATH'], output_path=None, + path=settings['PATH'], output_path=self.temp_output, theme=settings['THEME']).generate_context() staticnames = [os.path.basename(c.source_path) @@ -716,7 +740,7 @@ def test_static_exclude_sources(self): for generator_class in (PagesGenerator, StaticGenerator): generator_class( context=context, settings=settings, - path=settings['PATH'], output_path=None, + path=settings['PATH'], output_path=self.temp_output, theme=settings['THEME']).generate_context() staticnames = [os.path.basename(c.source_path) @@ -733,7 +757,7 @@ def test_static_exclude_sources(self): for generator_class in (PagesGenerator, StaticGenerator): generator_class( context=context, settings=settings, - path=settings['PATH'], output_path=None, + path=settings['PATH'], output_path=self.temp_output, theme=settings['THEME']).generate_context() staticnames = [os.path.basename(c.source_path) @@ -742,3 +766,135 @@ def test_static_exclude_sources(self): self.assertTrue( any(name.endswith(".md") for name in staticnames), "STATIC_EXCLUDE_SOURCES=False failed to include a markdown file") + + def test_copy_one_file(self): + with open(self.startfile, "w") as f: + f.write("staticcontent") + self.generator.generate_context() + self.generator.generate_output(None) + with open(self.endfile, "r") as f: + self.assertEqual(f.read(), "staticcontent") + + @unittest.skipUnless(MagicMock, 'Needs Mock module') + def test_file_update_required_when_dest_does_not_exist(self): + staticfile = MagicMock() + staticfile.source_path = self.startfile + staticfile.save_as = self.endfile + with open(staticfile.source_path, "w") as f: + f.write("a") + update_required = self.generator._file_update_required(staticfile) + self.assertTrue(update_required) + + @unittest.skipUnless(MagicMock, 'Needs Mock module') + def test_dest_and_source_mtimes_are_equal(self): + staticfile = MagicMock() + staticfile.source_path = self.startfile + staticfile.save_as = self.endfile + self.settings['STATIC_CHECK_IF_MODIFIED'] = True + with 
open(staticfile.source_path, "w") as f: + f.write("a") + os.mkdir(os.path.join(self.temp_output, "static")) + copy(staticfile.source_path, staticfile.save_as) + isnewer = self.generator._source_is_newer(staticfile) + self.assertFalse(isnewer) + + @unittest.skipUnless(MagicMock, 'Needs Mock module') + def test_source_is_newer(self): + staticfile = MagicMock() + staticfile.source_path = self.startfile + staticfile.save_as = self.endfile + with open(staticfile.source_path, "w") as f: + f.write("a") + os.mkdir(os.path.join(self.temp_output, "static")) + copy(staticfile.source_path, staticfile.save_as) + self.set_ancient_mtime(staticfile.save_as) + isnewer = self.generator._source_is_newer(staticfile) + self.assertTrue(isnewer) + + def test_skip_file_when_source_is_not_newer(self): + self.settings['STATIC_CHECK_IF_MODIFIED'] = True + with open(self.startfile, "w") as f: + f.write("staticcontent") + os.mkdir(os.path.join(self.temp_output, "static")) + with open(self.endfile, "w") as f: + f.write("staticcontent") + expected = os.path.getmtime(self.endfile) + self.set_ancient_mtime(self.startfile) + self.generator.generate_context() + self.generator.generate_output(None) + self.assertEqual(os.path.getmtime(self.endfile), expected) + + def test_dont_link_by_default(self): + with open(self.startfile, "w") as f: + f.write("staticcontent") + self.generator.generate_context() + self.generator.generate_output(None) + self.assertFalse(os.path.samefile(self.startfile, self.endfile)) + + def test_output_file_is_linked_to_source(self): + self.settings['STATIC_CREATE_LINKS'] = True + with open(self.startfile, "w") as f: + f.write("staticcontent") + self.generator.generate_context() + self.generator.generate_output(None) + self.assertTrue(os.path.samefile(self.startfile, self.endfile)) + + def test_output_file_exists_and_is_newer(self): + self.settings['STATIC_CREATE_LINKS'] = True + with open(self.startfile, "w") as f: + f.write("staticcontent") + os.mkdir(os.path.join(self.temp_output, "static")) + with open(self.endfile, "w") as f: + f.write("othercontent") + self.generator.generate_context() + self.generator.generate_output(None) + self.assertTrue(os.path.samefile(self.startfile, self.endfile)) + + def test_can_symlink_when_hardlink_not_possible(self): + self.settings['STATIC_CREATE_LINKS'] = True + with open(self.startfile, "w") as f: + f.write("staticcontent") + os.mkdir(os.path.join(self.temp_output, "static")) + self.generator.fallback_to_symlinks = True + self.generator.generate_context() + self.generator.generate_output(None) + self.assertTrue(os.path.islink(self.endfile)) + + def test_existing_symlink_is_considered_up_to_date(self): + self.settings['STATIC_CREATE_LINKS'] = True + with open(self.startfile, "w") as f: + f.write("staticcontent") + os.mkdir(os.path.join(self.temp_output, "static")) + os.symlink(self.startfile, self.endfile) + staticfile = MagicMock() + staticfile.source_path = self.startfile + staticfile.save_as = self.endfile + requires_update = self.generator._file_update_required(staticfile) + self.assertFalse(requires_update) + + def test_invalid_symlink_is_overwritten(self): + self.settings['STATIC_CREATE_LINKS'] = True + with open(self.startfile, "w") as f: + f.write("staticcontent") + os.mkdir(os.path.join(self.temp_output, "static")) + os.symlink("invalid", self.endfile) + staticfile = MagicMock() + staticfile.source_path = self.startfile + staticfile.save_as = self.endfile + requires_update = self.generator._file_update_required(staticfile) + self.assertTrue(requires_update) + 
self.generator.fallback_to_symlinks = True + self.generator.generate_context() + self.generator.generate_output(None) + self.assertEqual(os.path.realpath(self.endfile), self.startfile) + + def test_delete_existing_file_before_mkdir(self): + with open(self.startfile, "w") as f: + f.write("staticcontent") + with open(os.path.join(self.temp_output, "static"), "w") as f: + f.write("This file should be a directory") + self.generator.generate_context() + self.generator.generate_output(None) + self.assertTrue( + os.path.isdir(os.path.join(self.temp_output, "static"))) + self.assertTrue(os.path.isfile(self.endfile))
Add static file options: hard/symlink & only-when-modified Hello, I am interested in improving `StaticGenerator`. Please let me know if a similar effort is already under way, and also bring your recommendations and ideas. I don't have a timeline but I'd really like to have at least a basic working feature in about 2 weeks given the free time that I have. The problem is when the site content has several large static files, like videos. When generating the site, all the static files are copied, regardless of modification time. Even `WRITE_SELECTED` doesn't prevent that. For example, the pages and article take 4 seconds to process, but the static files add another almost 30 seconds to it. I'm thinking of adding a setting `STATIC_CHECK_MODIFIED`, false by default (the current behaviour). I'm currently working on inserting the logic in the `StaticGenerator.generate_context()` method. Once it works well with mtimes, then maybe I can add a `STATIC_CHECK_MODIFIED_METHOD` settings which would be "mtime" by default, or the name of a hashlib function. But I doubt that computing hashes on two video files (source_path and save_as) would be faster than just copying one over the other. Then I also want to try symlinks or hardlinks from save_as to source_path. I imagine that would be fast and even space efficient. Do you think of any problems that could arise if the output files are links and not copies? I also think #1980 is a great idea which could also solve the problem and I might look into it. Sincerely, Alexandre de Verteuil
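The patch above implements roughly this idea: skip copying when the destination is already up to date, and optionally hard-link files, falling back to symlinks when source and output live on different filesystems. The following is a simplified, self-contained sketch of that decision logic (not the actual `StaticGenerator` methods; `publish_static` is a hypothetical helper name):

```python
import errno
import os
import shutil


def publish_static(source, dest, create_links=False, check_modified=False):
    """Copy or link `source` to `dest`, skipping work that is not needed."""
    dest_dir = os.path.dirname(dest)
    if dest_dir:
        os.makedirs(dest_dir, exist_ok=True)

    if os.path.exists(dest):
        if create_links and os.path.samefile(source, dest):
            return  # already hard-linked to the source
        if check_modified and os.path.getmtime(source) <= os.path.getmtime(dest):
            return  # destination is at least as new as the source
        os.unlink(dest)

    if not create_links:
        shutil.copy2(source, dest)
        return

    try:
        os.link(source, dest)         # hard link when both sides share a filesystem
    except OSError as err:
        if err.errno == errno.EXDEV:  # cross-device link: fall back to a symlink
            os.symlink(os.path.abspath(source), dest)
        else:
            raise
```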
2016-08-06T07:56:18Z
[]
[]
getpelican/pelican
2196
getpelican__pelican-2196
[ "2031" ]
56a483475b0a42bd13cf114248c9141c9051ed84
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -11,7 +11,7 @@ import pytz import six -from six.moves.urllib.parse import urlparse, urlunparse +from six.moves.urllib.parse import urljoin, urlparse, urlunparse from pelican import signals from pelican.settings import DEFAULT_CONFIG @@ -228,6 +228,87 @@ def get_url_setting(self, key): key = key if self.in_default_lang else 'lang_%s' % key return self._expand_settings(key) + def _link_replacer(self, siteurl, m): + what = m.group('what') + value = urlparse(m.group('value')) + path = value.path + origin = m.group('path') + + # urllib.parse.urljoin() produces `a.html` for urljoin("..", "a.html") + # so if RELATIVE_URLS are enabled, we fall back to os.path.join() to + # properly get `../a.html`. However, os.path.join() produces + # `baz/http://foo/bar.html` for join("baz", "http://foo/bar.html") + # instead of correct "http://foo/bar.html", so one has to pick a side + # as there is no silver bullet. + if self.settings['RELATIVE_URLS']: + joiner = os.path.join + else: + joiner = urljoin + + # However, it's not *that* simple: urljoin("blog", "index.html") + # produces just `index.html` instead of `blog/index.html` (unlike + # os.path.join()), so in order to get a correct answer one needs to + # append a trailing slash to siteurl in that case. This also makes + # the new behavior fully compatible with Pelican 3.7.1. + if not siteurl.endswith('/'): + siteurl += '/' + + # XXX Put this in a different location. + if what in {'filename', 'attach'}: + if path.startswith('/'): + path = path[1:] + else: + # relative to the source path of this content + path = self.get_relative_source_path( + os.path.join(self.relative_dir, path) + ) + + if path not in self._context['filenames']: + unquoted_path = path.replace('%20', ' ') + + if unquoted_path in self._context['filenames']: + path = unquoted_path + + linked_content = self._context['filenames'].get(path) + if linked_content: + if what == 'attach': + if isinstance(linked_content, Static): + linked_content.attach_to(self) + else: + logger.warning( + "%s used {attach} link syntax on a " + "non-static file. Use {filename} instead.", + self.get_relative_source_path()) + origin = joiner(siteurl, linked_content.url) + origin = origin.replace('\\', '/') # for Windows paths. + else: + logger.warning( + "Unable to find '%s', skipping url replacement.", + value.geturl(), extra={ + 'limit_msg': ("Other resources were not found " + "and their urls not replaced")}) + elif what == 'category': + origin = joiner(siteurl, Category(path, self.settings).url) + elif what == 'tag': + origin = joiner(siteurl, Tag(path, self.settings).url) + elif what == 'index': + origin = joiner(siteurl, self.settings['INDEX_SAVE_AS']) + elif what == 'author': + origin = joiner(siteurl, Author(path, self.settings).url) + else: + logger.warning( + "Replacement Indicator '%s' not recognized, " + "skipping replacement", + what) + + # keep all other parts, such as query, fragment, etc. + parts = list(value) + parts[2] = origin + origin = urlunparse(parts) + + return ''.join((m.group('markup'), m.group('quote'), origin, + m.group('quote'))) + def _update_content(self, content, siteurl): """Update the content attribute. 
@@ -251,69 +332,7 @@ def _update_content(self, content, siteurl): \2""".format(instrasite_link_regex) hrefs = re.compile(regex, re.X) - def replacer(m): - what = m.group('what') - value = urlparse(m.group('value')) - path = value.path - origin = m.group('path') - - # XXX Put this in a different location. - if what in {'filename', 'attach'}: - if path.startswith('/'): - path = path[1:] - else: - # relative to the source path of this content - path = self.get_relative_source_path( - os.path.join(self.relative_dir, path) - ) - - if path not in self._context['filenames']: - unquoted_path = path.replace('%20', ' ') - - if unquoted_path in self._context['filenames']: - path = unquoted_path - - linked_content = self._context['filenames'].get(path) - if linked_content: - if what == 'attach': - if isinstance(linked_content, Static): - linked_content.attach_to(self) - else: - logger.warning( - "%s used {attach} link syntax on a " - "non-static file. Use {filename} instead.", - self.get_relative_source_path()) - origin = '/'.join((siteurl, linked_content.url)) - origin = origin.replace('\\', '/') # for Windows paths. - else: - logger.warning( - "Unable to find '%s', skipping url replacement.", - value.geturl(), extra={ - 'limit_msg': ("Other resources were not found " - "and their urls not replaced")}) - elif what == 'category': - origin = '/'.join((siteurl, Category(path, self.settings).url)) - elif what == 'tag': - origin = '/'.join((siteurl, Tag(path, self.settings).url)) - elif what == 'index': - origin = '/'.join((siteurl, self.settings['INDEX_SAVE_AS'])) - elif what == 'author': - origin = '/'.join((siteurl, Author(path, self.settings).url)) - else: - logger.warning( - "Replacement Indicator '%s' not recognized, " - "skipping replacement", - what) - - # keep all other parts, such as query, fragment, etc. - parts = list(value) - parts[2] = origin - origin = urlunparse(parts) - - return ''.join((m.group('markup'), m.group('quote'), origin, - m.group('quote'))) - - return hrefs.sub(replacer, content) + return hrefs.sub(lambda m: self._link_replacer(siteurl, m), content) def get_siteurl(self): return self._context.get('localsiteurl', '')
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -397,6 +397,54 @@ def test_intrasite_link_more(self): '</blockquote>' ) + def test_intrasite_link_absolute(self): + """Test that absolute URLs are merged properly.""" + + args = self.page_kwargs.copy() + args['settings'] = get_settings( + STATIC_URL='http://static.cool.site/{path}', + ARTICLE_URL='http://blog.cool.site/{slug}.html') + args['source_path'] = 'content' + args['context']['filenames'] = { + 'images/poster.jpg': Static('', + settings=args['settings'], + source_path='images/poster.jpg'), + 'article.rst': Article('', + settings=args['settings'], + metadata={'slug': 'article', + 'title': 'Article'}) + } + + # Article link will go to blog + args['content'] = ( + '<a href="{filename}article.rst">Article</a>' + ) + content = Page(**args).get_content('http://cool.site') + self.assertEqual( + content, + '<a href="http://blog.cool.site/article.html">Article</a>' + ) + + # Page link will go to the main site + args['content'] = ( + '<a href="{index}">Index</a>' + ) + content = Page(**args).get_content('http://cool.site') + self.assertEqual( + content, + '<a href="http://cool.site/index.html">Index</a>' + ) + + # Image link will go to static + args['content'] = ( + '<img src="{filename}/images/poster.jpg"/>' + ) + content = Page(**args).get_content('http://cool.site') + self.assertEqual( + content, + '<img src="http://static.cool.site/images/poster.jpg"/>' + ) + def test_intrasite_link_markdown_spaces(self): # Markdown introduces %20 instead of spaces, this tests that # we support markdown doing this.
{filename} issue when {{ SITEURL }} and {{ STATIC_URL }} are different absolute URLs I'm trying to build a blog where {{ SITEURL }} and {{ STATIC_URL }} differ (they are actually on two different domains); when I build my blog, everything that is linked through {filename} ends up with {{ SITEURL }}{{ STATIC_URL }} as a prefix in its URL. The issue seems to be in pelican/contents.py at line 245: ``` python origin = '/'.join((siteurl, linked_content.url)) ``` I just left "linked_content.url" and it works just fine. This is it now: ``` python origin = linked_content.url ``` Am I missing something?
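The patch above addresses this by joining intrasite links with `urljoin()` (or `os.path.join()` when `RELATIVE_URLS` is enabled) instead of unconditionally prefixing `SITEURL`. The difference is easy to see in isolation; the URLs below are made-up examples:

```python
from urllib.parse import urljoin

siteurl = "http://cool.site/"
static_url = "http://static.cool.site/images/poster.jpg"  # already-absolute STATIC_URL
page_url = "index.html"                                   # site-relative URL

# Naive joining prepends SITEURL even to absolute URLs:
print("/".join((siteurl.rstrip("/"), static_url)))
# -> http://cool.site/http://static.cool.site/images/poster.jpg

# urljoin() keeps absolute URLs intact and resolves relative ones against SITEURL:
print(urljoin(siteurl, static_url))  # -> http://static.cool.site/images/poster.jpg
print(urljoin(siteurl, page_url))    # -> http://cool.site/index.html
```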
2017-08-07T16:29:53Z
[]
[]
getpelican/pelican
2366
getpelican__pelican-2366
[ "2255" ]
d2eb32c9106a0df87495da6464d0b9d037de4a1d
diff --git a/pelican/tools/pelican_import.py b/pelican/tools/pelican_import.py --- a/pelican/tools/pelican_import.py +++ b/pelican/tools/pelican_import.py @@ -674,6 +674,22 @@ def download_attachments(output_path, urls): return locations +def is_pandoc_needed(fields): + in_markup_idx = 9 + return filter(lambda f: f[in_markup_idx] in ('html', 'wp-html'), fields) + + +def get_pandoc_version(): + cmd = ['pandoc', '--version'] + try: + output = subprocess.check_output(cmd, universal_newlines=True) + except (subprocess.CalledProcessError, OSError) as e: + logger.warning("Pandoc version unknown: %s", e) + return '' + + return output.split()[1] + + def update_links_to_attached_files(content, attachments): for old_url, new_path in attachments.items(): # url may occur both with http:// and https:// @@ -689,6 +705,14 @@ def fields2pelican( dircat=False, strip_raw=False, disable_slugs=False, dirpage=False, filename_template=None, filter_author=None, wp_custpost=False, wp_attach=False, attachments=None): + + pandoc_version = get_pandoc_version() + + if is_pandoc_needed(fields) and not pandoc_version: + error = ('Pandoc must be installed to complete the ' + 'requested import action.') + exit(error) + for (title, content, filename, date, author, categories, tags, status, kind, in_markup) in fields: if filter_author and filter_author != author: @@ -735,11 +759,17 @@ def fields2pelican( fp.write(new_content) - parse_raw = '--parse-raw' if not strip_raw else '' - cmd = ('pandoc --normalize {0} --from=html' - ' --to={1} -o "{2}" "{3}"') - cmd = cmd.format(parse_raw, out_markup, - out_filename, html_filename) + if pandoc_version[0] == '1': + parse_raw = '--parse-raw' if not strip_raw else '' + cmd = ('pandoc --normalize {0} --from=html' + ' --to={1} -o "{2}" "{3}"') + cmd = cmd.format(parse_raw, out_markup, + out_filename, html_filename) + else: + from_arg = '-f html+raw_html' if not strip_raw else '-f html' + cmd = ('pandoc {0} --to={1}-smart -o "{2}" "{3}"') + cmd = cmd.format(from_arg, out_markup, + out_filename, html_filename) try: rc = subprocess.call(cmd, shell=True)
diff --git a/pelican/tests/content/wordpressexport.xml b/pelican/tests/content/wordpressexport.xml --- a/pelican/tests/content/wordpressexport.xml +++ b/pelican/tests/content/wordpressexport.xml @@ -554,7 +554,11 @@ Pelicans are supposed to eat fish, damn it! <iframe width="420" height="315" src="http://www.youtube.com/embed/QNNl_uWmQXE" frameborder="0" allowfullscreen></iframe> -Bottom line: don't mess up with birds]]></content:encoded> +Bottom line: don't mess up with birds + +"That's a 'wonderful' shoe." + +“That’s a ‘magic’ sock.”]]></content:encoded> <excerpt:encoded><![CDATA[]]></excerpt:encoded> <wp:post_id>173</wp:post_id> <wp:post_date>2012-02-16 15:52:55</wp:post_date> diff --git a/pelican/tests/test_importer.py b/pelican/tests/test_importer.py --- a/pelican/tests/test_importer.py +++ b/pelican/tests/test_importer.py @@ -268,6 +268,19 @@ def r(f): code_line = re.search(r'\s+a = \[1, 2, 3\]', md).group(0) self.assertTrue(sample_line.rindex('This') < code_line.rindex('a')) + def test_dont_use_smart_quotes(self): + def r(f): + with open(f, encoding='utf-8') as infile: + return infile.read() + silent_f2p = mute(True)(fields2pelican) + test_post = filter( + lambda p: p[0].startswith("Post with raw data"), + self.posts) + with temporary_folder() as temp: + md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0] + escaped_quotes = re.search(r'\\[\'"“”‘’]', md) + self.assertFalse(escaped_quotes) + class TestBuildHeader(unittest.TestCase): def test_build_header(self):
pelican-import corrupts posts imported from WordPress Hello, I tried to move my blog ( https://eax.me/ ) from WordPress to Pelican using pelican-import. It works for the most part. There were a few difficulties but I managed to find workarounds for most of them. Particularly pelican-import doesn't handle images well and doesn't support CodeColorer plugin so I had to use some regular expressions (see below). Also I discovered that pelican-import doesn't work with pandoc 2.0.2 properly so I had to patch the script around line 729: ``` parse_raw = '' cmd = ('pandoc {0} --from=html' ' --to={1}+raw_html -o "{2}" "{3}"') cmd = cmd.format(parse_raw, out_markup, out_filename, html_filename) ``` These are minor issues though. The most serious issues are the following. 1. After migration characters like `'`, `"` and `$` were replaced to `\'`, `\"` and `\$`. I can't just replace these sequences back to `'`, `"` and `$` since these sequences sometimes are used in code snippets. There is a similar issue with the dash symbol that after migrations turn from `---` to `\-\--`. 2. Also in some code snippets code like `#include <something.h>` just turns into `#include`. 3. Last but not least one of the posts created by pelican-import (see below) hangs `make devserver` with 100% CPU usage. You can reproduce all these issues using the XML file that I exported from WordPress. Please leave your email and I will send it to you. Exact steps to reproduce: ``` perl -pi -e 's/\[cci.*?\]/<code>/g' ~/temp/del-me/wordpress.2017-11-29.xml perl -pi -e 's/\[\/cci.*?\]/<\/code>/g' ~/temp/del-me/wordpress.2017-11-29.xml perl -pi -e 's/\[cc_(\w+).*?\]/<pre><code>:::$1/g' ~/temp/del-me/wordpress.2017-11-29.xml perl -pi -e 's/\[cc.*?lang="(\w+)".*?\]/<pre><code>:::$1/g' ~/temp/del-me/wordpress.2017-11-29.xml perl -pi -e 's/\[cc.*?\]/<pre><code>/g' ~/temp/del-me/wordpress.2017-11-29.xml perl -pi -e 's/\[\/cc.*?\]/<\/code><\/pre>/g' ~/temp/del-me/wordpress.2017-11-29.xml perl -pi -e 's/<img .*?((src=".*?" alt=".*?")|(alt=".*?" src=".*?)).*?>/<img $1 \/>/g' ~/temp/del-me/wordpress.2017-11-29.xml perl -pi -e 's/<span style="white\-space: nowrap;">(.*?)<\/span>/$1/g' ~/temp/del-me/wordpress.2017-11-29.xml perl -pi -e 's!"https?:\/\/eax\.me\/([a-z0-9\-_]+)\/"!"https:\/\/eax.me\/$1.html"!g' ~/temp/del-me/wordpress.2017-11-29.xml pelican-import -o content/ -m markdown --wpfile ~/temp/del-me/wordpress.2017-11-29.xml ``` Run `make devserver` to make sure that content/mojolicious.md hangs the server (issue 3). Temporary replace it somewhere and run `make devserver` again. Then see: ``` http://localhost:8000/diy-presentation-remote.html http://localhost:8000/elliptic-curves-crypto.html ``` ^ symbols `$`, `'` and `"` turned into `\$`, `\'` and `\"` (issue 1). ``` http://localhost:8000/cpp-gtest.html ``` ^ `#include <something.h>` was replaced to `#include` (issue 2). Pelican version is 3.7.1.
I maybe have the same trouble as you had. My environment is as follows. pelican 3.7.1 pandoc 2.0.5 When I run the following command, an error occurred. ``` $ pelican-import --wpfile -o ./output ./wp.xml ... --normalize has been removed. Normalization is now automatic. --parse-raw/-R has been removed. Use +raw_html or +raw_tex extension. Try pandoc --help for more information. Please, check your Pandoc installation. ``` It seems that Pandoc's arguments have changed. In order to avoid these error, you patched the script around line 729, didn't you? Right. Just ran into the very same error messages as @waura pelican=3.7.1 Pandoc 2.0.6 ``` $ pelican-import --wpfile -o output2/ osp-blog.wordpress.2017-10-10.xml output2/-.rst --normalize has been removed. Normalization is now automatic. --parse-raw/-R has been removed. Use +raw_html or +raw_tex extension. Try pandoc --help for more information. Please, check your Pandoc installation. ``` No news on this issue? Seems simply like Pandoc arguments have changed, any idea what version of pandoc we could downgrate do to get this working again? Thanks! I was personally able to get my xml export to convert to markdown, although both @afiskon and @waura seem to desire .rst This is what I did to lines 728 - 732 ``` parse_raw = '' if not strip_raw else '' cmd = ('pandoc {0} --from=html' ' --to=gfm+raw_html -o "{2}" "{3}"') cmd = cmd.format(parse_raw, out_markup, out_filename, html_filename) ``` And this let me import, convert and works ok with the other pelican import args like `--dir-cat` & ` --dir-page` Hope this helps in solving the issue for .rst convertions. Cheers I just hit this issue too. @davidwilemski: It seems you might have [implemented a workaround](https://github.com/davidwilemski/pelican/commit/77580dbbfbf8fb7f003bf132c1e7ee9a75b3521d) for this issue. Would you consider submitting a pull request so that others don't run into this problem? @justinmayer, sure, I've opened up #2289 to get the ball rolling. I hit this issue as well. The solution that @afiskon showed helped.
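The patch above resolves this by asking Pandoc for its version and choosing the argument set accordingly, since `--normalize` and `--parse-raw` were removed in Pandoc 2.x. A condensed sketch of that approach, pulled out of the importer for illustration (`html_to_markup_command` is a hypothetical helper name):

```python
import subprocess


def get_pandoc_version():
    """Return pandoc's version string (e.g. '2.0.6'), or '' if pandoc is unavailable."""
    try:
        output = subprocess.check_output(
            ["pandoc", "--version"], universal_newlines=True)
    except (OSError, subprocess.CalledProcessError):
        return ""
    # The first line looks like "pandoc 2.0.6"; the second token is the version.
    return output.split()[1]


def html_to_markup_command(out_markup, out_file, html_file, strip_raw=False):
    """Build a pandoc command line suited to the installed major version."""
    version = get_pandoc_version()
    if version.startswith("1."):
        raw = "" if strip_raw else "--parse-raw"
        return 'pandoc --normalize {} --from=html --to={} -o "{}" "{}"'.format(
            raw, out_markup, out_file, html_file)
    from_arg = "-f html" if strip_raw else "-f html+raw_html"
    return 'pandoc {} --to={}-smart -o "{}" "{}"'.format(
        from_arg, out_markup, out_file, html_file)
```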
2018-06-26T17:15:33Z
[]
[]
getpelican/pelican
2415
getpelican__pelican-2415
[ "2403" ]
6f0743b340cfaa98ced38fc41727c8ea9f1a52f8
diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -55,17 +55,35 @@ def _create_new_feed(self, feed_type, feed_title, context): def _add_item_to_the_feed(self, feed, item): title = Markup(item.title).striptags() link = self.urljoiner(self.site_url, item.url) - is_rss = isinstance(feed, Rss201rev2Feed) - if not is_rss or self.settings.get('RSS_FEED_SUMMARY_ONLY'): - description = item.summary + + if isinstance(feed, Rss201rev2Feed): + # RSS feeds use a single tag called 'description' for both the full + # content and the summary + content = None + if self.settings.get('RSS_FEED_SUMMARY_ONLY'): + description = item.summary + else: + description = item.get_content(self.site_url) + else: - description = item.get_content(self.site_url) + # Atom feeds have two different tags for full content (called + # 'content' by feedgenerator) and summary (called 'description' by + # feedgenerator). + # + # It does not make sense to have the summary be the + # exact same thing as the full content. If we detect that + # they are we just remove the summary. + content = item.get_content(self.site_url) + description = item.summary + if description == content: + description = None + feed.add_item( title=title, link=link, unique_id=get_tag_uri(link, item.date), description=description, - content=item.get_content(self.site_url), + content=content, categories=item.tags if hasattr(item, 'tags') else None, author_name=getattr(item, 'author', ''), pubdate=set_date_tzinfo(
diff --git a/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml b/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml --- a/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml +++ b/pelican/tests/output/basic/feeds/alexis-metaireau.atom.xml @@ -13,13 +13,7 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:None,2010-10-20:/oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; -&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! -YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:None,2010-10-20:/oh-yeah.html</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; diff --git a/pelican/tests/output/basic/feeds/all-en.atom.xml b/pelican/tests/output/basic/feeds/all-en.atom.xml --- a/pelican/tests/output/basic/feeds/all-en.atom.xml +++ b/pelican/tests/output/basic/feeds/all-en.atom.xml @@ -1,18 +1,11 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all-en.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+00:00</published><updated>2012-11-30T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-11-30:/filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-02-29:/second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+00:00</published><updated>2011-04-20T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-04-20:/a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all-en.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+00:00</published><updated>2012-11-30T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-11-30:/filename_metadata-example.html</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</content></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-02-29:/second-article.html</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+00:00</published><updated>2011-04-20T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-04-20:/a-markdown-powered-article.html</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; -&lt;p&gt;&lt;a 
href="/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 1&lt;/p&gt; -</content></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 2&lt;/p&gt; -</content></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-1.html</id><content type="html">&lt;p&gt;Article 1&lt;/p&gt; +</content></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-2.html</id><content type="html">&lt;p&gt;Article 2&lt;/p&gt; +</content></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-3.html</id><content type="html">&lt;p&gt;Article 3&lt;/p&gt; </content></entry><entry><title>This is a super article !</title><link href="/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+00:00</published><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:None,2010-12-02:/this-is-a-super-article.html</id><summary type="html">&lt;p class="first last"&gt;Multi-line metadata should be supported as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; </summary><content type="html">&lt;p&gt;Some content here !&lt;/p&gt; @@ -27,13 +20,7 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:None,2010-10-20:/oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; 
-&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! -YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:None,2010-10-20:/oh-yeah.html</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; @@ -83,6 +70,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</content></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+00:00</published><updated>2010-03-14T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2010-03-14:/tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</content></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+00:00</published><updated>2010-03-14T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2010-03-14:/tag/baz.html</id><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/all-fr.atom.xml b/pelican/tests/output/basic/feeds/all-fr.atom.xml --- a/pelican/tests/output/basic/feeds/all-fr.atom.xml +++ b/pelican/tests/output/basic/feeds/all-fr.atom.xml @@ -1,4 +1,3 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all-fr.atom.xml" rel="self"></link><id>/</id><updated>2012-02-29T00:00:00+00:00</updated><entry><title>Deuxième article</title><link href="/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-02-29:/second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all-fr.atom.xml" rel="self"></link><id>/</id><updated>2012-02-29T00:00:00+00:00</updated><entry><title>Deuxième article</title><link href="/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; 
</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/all.atom.xml b/pelican/tests/output/basic/feeds/all.atom.xml --- a/pelican/tests/output/basic/feeds/all.atom.xml +++ b/pelican/tests/output/basic/feeds/all.atom.xml @@ -1,20 +1,12 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+00:00</published><updated>2012-11-30T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-11-30:/filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-02-29:/second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-02-29:/second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+00:00</published><updated>2011-04-20T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-04-20:/a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all.atom.xml" rel="self"></link><id>/</id><updated>2013-11-17T23:29:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+00:00</published><updated>2012-11-30T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-11-30:/filename_metadata-example.html</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</content></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-02-29:/second-article.html</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</content><category term="foo"></category><category term="bar"></category><category 
term="baz"></category></entry><entry><title>Deuxième article</title><link href="/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+00:00</published><updated>2011-04-20T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-04-20:/a-markdown-powered-article.html</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; -&lt;p&gt;&lt;a href="/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 1&lt;/p&gt; -</content></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 2&lt;/p&gt; -</content></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-1.html</id><content type="html">&lt;p&gt;Article 1&lt;/p&gt; +</content></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-2.html</id><content type="html">&lt;p&gt;Article 2&lt;/p&gt; +</content></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-3.html</id><content type="html">&lt;p&gt;Article 3&lt;/p&gt; </content></entry><entry><title>This is a super article !</title><link 
href="/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+00:00</published><updated>2013-11-17T23:29:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:None,2010-12-02:/this-is-a-super-article.html</id><summary type="html">&lt;p class="first last"&gt;Multi-line metadata should be supported as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; </summary><content type="html">&lt;p&gt;Some content here !&lt;/p&gt; @@ -29,13 +21,7 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:None,2010-10-20:/oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; -&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! -YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:None,2010-10-20:/oh-yeah.html</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; @@ -85,6 +71,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</content></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+00:00</published><updated>2010-03-14T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2010-03-14:/tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</content></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+00:00</published><updated>2010-03-14T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2010-03-14:/tag/baz.html</id><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/bar.atom.xml b/pelican/tests/output/basic/feeds/bar.atom.xml --- a/pelican/tests/output/basic/feeds/bar.atom.xml +++ b/pelican/tests/output/basic/feeds/bar.atom.xml @@ -1,11 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog - bar</title><link href="/" rel="alternate"></link><link href="/feeds/bar.atom.xml" rel="self"></link><id>/</id><updated>2010-10-20T10:14:00+00:00</updated><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:None,2010-10-20:/oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; -&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! -YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog - bar</title><link href="/" rel="alternate"></link><link href="/feeds/bar.atom.xml" rel="self"></link><id>/</id><updated>2010-10-20T10:14:00+00:00</updated><entry><title>Oh yeah !</title><link href="/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><updated>2010-10-20T10:14:00+00:00</updated><author><name>Alexis Métaireau</name></author><id>tag:None,2010-10-20:/oh-yeah.html</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; diff --git a/pelican/tests/output/basic/feeds/cat1.atom.xml b/pelican/tests/output/basic/feeds/cat1.atom.xml --- a/pelican/tests/output/basic/feeds/cat1.atom.xml +++ b/pelican/tests/output/basic/feeds/cat1.atom.xml @@ -1,12 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog - cat1</title><link href="/" rel="alternate"></link><link href="/feeds/cat1.atom.xml" rel="self"></link><id>/</id><updated>2011-04-20T00:00:00+00:00</updated><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+00:00</published><updated>2011-04-20T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-04-20:/a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog - cat1</title><link href="/" rel="alternate"></link><link href="/feeds/cat1.atom.xml" rel="self"></link><id>/</id><updated>2011-04-20T00:00:00+00:00</updated><entry><title>A markdown powered article</title><link href="/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+00:00</published><updated>2011-04-20T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-04-20:/a-markdown-powered-article.html</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; -&lt;p&gt;&lt;a href="/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 1&lt;/p&gt; -</content></entry><entry><title>Article 2</title><link href="/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 2&lt;/p&gt; -</content></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-1.html</id><content type="html">&lt;p&gt;Article 1&lt;/p&gt; +</content></entry><entry><title>Article 2</title><link href="/article-2.html" 
rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-2.html</id><content type="html">&lt;p&gt;Article 2&lt;/p&gt; +</content></entry><entry><title>Article 3</title><link href="/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+00:00</published><updated>2011-02-17T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2011-02-17:/article-3.html</id><content type="html">&lt;p&gt;Article 3&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/misc.atom.xml b/pelican/tests/output/basic/feeds/misc.atom.xml --- a/pelican/tests/output/basic/feeds/misc.atom.xml +++ b/pelican/tests/output/basic/feeds/misc.atom.xml @@ -1,8 +1,6 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog - misc</title><link href="/" rel="alternate"></link><link href="/feeds/misc.atom.xml" rel="self"></link><id>/</id><updated>2012-11-30T00:00:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+00:00</published><updated>2012-11-30T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-11-30:/filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-02-29:/second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog - misc</title><link href="/" rel="alternate"></link><link href="/feeds/misc.atom.xml" rel="self"></link><id>/</id><updated>2012-11-30T00:00:00+00:00</updated><entry><title>FILENAME_METADATA example</title><link href="/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+00:00</published><updated>2012-11-30T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-11-30:/filename_metadata-example.html</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</content></entry><entry><title>Second article</title><link href="/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-02-29:/second-article.html</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; </content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+00:00</published><updated>2010-10-15T20:30:00+00:00</updated><author><name></name></author><id>tag:None,2010-10-15:/unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; @@ -47,6 +45,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</content></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+00:00</published><updated>2010-03-14T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2010-03-14:/tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</content></entry><entry><title>The baz tag</title><link href="/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+00:00</published><updated>2010-03-14T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2010-03-14:/tag/baz.html</id><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml b/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml --- a/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml +++ b/pelican/tests/output/custom/feeds/alexis-metaireau.atom.xml @@ -1,18 +1,11 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - Alexis Métaireau</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - Alexis Métaireau</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/filename_metadata-example.html</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article.html</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/a-markdown-powered-article.html</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; -&lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 1&lt;/p&gt; -</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 2&lt;/p&gt; -</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-1.html</id><content type="html">&lt;p&gt;Article 1&lt;/p&gt; +</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-2.html</id><content type="html">&lt;p&gt;Article 2&lt;/p&gt; +</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-3.html</id><content type="html">&lt;p&gt;Article 3&lt;/p&gt; </content></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:/this-is-a-super-article.html</id><summary type="html">&lt;p class="first last"&gt;Multi-line metadata should be supported as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; </summary><content type="html">&lt;p&gt;Some content here !&lt;/p&gt; @@ -27,13 +20,7 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; -&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
-YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah.html</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; @@ -83,6 +70,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all-en.atom.xml b/pelican/tests/output/custom/feeds/all-en.atom.xml --- a/pelican/tests/output/custom/feeds/all-en.atom.xml +++ b/pelican/tests/output/custom/feeds/all-en.atom.xml @@ -1,18 +1,11 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-en.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article.html</id><summary 
type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-en.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/filename_metadata-example.html</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article.html</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/a-markdown-powered-article.html</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; -&lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 1&lt;/p&gt; -</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" 
rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 2&lt;/p&gt; -</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-1.html</id><content type="html">&lt;p&gt;Article 1&lt;/p&gt; +</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-2.html</id><content type="html">&lt;p&gt;Article 2&lt;/p&gt; +</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-3.html</id><content type="html">&lt;p&gt;Article 3&lt;/p&gt; </content></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:/this-is-a-super-article.html</id><summary type="html">&lt;p class="first last"&gt;Multi-line metadata should be supported as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; </summary><content type="html">&lt;p&gt;Some content here !&lt;/p&gt; @@ -27,13 +20,7 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; -&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
-YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah.html</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; @@ -83,6 +70,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all-fr.atom.xml b/pelican/tests/output/custom/feeds/all-fr.atom.xml --- a/pelican/tests/output/custom/feeds/all-fr.atom.xml +++ b/pelican/tests/output/custom/feeds/all-fr.atom.xml @@ -1,6 +1,4 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-03-02T14:01:01+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</content></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><summary 
type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-03-02T14:01:01+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</content></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; </content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all.atom.xml b/pelican/tests/output/custom/feeds/all.atom.xml --- a/pelican/tests/output/custom/feeds/all.atom.xml +++ b/pelican/tests/output/custom/feeds/all.atom.xml @@ -1,22 +1,13 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</content><category term="foo"></category><category 
term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/filename_metadata-example.html</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</content></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article.html</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/a-markdown-powered-article.html</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; -&lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 1&lt;/p&gt; -</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 2&lt;/p&gt; -</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-3.html</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-1.html</id><content type="html">&lt;p&gt;Article 1&lt;/p&gt; +</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-2.html</id><content type="html">&lt;p&gt;Article 2&lt;/p&gt; +</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-3.html</id><content type="html">&lt;p&gt;Article 3&lt;/p&gt; </content></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/this-is-a-super-article.html" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:/this-is-a-super-article.html</id><summary type="html">&lt;p class="first last"&gt;Multi-line metadata should be supported as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; </summary><content type="html">&lt;p&gt;Some content here !&lt;/p&gt; @@ -31,13 +22,7 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; -&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! -YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah.html</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; @@ -87,6 +72,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/bar.atom.xml b/pelican/tests/output/custom/feeds/bar.atom.xml --- a/pelican/tests/output/custom/feeds/bar.atom.xml +++ b/pelican/tests/output/custom/feeds/bar.atom.xml @@ -1,11 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - bar</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><subtitle>A personal blog.</subtitle><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah.html</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; -&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! -YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - bar</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><subtitle>A personal blog.</subtitle><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/oh-yeah.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah.html</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; diff --git a/pelican/tests/output/custom/feeds/cat1.atom.xml b/pelican/tests/output/custom/feeds/cat1.atom.xml --- a/pelican/tests/output/custom/feeds/cat1.atom.xml +++ b/pelican/tests/output/custom/feeds/cat1.atom.xml @@ -1,12 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - cat1</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><subtitle>A personal blog.</subtitle><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/a-markdown-powered-article.html</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - cat1</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><subtitle>A personal blog.</subtitle><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/a-markdown-powered-article.html</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; -&lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-1.html</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 1&lt;/p&gt; -</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-2.html</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 2&lt;/p&gt; -</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-3.html</id><summary 
type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/article-1.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-1.html</id><content type="html">&lt;p&gt;Article 1&lt;/p&gt; +</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/article-2.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-2.html</id><content type="html">&lt;p&gt;Article 2&lt;/p&gt; +</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/article-3.html" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/article-3.html</id><content type="html">&lt;p&gt;Article 3&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/misc.atom.xml b/pelican/tests/output/custom/feeds/misc.atom.xml --- a/pelican/tests/output/custom/feeds/misc.atom.xml +++ b/pelican/tests/output/custom/feeds/misc.atom.xml @@ -1,8 +1,6 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - misc</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/filename_metadata-example.html</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article.html</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - misc</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/filename_metadata-example.html</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article.html</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; </content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:/unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; @@ -47,6 +45,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml --- a/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/alexis-metaireau.atom.xml @@ -1,18 +1,11 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - Alexis Métaireau</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" 
rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - Alexis Métaireau</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/alexis-metaireau.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/posts/2012/novembre/30/filename_metadata-example/</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/posts/2012/février/29/second-article/</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/posts/2011/avril/20/a-markdown-powered-article/</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to 
unbelievable&lt;/a&gt;&lt;/p&gt;</summary><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; -&lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 1&lt;/p&gt; -</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 2&lt;/p&gt; -</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-1/</id><content type="html">&lt;p&gt;Article 1&lt;/p&gt; +</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-2/</id><content type="html">&lt;p&gt;Article 2&lt;/p&gt; +</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-3/</id><content type="html">&lt;p&gt;Article 3&lt;/p&gt; </content></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:/posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p class="first last"&gt;Multi-line metadata should be supported as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; </summary><content type="html">&lt;p&gt;Some content here !&lt;/p&gt; @@ -27,13 +20,7 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; -&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! -YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/posts/2010/octobre/20/oh-yeah/</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; @@ -83,6 +70,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all-en.atom.xml b/pelican/tests/output/custom_locale/feeds/all-en.atom.xml --- a/pelican/tests/output/custom_locale/feeds/all-en.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/all-en.atom.xml @@ -1,18 +1,11 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-en.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' 
log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-en.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/posts/2012/novembre/30/filename_metadata-example/</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/posts/2012/février/29/second-article/</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/posts/2011/avril/20/a-markdown-powered-article/</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; -&lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 1&lt;/p&gt; -</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 2&lt;/p&gt; -</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-1/</id><content type="html">&lt;p&gt;Article 1&lt;/p&gt; +</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-2/</id><content type="html">&lt;p&gt;Article 2&lt;/p&gt; +</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-3/</id><content type="html">&lt;p&gt;Article 3&lt;/p&gt; </content></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:/posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p class="first last"&gt;Multi-line metadata should be supported as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; </summary><content type="html">&lt;p&gt;Some content here !&lt;/p&gt; @@ -27,13 +20,7 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; -&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
-YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/posts/2010/octobre/20/oh-yeah/</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! YEAH !&lt;/p&gt; @@ -83,6 +70,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml b/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml --- a/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml @@ -1,6 +1,4 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-03-02T14:01:01+01:00</updated><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</content></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis 
Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-03-02T14:01:01+01:00</updated><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</content></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; </content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all.atom.xml b/pelican/tests/output/custom_locale/feeds/all.atom.xml --- a/pelican/tests/output/custom_locale/feeds/all.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/all.atom.xml @@ -1,22 +1,13 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><summary type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><content 
type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><summary type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/posts/2012/novembre/30/filename_metadata-example/</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</content></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/posts/2012/février/29/second-article/</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link 
href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/posts/2011/avril/20/a-markdown-powered-article/</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; -&lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 1&lt;/p&gt; -</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 2&lt;/p&gt; -</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-1/</id><content type="html">&lt;p&gt;Article 1&lt;/p&gt; +</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-2/</id><content type="html">&lt;p&gt;Article 2&lt;/p&gt; +</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" 
rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-3/</id><content type="html">&lt;p&gt;Article 3&lt;/p&gt; </content></entry><entry><title>This is a super article !</title><link href="http://blog.notmyidea.org/posts/2010/d%C3%A9cembre/02/this-is-a-super-article/" rel="alternate"></link><published>2010-12-02T10:14:00+01:00</published><updated>2013-11-17T23:29:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-12-02:/posts/2010/décembre/02/this-is-a-super-article/</id><summary type="html">&lt;p class="first last"&gt;Multi-line metadata should be supported as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; </summary><content type="html">&lt;p&gt;Some content here !&lt;/p&gt; @@ -31,13 +22,7 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; &lt;/pre&gt; &lt;p&gt;→ And now try with some utf8 hell: ééé&lt;/p&gt; &lt;/div&gt; -</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; -&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! -YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +</content><category term="foo"></category><category term="bar"></category><category term="foobar"></category></entry><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/posts/2010/octobre/20/oh-yeah/</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; @@ -87,6 +72,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/bar.atom.xml b/pelican/tests/output/custom_locale/feeds/bar.atom.xml --- a/pelican/tests/output/custom_locale/feeds/bar.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/bar.atom.xml @@ -1,11 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - bar</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/posts/2010/octobre/20/oh-yeah/</id><summary type="html">&lt;div class="section" id="why-not"&gt; -&lt;h2&gt;Why not ?&lt;/h2&gt; -&lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! -YEAH !&lt;/p&gt; -&lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; -&lt;/div&gt; -</summary><content type="html">&lt;div class="section" id="why-not"&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - bar</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/bar.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2010-10-20T10:14:00+02:00</updated><entry><title>Oh yeah !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/20/oh-yeah/" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/posts/2010/octobre/20/oh-yeah/</id><content type="html">&lt;div class="section" id="why-not"&gt; &lt;h2&gt;Why not ?&lt;/h2&gt; &lt;p&gt;After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
YEAH !&lt;/p&gt; diff --git a/pelican/tests/output/custom_locale/feeds/cat1.atom.xml b/pelican/tests/output/custom_locale/feeds/cat1.atom.xml --- a/pelican/tests/output/custom_locale/feeds/cat1.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/cat1.atom.xml @@ -1,12 +1,7 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - cat1</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/posts/2011/avril/20/a-markdown-powered-article/</id><summary type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - cat1</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/cat1.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2011-04-20T00:00:00+02:00</updated><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/posts/2011/avril/20/a-markdown-powered-article/</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</summary><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; -&lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; -&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-1/</id><summary type="html">&lt;p&gt;Article 1&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 1&lt;/p&gt; -</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-2/</id><summary type="html">&lt;p&gt;Article 2&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 2&lt;/p&gt; -</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" 
rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-3/</id><summary type="html">&lt;p&gt;Article 3&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Article 3&lt;/p&gt; +&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a file-relative link to unbelievable&lt;/a&gt;&lt;/p&gt;</content></entry><entry><title>Article 1</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-1/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-1/</id><content type="html">&lt;p&gt;Article 1&lt;/p&gt; +</content></entry><entry><title>Article 2</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-2/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-2/</id><content type="html">&lt;p&gt;Article 2&lt;/p&gt; +</content></entry><entry><title>Article 3</title><link href="http://blog.notmyidea.org/posts/2011/f%C3%A9vrier/17/article-3/" rel="alternate"></link><published>2011-02-17T00:00:00+01:00</published><updated>2011-02-17T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-02-17:/posts/2011/février/17/article-3/</id><content type="html">&lt;p&gt;Article 3&lt;/p&gt; </content></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/misc.atom.xml b/pelican/tests/output/custom_locale/feeds/misc.atom.xml --- a/pelican/tests/output/custom_locale/feeds/misc.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/misc.atom.xml @@ -1,8 +1,6 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - misc</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/posts/2012/novembre/30/filename_metadata-example/</id><summary type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</summary><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/posts/2012/février/29/second-article/</id><summary type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log - misc</title><link href="http://blog.notmyidea.org/" 
rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/misc.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-11-30T00:00:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/posts/2012/novembre/30/filename_metadata-example/</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; +</content></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/posts/2012/février/29/second-article/</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; </content><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:/posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; @@ -47,6 +45,5 @@ pelican.conf, it will have nothing in default.&lt;/p&gt; &lt;/pre&gt;&lt;/div&gt; &lt;p&gt;Lovely.&lt;/p&gt; &lt;/div&gt; -</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><summary type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; -</summary><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; +</content></entry><entry><title>The baz tag</title><link href="http://blog.notmyidea.org/tag/baz.html" rel="alternate"></link><published>2010-03-14T00:00:00+01:00</published><updated>2010-03-14T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-03-14:/tag/baz.html</id><content type="html">&lt;p&gt;This article overrides the listening of the articles under the &lt;em&gt;baz&lt;/em&gt; tag.&lt;/p&gt; </content></entry></feed> \ No newline at end of file
Full Atom feeds contain empty summaries When providing the full content of articles in an Atom feed, the generated file contains an empty `summary` in each entry. It should only contain a `content` and no `summary`. According to the [Atom RFC](https://tools.ietf.org/html/rfc4287): > It is advisable that each atom:entry element contain a non-empty atom:title element, a non-empty atom:content element when that element is present, and a non-empty atom:summary element when the entry contains no atom:content element. The [W3C validator](https://validator.w3.org/feed/) raises a warning when it finds such empty summaries. Sample configuration: ```python FEED_DOMAIN = SITEURL FEED_ALL_ATOM = 'feeds/atom.xml' FEED_ALL_RSS = 'feeds/rss.xml' ATOM_FEED_SUMMARY_ONLY = False RSS_FEED_SUMMARY_ONLY = False CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None AUTHOR_FEED_ATOM = None AUTHOR_FEED_RSS = None SUMMARY_MAX_LENGTH = 0 ``` Excerpt of the generated Atom feed: ```xml <entry> <title>Foo</title> <link href="https://foo.com/bar.html" rel="alternate"></link> <published>2018-03-13T18:27:00+01:00</published> <updated>2018-03-13T18:27:00+01:00</updated> <author> <name>Foo</name> </author> <id>https://foo.com/bar.html</id> <summary type="html"></summary> <content type="html">This is the content.</content> </entry> ```
> SUMMARY_MAX_LENGTH = 50 > When creating a short summary of an article, this will be the default length (measured in words) of the text created. This only applies if your content does not otherwise specify a summary. Setting to None will cause the summary to be a copy of the original content. You should use `None`. Setting it to `0` means, zero words, therefore empty summary. According to the Atom specification it is recommended to have either a content or a summary. Setting `SUMMARY_MAX_LENGTH = None` copies the content into the summary, which makes little sense. If memory serves, the current behavior exists in part because @eevee [wanted both content and summary fields](https://github.com/getpelican/pelican/issues/1886). Unless the RFC/spec goes out of its way to explicitly state that a feed should never, ever, under any circumstance, contain both content and summary fields, I'm inclined to believe they can both peacefully co-exist. As for what happens when `SUMMARY_MAX_LENGTH = None`, I suppose the proper behavior should be that the `<summary>` field is suppressed entirely? The behavior that is in my opinion best is: ```python if not ATOM_FEED_SUMMARY_ONLY: if SUMMARY_MAX_LENGTH: set_short_summary() set_full_content() else: if SUMMARY_MAX_LENGTH: set_short_summary() else: error('At least a summary or a content is required') ``` In the final output if a tag is empty it should not be present in the xml (no empty `<summary></summary>`. This behavior is valid for Atom but not RSS, as RSS does not have a separate tag for summary and content.
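A minimal sketch of the behaviour proposed in the discussion above, written as a plain helper rather than Pelican's actual writer API; the function name `build_entry_fields` and its arguments are illustrative assumptions only:

```python
def build_entry_fields(full_content, summary_only, summary_max_length,
                       make_summary):
    """Decide which fields an Atom entry should carry.

    Only non-empty fields are returned, so an empty <summary> tag is never
    emitted into the generated feed.
    """
    fields = {}
    if summary_only:
        if not summary_max_length:
            raise ValueError('At least a summary or a content is required')
        fields['summary'] = make_summary(full_content, summary_max_length)
    else:
        if summary_max_length:
            fields['summary'] = make_summary(full_content, summary_max_length)
        fields['content'] = full_content
    # Drop anything that ended up empty instead of writing an empty tag.
    return {name: value for name, value in fields.items() if value}


def truncate(text, words):
    # Crude word-based summary, purely for the demonstration below.
    return ' '.join(text.split()[:words])


print(build_entry_fields('This is the content.', False, 0, truncate))
# -> {'content': 'This is the content.'}  (full content, no empty summary)
```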
2018-10-03T19:56:01Z
[]
[]
getpelican/pelican
2,432
getpelican__pelican-2432
[ "2383" ]
0da7ac677aec6f104f99f12d9c5dd4777989d7a7
diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -302,17 +302,21 @@ def generate_feeds(self, writer): """Generate the feeds from the current context, and output files.""" if self.settings.get('FEED_ATOM'): - writer.write_feed(self.articles, self.context, - self.settings['FEED_ATOM'], - self.settings.get('FEED_ATOM_URL', - self.settings['FEED_ATOM'])) + writer.write_feed( + self.articles, + self.context, + self.settings['FEED_ATOM'], + self.settings.get('FEED_ATOM_URL', self.settings['FEED_ATOM']) + ) if self.settings.get('FEED_RSS'): - writer.write_feed(self.articles, self.context, - self.settings['FEED_RSS'], - self.settings.get('FEED_RSS_URL', - self.settings['FEED_RSS']), - feed_type='rss') + writer.write_feed( + self.articles, + self.context, + self.settings['FEED_RSS'], + self.settings.get('FEED_RSS_URL', self.settings['FEED_RSS']), + feed_type='rss' + ) if (self.settings.get('FEED_ALL_ATOM') or self.settings.get('FEED_ALL_RSS')): @@ -323,80 +327,105 @@ def generate_feeds(self, writer): order_by=self.settings['ARTICLE_ORDER_BY']) if self.settings.get('FEED_ALL_ATOM'): - writer.write_feed(all_articles, self.context, - self.settings['FEED_ALL_ATOM'], - self.settings.get( - 'FEED_ALL_ATOM_URL', - self.settings['FEED_ALL_ATOM'])) + writer.write_feed( + all_articles, + self.context, + self.settings['FEED_ALL_ATOM'], + self.settings.get('FEED_ALL_ATOM_URL', + self.settings['FEED_ALL_ATOM']) + ) if self.settings.get('FEED_ALL_RSS'): - writer.write_feed(all_articles, self.context, - self.settings['FEED_ALL_RSS'], - self.settings.get( - 'FEED_ALL_RSS_URL', + writer.write_feed( + all_articles, + self.context, + self.settings['FEED_ALL_RSS'], + self.settings.get('FEED_ALL_RSS_URL', self.settings['FEED_ALL_RSS']), - feed_type='rss') + feed_type='rss' + ) for cat, arts in self.categories: if self.settings.get('CATEGORY_FEED_ATOM'): - writer.write_feed(arts, self.context, - self.settings['CATEGORY_FEED_ATOM'] - % cat.slug, - self.settings.get( - 'CATEGORY_FEED_ATOM_URL', - self.settings['CATEGORY_FEED_ATOM']) - % cat.slug, feed_title=cat.name) + writer.write_feed( + arts, + self.context, + self.settings['CATEGORY_FEED_ATOM'].format(slug=cat.slug), + self.settings.get( + 'CATEGORY_FEED_ATOM_URL', + self.settings['CATEGORY_FEED_ATOM']).format( + slug=cat.slug + ), + feed_title=cat.name + ) if self.settings.get('CATEGORY_FEED_RSS'): - writer.write_feed(arts, self.context, - self.settings['CATEGORY_FEED_RSS'] - % cat.slug, - self.settings.get( - 'CATEGORY_FEED_RSS_URL', - self.settings['CATEGORY_FEED_RSS']) - % cat.slug, feed_title=cat.name, - feed_type='rss') + writer.write_feed( + arts, + self.context, + self.settings['CATEGORY_FEED_RSS'].format(slug=cat.slug), + self.settings.get( + 'CATEGORY_FEED_RSS_URL', + self.settings['CATEGORY_FEED_RSS']).format( + slug=cat.slug + ), + feed_title=cat.name, + feed_type='rss' + ) for auth, arts in self.authors: if self.settings.get('AUTHOR_FEED_ATOM'): - writer.write_feed(arts, self.context, - self.settings['AUTHOR_FEED_ATOM'] - % auth.slug, - self.settings.get( - 'AUTHOR_FEED_ATOM_URL', - self.settings['AUTHOR_FEED_ATOM']) - % auth.slug, feed_title=auth.name) + writer.write_feed( + arts, + self.context, + self.settings['AUTHOR_FEED_ATOM'].format(slug=auth.slug), + self.settings.get( + 'AUTHOR_FEED_ATOM_URL', + self.settings['AUTHOR_FEED_ATOM'] + ).format(slug=auth.slug), + feed_title=auth.name + ) if self.settings.get('AUTHOR_FEED_RSS'): - writer.write_feed(arts, self.context, - 
self.settings['AUTHOR_FEED_RSS'] - % auth.slug, - self.settings.get( - 'AUTHOR_FEED_RSS_URL', - self.settings['AUTHOR_FEED_RSS']) - % auth.slug, feed_title=auth.name, - feed_type='rss') + writer.write_feed( + arts, + self.context, + self.settings['AUTHOR_FEED_RSS'].format(slug=auth.slug), + self.settings.get( + 'AUTHOR_FEED_RSS_URL', + self.settings['AUTHOR_FEED_RSS'] + ).format(slug=auth.slug), + feed_title=auth.name, + feed_type='rss' + ) if (self.settings.get('TAG_FEED_ATOM') or self.settings.get('TAG_FEED_RSS')): for tag, arts in self.tags.items(): if self.settings.get('TAG_FEED_ATOM'): - writer.write_feed(arts, self.context, - self.settings['TAG_FEED_ATOM'] - % tag.slug, - self.settings.get( - 'TAG_FEED_ATOM_URL', - self.settings['TAG_FEED_ATOM']) - % tag.slug, feed_title=tag.name) + writer.write_feed( + arts, + self.context, + self.settings['TAG_FEED_ATOM'].format(slug=tag.slug), + self.settings.get( + 'TAG_FEED_ATOM_URL', + self.settings['TAG_FEED_ATOM'] + ).format(slug=tag.slug), + feed_title=tag.name + ) if self.settings.get('TAG_FEED_RSS'): - writer.write_feed(arts, self.context, - self.settings['TAG_FEED_RSS'] % tag.slug, - self.settings.get( - 'TAG_FEED_RSS_URL', - self.settings['TAG_FEED_RSS']) - % tag.slug, feed_title=tag.name, - feed_type='rss') + writer.write_feed( + arts, + self.context, + self.settings['TAG_FEED_RSS'].format(slug=tag.slug), + self.settings.get( + 'TAG_FEED_RSS_URL', + self.settings['TAG_FEED_RSS'] + ).format(slug=tag.slug), + feed_title=tag.name, + feed_type='rss' + ) if (self.settings.get('TRANSLATION_FEED_ATOM') or self.settings.get('TRANSLATION_FEED_RSS')): @@ -409,19 +438,27 @@ def generate_feeds(self, writer): items, order_by=self.settings['ARTICLE_ORDER_BY']) if self.settings.get('TRANSLATION_FEED_ATOM'): writer.write_feed( - items, self.context, - self.settings['TRANSLATION_FEED_ATOM'] % lang, + items, + self.context, + self.settings['TRANSLATION_FEED_ATOM'] + .format(lang=lang), self.settings.get( 'TRANSLATION_FEED_ATOM_URL', - self.settings['TRANSLATION_FEED_ATOM']) % lang) + self.settings['TRANSLATION_FEED_ATOM'] + ).format(lang=lang), + ) if self.settings.get('TRANSLATION_FEED_RSS'): writer.write_feed( - items, self.context, - self.settings['TRANSLATION_FEED_RSS'] % lang, + items, + self.context, + self.settings['TRANSLATION_FEED_RSS'] + .format(lang=lang), self.settings.get( 'TRANSLATION_FEED_RSS_URL', - self.settings['TRANSLATION_FEED_RSS']) % lang, - feed_type='rss') + self.settings['TRANSLATION_FEED_RSS'] + ).format(lang=lang), + feed_type='rss' + ) def generate_articles(self, write): """Generate the articles.""" diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -45,10 +45,10 @@ def load_source(name, path): 'THEME_STATIC_DIR': 'theme', 'THEME_STATIC_PATHS': ['static', ], 'FEED_ALL_ATOM': posix_join('feeds', 'all.atom.xml'), - 'CATEGORY_FEED_ATOM': posix_join('feeds', '%s.atom.xml'), - 'AUTHOR_FEED_ATOM': posix_join('feeds', '%s.atom.xml'), - 'AUTHOR_FEED_RSS': posix_join('feeds', '%s.rss.xml'), - 'TRANSLATION_FEED_ATOM': posix_join('feeds', 'all-%s.atom.xml'), + 'CATEGORY_FEED_ATOM': posix_join('feeds', '{slug}.atom.xml'), + 'AUTHOR_FEED_ATOM': posix_join('feeds', '{slug}.atom.xml'), + 'AUTHOR_FEED_RSS': posix_join('feeds', '{slug}.rss.xml'), + 'TRANSLATION_FEED_ATOM': posix_join('feeds', 'all-{lang}.atom.xml'), 'FEED_MAX_ITEMS': '', 'RSS_FEED_SUMMARY_ONLY': True, 'SITEURL': '', @@ -385,6 +385,26 @@ def handle_deprecated_settings(settings): settings[f + '_REGEX_SUBSTITUTIONS'] = 
regex_subs settings.pop(f + '_SUBSTITUTIONS', None) + # `%s` -> '{slug}` or `{lang}` in FEED settings + for key in ['TRANSLATION_FEED_ATOM', + 'TRANSLATION_FEED_RSS' + ]: + if key in settings and '%s' in settings[key]: + logger.warning('%%s usage in %s is deprecated, use {lang} ' + 'instead. Falling back to default.', key) + settings[key] = DEFAULT_CONFIG[key] + for key in ['AUTHOR_FEED_ATOM', + 'AUTHOR_FEED_RSS', + 'CATEGORY_FEED_ATOM', + 'CATEGORY_FEED_RSS', + 'TAG_FEED_ATOM', + 'TAG_FEED_RSS', + ]: + if key in settings and '%s' in settings[key]: + logger.warning('%%s usage in %s is deprecated, use {slug} ' + 'instead. Falling back to default.', key) + settings[key] = DEFAULT_CONFIG[key] + return settings diff --git a/samples/pelican.conf.py b/samples/pelican.conf.py --- a/samples/pelican.conf.py +++ b/samples/pelican.conf.py @@ -18,7 +18,7 @@ DEFAULT_DATE = (2012, 3, 2, 14, 1, 1) FEED_ALL_RSS = 'feeds/all.rss.xml' -CATEGORY_FEED_RSS = 'feeds/%s.rss.xml' +CATEGORY_FEED_RSS = 'feeds/{slug}.rss.xml' LINKS = (('Biologeek', 'http://biologeek.org'), ('Filyb', "http://filyb.info/"), diff --git a/samples/pelican.conf_FR.py b/samples/pelican.conf_FR.py --- a/samples/pelican.conf_FR.py +++ b/samples/pelican.conf_FR.py @@ -22,7 +22,7 @@ ARTICLE_SAVE_AS = ARTICLE_URL + 'index.html' FEED_ALL_RSS = 'feeds/all.rss.xml' -CATEGORY_FEED_RSS = 'feeds/%s.rss.xml' +CATEGORY_FEED_RSS = 'feeds/{slug}.rss.xml' LINKS = (('Biologeek', 'http://biologeek.org'), ('Filyb', "http://filyb.info/"),
diff --git a/pelican/tests/default_conf.py b/pelican/tests/default_conf.py --- a/pelican/tests/default_conf.py +++ b/pelican/tests/default_conf.py @@ -12,7 +12,7 @@ DEFAULT_PAGINATION = 2 FEED_RSS = 'feeds/all.rss.xml' -CATEGORY_FEED_RSS = 'feeds/%s.rss.xml' +CATEGORY_FEED_RSS = 'feeds/{slug}.rss.xml' LINKS = (('Biologeek', 'http://biologeek.org'), ('Filyb', "http://filyb.info/"),
Convert FEED settings from `%s` to `{slug}`-style strings. Closes #2106
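For reference, the settings change boils down to the following before/after (values taken from the diff above); the `format()` calls illustrate how the generators now expand the placeholders:

```python
# Old style (pre-PR): positional %s interpolation
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
TRANSLATION_FEED_ATOM = 'feeds/all-%s.atom.xml'

# New style (this PR): named placeholders
CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml'
TRANSLATION_FEED_ATOM = 'feeds/all-{lang}.atom.xml'

# How the generator expands them (simplified):
print(CATEGORY_FEED_ATOM.format(slug='misc'))      # feeds/misc.atom.xml
print(TRANSLATION_FEED_ATOM.format(lang='fr'))     # feeds/all-fr.atom.xml
```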
@MinchinWeb: It appears Travis is reporting test failures. Could you take a look? @justinmayer Success! Flake8 issues have been fixed. I have Pandoc v2.1.3 install locally and so some test fail due to that. See issues #2255 and #2322, and WIP PR #2289. There are several other tests that fail locally (on Windows) for various reasons; some are things I haven't touched, so I'm leaving alone; some are due to 'symbolic link privilege not help'; some are due to Windows filepaths being written different than POSIX paths (these I skip now on Windows). These, as a whole, I'm ignoring. The other broken test was a simple typo. Fixed now! Python 3.7 has been released, but was failing to download on Travis; it can be added back at a later time. Would any @getpelican/reviewers be willing to take a moment and review this pull request? Yay, I personally welcome the change to `{}`-style! (Was also wondering myself why feeds use `%s` while everything else is `{}`-style.) But I think this should be done in a backwards-compatible way. With your proposed change, all setting that had the old-style ```py CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml' ``` would now result in `feeds/%s.atom.xml` put *verbatim* into the generated output, **without any warning** that something's wrong. Discovering such breakage in each affected site can then take months and that's **bad**. So instead just calling `format()`, I'd propose something like this: ```py self.settings['CATEGORY_FEED_ATOM']).format(slug=category.slug).replace('%s', category.slug) ``` And similarly elsewhere (maybe `replace()` first? not sure). That'll work for both approaches, with the `replace()` being removed some years later once the backwards compatibility is not needed anymore. Besides that, the diff contains a lot of unnecessary formatting changes that make it *very hard* to review. Would it be possible to clean up the diff to not have them? Also, unrelated changes such as `posix_path()` or Windows test fixes could be better discussed in a separate PR. @mosra : I like you suggestion of how to support the two styles. I searched for examples of how to do this, and never did find anything useful... Regarding the "extra" pieces, some of the reformatting is from the different lengths of `{slug}` vs `%s`, some was for line-length consistency. The second group could be removed, but there is no automated way to do it, I'd basically have to completely rebuilding the patch. The `posix_path()` changes again were for consistency. If you really want these two pieces out, I could maybe pull them out to a new PR, and then rebase this one on top of it. Is is worth the extra effort? The Windows testing issues are similar (they could be pulled to a separate PR, and this one rebased on to it), but they were needed to get this far: when I first ran the test suite locally (on my Windows machine), a huge swatch of the tests failed, and so it was very hard to know what, if any, tests I had broken. Making the tests behave more sanely was the faster alternative to spamming Travis and crossing my fingers. @justinmayer @mosra : both old-style and new-style text substitutes are now supported! ``` self.settings['CATEGORY_FEED_ATOM'].replace('%s', '{slug}') .format(slug=cat.slug), ``` Sorry for late-ish response but, I don't really like the `%s` replacement. 
For one, it'd fail if you have `{}`s around `%s`: ```python >>> '%s'.replace('%s', '{slug}').format(slug='foo') 'foo' >>> '{%s}'.replace('%s', '{slug}').format(slug='foo') '{slug}' >>> '{%s'.replace('%s', '{slug}').format(slug='foo') Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: Single '}' encountered in format string ``` Second, I'd rather have these handled when the settings file is read (in `settings.py`). And I'd propose throwing a warning and falling back to the default in case `%s` is in these settings. @avaris I like the idea of moving the conversion to the `settings.py` file. I'm not sure what to do about the errors you mention, and I haven't found an example online support both string styles at the same time. Do we throw an error on `%s` strings, and make the users upgrade? Do we throw a warning, try the conversion, and hope for the best? A check in `settings.py`. If `'%s'` is in the string, then throw a warning about deprecation, replace it from the `DEFAULT_CONFIG`. Something like: ```python for KEY in list_of_feed_setting_keys: if '%s' in setting[KEY]: logger.warning('%%s usage in %s is deprecated, use {name} instead. Falling back to default'. KEY) setting[KEY] = DEFAULT_CONFIG[KEY] ``` Very similar to [this](https://github.com/getpelican/pelican/blob/master/pelican/settings.py#L421-L426). @avaris @mosra : I've moved to avaris' suggestion of complaining of old style settings when the settings are first loaded and falling back to the default in this case. I'm of slightly mixed feeling as it requires the user to actively update their settings to work with the new Pelican version, and doesn't support backwards-compatibility, but on the other hand it avoids a bunch of odd corner cases that are hard to detect and resolve automatically. But "explicit is better than implicit", so let's go with it. Does something in the documentation need to be added to explain this change? The failing test is due to a change in how Markdown v3 outputs HTML for footnotes. The fix has been submitted separately as PR #2417. > Does something in the documentation need to be added to explain this change? [Changelog](https://github.com/getpelican/pelican/blob/master/docs/changelog.rst) is usually the place for such stuff. ... and the release announcement post. 😊 Changelog updated! @avaris / @mosra: Any further comments on @MinchinWeb's pull request? Stuff related to this PR is OK, but there are a lot of unrelated stuff (some unwarranted formatting changes, tox?? etc) attached to it. Is it possible to clean this up a bit? @MinchinWeb: I agree it's best to keep pull requests as discrete and digestible as possible. Could you take a look and perhaps move anything not directly related to issue 2106 into separate pull requests? It can be done, but it does seem like "busy work" rather than "fun work". And I don't understand what it adds. It's more about what it removes, which is the unrelated stuff reviewers have to go over otherwise.
2018-11-03T03:29:35Z
[]
[]
getpelican/pelican
2,449
getpelican__pelican-2449
[ "2448" ]
3596e04639035a70c164ccf4a739d419c08b09b5
diff --git a/pelican/tools/pelican_import.py b/pelican/tools/pelican_import.py --- a/pelican/tools/pelican_import.py +++ b/pelican/tools/pelican_import.py @@ -739,9 +739,8 @@ def download_attachments(output_path, urls): return locations -def is_pandoc_needed(fields): - in_markup_idx = 9 - return filter(lambda f: f[in_markup_idx] in ('html', 'wp-html'), fields) +def is_pandoc_needed(in_markup): + return in_markup in ('html', 'wp-html') def get_pandoc_version(): @@ -772,11 +771,7 @@ def fields2pelican( wp_custpost=False, wp_attach=False, attachments=None): pandoc_version = get_pandoc_version() - - if is_pandoc_needed(fields) and not pandoc_version: - error = ('Pandoc must be installed to complete the ' - 'requested import action.') - exit(error) + posts_require_pandoc = [] settings = read_settings() slug_subs = settings['SLUG_REGEX_SUBSTITUTIONS'] @@ -785,6 +780,9 @@ def fields2pelican( kind, in_markup) in fields: if filter_author and filter_author != author: continue + if is_pandoc_needed(in_markup) and not pandoc_version: + posts_require_pandoc.append(filename) + slug = not disable_slugs and filename or None if wp_attach and attachments: @@ -869,6 +867,11 @@ def fields2pelican( with open(out_filename, 'w', encoding='utf-8') as fs: fs.write(header + content) + + if posts_require_pandoc: + logger.error("Pandoc must be installed to import the following posts:" + "\n {}".format("\n ".join(posts_require_pandoc))) + if wp_attach and attachments and None in attachments: print("downloading attachments that don't have a parent post") urls = attachments[None]
diff --git a/pelican/tests/test_importer.py b/pelican/tests/test_importer.py --- a/pelican/tests/test_importer.py +++ b/pelican/tests/test_importer.py @@ -44,7 +44,7 @@ class TestBloggerXmlImporter(unittest.TestCase): def setUp(self): self.old_locale = locale.setlocale(locale.LC_ALL) locale.setlocale(locale.LC_ALL, str('C')) - self.posts = list(blogger2fields(BLOGGER_XML_SAMPLE)) + self.posts = blogger2fields(BLOGGER_XML_SAMPLE) def tearDown(self): locale.setlocale(locale.LC_ALL, self.old_locale) @@ -53,14 +53,15 @@ def test_recognise_kind_and_title(self): """Check that importer only outputs pages, articles and comments, that these are correctly identified and that titles are correct. """ - kinds = {x[8] for x in self.posts} + test_posts = list(self.posts) + kinds = {x[8] for x in test_posts} self.assertEqual({'page', 'article', 'comment'}, kinds) - page_titles = {x[0] for x in self.posts if x[8] == 'page'} + page_titles = {x[0] for x in test_posts if x[8] == 'page'} self.assertEqual({'Test page', 'Test page 2'}, page_titles) - article_titles = {x[0] for x in self.posts if x[8] == 'article'} + article_titles = {x[0] for x in test_posts if x[8] == 'article'} self.assertEqual({'Black as Egypt\'s Night', 'The Steel Windpipe'}, article_titles) - comment_titles = {x[0] for x in self.posts if x[8] == 'comment'} + comment_titles = {x[0] for x in test_posts if x[8] == 'comment'} self.assertEqual({'Mishka, always a pleasure to read your ' 'adventures!...'}, comment_titles) @@ -69,15 +70,16 @@ def test_recognise_status_with_correct_filename(self): """Check that importerer outputs only statuses 'published' and 'draft', that these are correctly identified and that filenames are correct. """ - statuses = {x[7] for x in self.posts} + test_posts = list(self.posts) + statuses = {x[7] for x in test_posts} self.assertEqual({'published', 'draft'}, statuses) - draft_filenames = {x[2] for x in self.posts if x[7] == 'draft'} + draft_filenames = {x[2] for x in test_posts if x[7] == 'draft'} # draft filenames are id-based self.assertEqual({'page-4386962582497458967', 'post-1276418104709695660'}, draft_filenames) - published_filenames = {x[2] for x in self.posts if x[7] == 'published'} + published_filenames = {x[2] for x in test_posts if x[7] == 'published'} # published filenames are url-based, except comments self.assertEqual({'the-steel-windpipe', 'test-page', @@ -91,8 +93,8 @@ class TestWordpressXmlImporter(unittest.TestCase): def setUp(self): self.old_locale = locale.setlocale(locale.LC_ALL) locale.setlocale(locale.LC_ALL, str('C')) - self.posts = list(wp2fields(WORDPRESS_XML_SAMPLE)) - self.custposts = list(wp2fields(WORDPRESS_XML_SAMPLE, True)) + self.posts = wp2fields(WORDPRESS_XML_SAMPLE) + self.custposts = wp2fields(WORDPRESS_XML_SAMPLE, True) def tearDown(self): locale.setlocale(locale.LC_ALL, self.old_locale) @@ -242,6 +244,8 @@ def test_wp_custpost_true_dirpage_false(self): self.assertFalse(out_name.endswith(filename)) def test_can_toggle_raw_html_code_parsing(self): + test_posts = list(self.posts) + def r(f): with open(f, encoding='utf-8') as infile: return infile.read() @@ -250,16 +254,16 @@ def r(f): with temporary_folder() as temp: rst_files = (r(f) for f - in silent_f2p(self.posts, 'markdown', temp)) + in silent_f2p(test_posts, 'markdown', temp)) self.assertTrue(any('<iframe' in rst for rst in rst_files)) rst_files = (r(f) for f - in silent_f2p(self.posts, 'markdown', + in silent_f2p(test_posts, 'markdown', temp, strip_raw=True)) self.assertFalse(any('<iframe' in rst for rst in rst_files)) # no 
effect in rst - rst_files = (r(f) for f in silent_f2p(self.posts, 'rst', temp)) + rst_files = (r(f) for f in silent_f2p(test_posts, 'rst', temp)) self.assertFalse(any('<iframe' in rst for rst in rst_files)) - rst_files = (r(f) for f in silent_f2p(self.posts, 'rst', temp, + rst_files = (r(f) for f in silent_f2p(test_posts, 'rst', temp, strip_raw=True)) self.assertFalse(any('<iframe' in rst for rst in rst_files))
pelican-importer: pandoc check **eats all content** before it is written. fields2pelican is passed a generator called 'fields': https://github.com/getpelican/pelican/blob/master/pelican/tools/pelican_import.py#L768 The check to see if pandoc is needed uses the 'in' operator, consuming this generator: https://github.com/getpelican/pelican/blob/master/pelican/tools/pelican_import.py#L744 The loop afterwards is passed the now-empty generator and so won't write any files: https://github.com/getpelican/pelican/blob/master/pelican/tools/pelican_import.py#L784-L785
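A minimal, self-contained reproduction of the failure mode described above; the post names and markup values are made up for illustration, and the second half shows the shape of the fix applied in the patch:

```python
# Scanning the generator up front exhausts it, so the write loop that
# follows sees nothing.
def make_fields():
    yield ('post-1', 'wp-html')
    yield ('post-2', 'markdown')

fields = make_fields()

# Up-front "is pandoc needed?" check that materialises the generator:
needs_pandoc = bool([m for _, m in fields if m in ('html', 'wp-html')])

# The generator is now exhausted, so no posts would be written:
written = [name for name, _ in fields]
print(needs_pandoc, written)          # True []

# Shape of the fix in the patch above: decide per post inside the single
# write loop instead of pre-scanning the generator.
posts_require_pandoc = []
for name, markup in make_fields():
    if markup in ('html', 'wp-html'):
        posts_require_pandoc.append(name)
    # ... the post would be written here ...
print(posts_require_pandoc)           # ['post-1']
```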
2018-11-15T15:13:26Z
[]
[]
getpelican/pelican
2,638
getpelican__pelican-2638
[ "1902" ]
643bccc497183d9cc2d7c80423fbd8e088dd3dad
diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -155,16 +155,15 @@ def get_files(self, paths, exclude=[], extensions=None): if os.path.isdir(root): for dirpath, dirs, temp_files in os.walk( - root, followlinks=True): - drop = [] + root, topdown=True, followlinks=True): excl = exclusions_by_dirpath.get(dirpath, ()) - for d in dirs: + # We copy the `dirs` list as we will modify it in the loop: + for d in list(dirs): if (d in excl or any(fnmatch.fnmatch(d, ignore) for ignore in ignores)): - drop.append(d) - for d in drop: - dirs.remove(d) + if d in dirs: + dirs.remove(d) reldir = os.path.relpath(dirpath, self.path) for f in temp_files:
diff --git a/pelican/tests/support.py b/pelican/tests/support.py --- a/pelican/tests/support.py +++ b/pelican/tests/support.py @@ -188,7 +188,7 @@ class LogCountHandler(BufferingHandler): """Capturing and counting logged messages.""" def __init__(self, capacity=1000): - logging.handlers.BufferingHandler.__init__(self, capacity) + super(LogCountHandler, self).__init__(capacity) def count_logs(self, msg=None, level=None): return len([
RFE: Make it possible to include content files from other content files. Hi guys, I have several pages which have the same boilerplate text. It's a warning/disclaimer notice which gets included where necessary. As far as I can see, it is not possible to do this inside my content files. For example, instead of linking to another file with `[disclaimer]({filename}disclaimer.md)`, I want to be able to write `{include}disclaimer.md` and have the text automatically included in the current page being processed. Please let me know if this exists (or partially exists). I intend to work on this feature because I need it.
hello? Is this syntax can only be used in markdown files? or any kind of textfiles, like .rst or plain .txt, in the content folder? I have noticed that this is specially useful for organizing large posts. I've used a .html file to test it. As far as I can tell the curly braces syntax (also {filename}, {category}) can be used in any kind of text file. I still need to hear code review from the project maintainers though. @mitchtbaum - I've received a notification about you commenting on this issue but the comment got lost somehow. @atodorov: Sorry about that. I had started to comment, and somehow this form submitted before finishing it, so I wrote another one saying that happened and deleted both. #tangled-web-problems I looked further into this, and aside from Pelican's current link syntax and potential additions and improvements which we can address in another issue, I believe I have already produced a good solution for your use case. I need to fix upstream dependencies so I can make stable links to it (here and in Pelican's plugin meta-repo) and finish documentation, so I am working on that now. // Temporarily, you can find usable, pre-released code in https://github.com/mitchtbaum?tab=repositories // In a nutshell, you would create a 'notice' (article with that category) for each one you want to use, and you would **reference** your desired notice from any article with a `notice: warning` or `notice: whatever` metadata entry, and then in your article template or specific category template, you would include something like this chunk: ``` {% if article.notice %} {% for reference in article.notice %} <div id="notice"> <b>Notice:</b> {{ reference.body }} </div> {% endfor %} {% endif %} ``` How would that work for you? --- Aside: You can see a small writeup before I had worked out this `reference` plugin's full design in #1169. I will mark that as complete when I get this code into a repo where we can continue to work on it, #1488. Note: edited Jinja example to add div wrapper and changed styling to wrap only leading text saying what it is. > How would that work for you? It won't as far as I can understand. Correct me if I'm wrong but your solution requires me to have a template in which to reference the text I need to include, right ? What I want to do is include text inside my content files (e.g. html pages or markdown posts). I don't want to mess about with templates. References take place only in content. They are simple key-value pairs going from one piece of content to another using metadata. So for instance, `/content/notice/bring-water.md` could have body text "Bring a water bottle!", and `/content/travel/kalahari.md` could have `title: Kalahari Desert`, `notice: bring-water`, and `body` text like "It gets dry there, like any desert." When your content (markdown, etc) gets rendered into a desired output (html, etc), it will go wherever you put it and have whatever style you give it, as per your theme. imho, data feels most comfortable (so to speak) when it is kept separate from any logic about where to put it and how it could look. @atodorov > I have several pages which have the same boiler plate text. It's a warning/disclaimer notice which gets included where necessary I wrote the [jinja2content](https://github.com/leotrs/pelican-jinja2content) plugin for this purpose. You can see it in action [here](https://github.com/leotrs/erdos/blob/master/content/challenges/graphs/nodes_edges.md). The file includes several others by using a jinja2 `{% include %}` statement. 
The plugin also provides full jinja2 functionality (before the theme's templates are rendered, so the content that is passed onto the plugin does not have the `include`s any more). cc. @mitchtbaum PR #1909 was submitted to address this and is currently awaiting review from @getpelican/reviewers.
2019-10-16T09:06:23Z
[]
[]
getpelican/pelican
2,644
getpelican__pelican-2644
[ "2641" ]
772005f431e85abe9e4b09ad5bba9f87fe5a5b5e
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -9,14 +9,20 @@ import time import traceback from collections.abc import Iterable +# Combines all paths to `pelican` package accessible from `sys.path` +# Makes it possible to install `pelican` and namespace plugins into different +# locations in the file system (e.g. pip with `-e` or `--user`) +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) # pelican.log has to be the first pelican module to be loaded # because logging.setLoggerClass has to be called before logging.getLogger from pelican.log import init as init_logging -from pelican import signals # noqa -from pelican.generators import (ArticlesGenerator, PagesGenerator, - SourceFileGenerator, StaticGenerator, - TemplatePagesGenerator) +from pelican.generators import (ArticlesGenerator, # noqa: I100 + PagesGenerator, SourceFileGenerator, + StaticGenerator, TemplatePagesGenerator) +from pelican.plugins import signals +from pelican.plugins._utils import load_plugins from pelican.readers import Readers from pelican.server import ComplexHTTPRequestHandler, RootedHTTPServer from pelican.settings import read_settings @@ -62,27 +68,14 @@ def init_path(self): sys.path.insert(0, '') def init_plugins(self): - self.plugins = [] - logger.debug('Temporarily adding PLUGIN_PATHS to system path') - _sys_path = sys.path[:] - for pluginpath in self.settings['PLUGIN_PATHS']: - sys.path.insert(0, pluginpath) - for plugin in self.settings['PLUGINS']: - # if it's a string, then import it - if isinstance(plugin, str): - logger.debug("Loading plugin `%s`", plugin) - try: - plugin = __import__(plugin, globals(), locals(), 'module') - except ImportError as e: - logger.error( - "Cannot load plugin `%s`\n%s", plugin, e) - continue - - logger.debug("Registering plugin `%s`", plugin.__name__) - plugin.register() - self.plugins.append(plugin) - logger.debug('Restoring system path') - sys.path = _sys_path + self.plugins = load_plugins(self.settings) + for plugin in self.plugins: + logger.debug('Registering plugin `%s`', plugin.__name__) + try: + plugin.register() + except Exception as e: + logger.error('Cannot register plugin `%s`\n%s', + plugin.__name__, e) def run(self): """Run the generators and return""" diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -10,7 +10,7 @@ import pytz -from pelican import signals +from pelican.plugins import signals from pelican.settings import DEFAULT_CONFIG from pelican.utils import (deprecated_attribute, memoized, path_to_url, posixize_path, sanitised_join, set_date_tzinfo, diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -13,9 +13,9 @@ from jinja2 import (BaseLoader, ChoiceLoader, Environment, FileSystemLoader, PrefixLoader, TemplateNotFound) -from pelican import signals from pelican.cache import FileStampDataCacher from pelican.contents import Article, Page, Static +from pelican.plugins import signals from pelican.readers import Readers from pelican.utils import (DateFormatter, copy, mkdir_p, order_content, posixize_path, process_translations) diff --git a/pelican/plugins/_utils.py b/pelican/plugins/_utils.py new file mode 100644 --- /dev/null +++ b/pelican/plugins/_utils.py @@ -0,0 +1,85 @@ +import importlib +import importlib.machinery +import importlib.util +import logging +import pkgutil + + +logger = logging.getLogger(__name__) + + +def iter_namespace(ns_pkg): + # 
Specifying the second argument (prefix) to iter_modules makes the + # returned name an absolute name instead of a relative one. This allows + # import_module to work without having to do additional modification to + # the name. + return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".") + + +def get_namespace_plugins(ns_pkg=None): + if ns_pkg is None: + import pelican.plugins as ns_pkg + + return { + name: importlib.import_module(name) + for finder, name, ispkg + in iter_namespace(ns_pkg) + if ispkg + } + + +def list_plugins(ns_pkg=None): + from pelican.log import init as init_logging + init_logging(logging.INFO) + ns_plugins = get_namespace_plugins(ns_pkg) + if ns_plugins: + logger.info('Plugins found:\n' + '\n'.join(ns_plugins)) + else: + logger.info('No plugins are installed') + + +def load_legacy_plugin(plugin, plugin_paths): + # Try to find plugin in PLUGIN_PATHS + spec = importlib.machinery.PathFinder.find_spec(plugin, plugin_paths) + if spec is None: + # If failed, try to find it in normal importable locations + spec = importlib.util.find_spec(plugin) + if spec is None: + raise ImportError('Cannot import plugin `{}`'.format(plugin)) + else: + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + +def load_plugins(settings): + logger.debug('Finding namespace plugins') + namespace_plugins = get_namespace_plugins() + if namespace_plugins: + logger.debug('Namespace plugins found:\n' + + '\n'.join(namespace_plugins)) + plugins = [] + if settings.get('PLUGINS') is not None: + for plugin in settings['PLUGINS']: + if isinstance(plugin, str): + logger.debug('Loading plugin `%s`', plugin) + # try to find in namespace plugins + if plugin in namespace_plugins: + plugin = namespace_plugins[plugin] + elif 'pelican.plugins.{}'.format(plugin) in namespace_plugins: + plugin = namespace_plugins['pelican.plugins.{}'.format( + plugin)] + # try to import it + else: + try: + plugin = load_legacy_plugin( + plugin, + settings.get('PLUGIN_PATHS', [])) + except ImportError as e: + logger.error('Cannot load plugin `%s`\n%s', plugin, e) + continue + plugins.append(plugin) + else: + plugins = list(namespace_plugins.values()) + + return plugins diff --git a/pelican/signals.py b/pelican/plugins/signals.py similarity index 96% rename from pelican/signals.py rename to pelican/plugins/signals.py --- a/pelican/signals.py +++ b/pelican/plugins/signals.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from __future__ import print_function, unicode_literals from blinker import signal diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -16,9 +16,9 @@ from docutils.writers.html4css1 import HTMLTranslator, Writer from pelican import rstdirectives # NOQA -from pelican import signals from pelican.cache import FileStampDataCacher from pelican.contents import Author, Category, Page, Tag +from pelican.plugins import signals from pelican.utils import get_date, pelican_open, posixize_path try: diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -138,7 +138,7 @@ def load_source(name, path): 'TYPOGRIFY_IGNORE_TAGS': [], 'SUMMARY_MAX_LENGTH': 50, 'PLUGIN_PATHS': [], - 'PLUGINS': [], + 'PLUGINS': None, 'PYGMENTS_RST_OPTIONS': {}, 'TEMPLATE_PAGES': {}, 'TEMPLATE_EXTENSIONS': ['.html'], diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -8,8 +8,8 @@ from jinja2 import Markup -from pelican import signals from pelican.paginator 
import Paginator +from pelican.plugins import signals from pelican.utils import (get_relative_path, is_selected_for_writing, path_to_url, sanitised_join, set_date_tzinfo) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -18,7 +18,8 @@ 'pelican = pelican.__main__:main', 'pelican-import = pelican.tools.pelican_import:main', 'pelican-quickstart = pelican.tools.pelican_quickstart:main', - 'pelican-themes = pelican.tools.pelican_themes:main' + 'pelican-themes = pelican.tools.pelican_themes:main', + 'pelican-plugins = pelican.plugins._utils:list_plugins' ] } @@ -44,7 +45,7 @@ keywords='static web site generator SSG reStructuredText Markdown', license='AGPLv3', long_description=description, - packages=['pelican', 'pelican.tools'], + packages=['pelican', 'pelican.tools', 'pelican.plugins'], package_data={ # we manually collect the package data, as opposed to using, # include_package_data=True because we don't want the tests to be
diff --git a/pelican/tests/dummy_plugins/namespace_plugin/pelican/plugins/ns_plugin/__init__.py b/pelican/tests/dummy_plugins/namespace_plugin/pelican/plugins/ns_plugin/__init__.py new file mode 100644 --- /dev/null +++ b/pelican/tests/dummy_plugins/namespace_plugin/pelican/plugins/ns_plugin/__init__.py @@ -0,0 +1,5 @@ +NAME = 'namespace plugin' + + +def register(): + pass diff --git a/pelican/tests/dummy_plugins/normal_plugin/normal_plugin/__init__.py b/pelican/tests/dummy_plugins/normal_plugin/normal_plugin/__init__.py new file mode 100644 --- /dev/null +++ b/pelican/tests/dummy_plugins/normal_plugin/normal_plugin/__init__.py @@ -0,0 +1,5 @@ +NAME = 'normal plugin' + + +def register(): + pass diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -10,8 +10,8 @@ from jinja2.utils import generate_lorem_ipsum from pelican.contents import Article, Author, Category, Page, Static +from pelican.plugins.signals import content_object_init from pelican.settings import DEFAULT_CONFIG -from pelican.signals import content_object_init from pelican.tests.support import (LoggedTestCase, get_context, get_settings, unittest) from pelican.utils import (path_to_url, posixize_path, truncate_html_words) diff --git a/pelican/tests/test_plugins.py b/pelican/tests/test_plugins.py new file mode 100644 --- /dev/null +++ b/pelican/tests/test_plugins.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, unicode_literals + +import os +from contextlib import contextmanager + +from pelican.plugins._utils import get_namespace_plugins, load_plugins +from pelican.tests.support import unittest + + +@contextmanager +def tmp_namespace_path(path): + '''Context manager for temporarily appending namespace plugin packages + + path: path containing the `pelican` folder + + This modifies the `pelican.__path__` and lets the `pelican.plugins` + namespace package resolve it from that. 
+ ''' + # This avoids calls to internal `pelican.plugins.__path__._recalculate()` + # as it should not be necessary + import pelican + + old_path = pelican.__path__[:] + try: + pelican.__path__.append(os.path.join(path, 'pelican')) + yield + finally: + pelican.__path__ = old_path + + +class PluginTest(unittest.TestCase): + _PLUGIN_FOLDER = os.path.join( + os.path.abspath(os.path.dirname(__file__)), + 'dummy_plugins') + _NS_PLUGIN_FOLDER = os.path.join(_PLUGIN_FOLDER, 'namespace_plugin') + _NORMAL_PLUGIN_FOLDER = os.path.join(_PLUGIN_FOLDER, 'normal_plugin') + + def test_namespace_path_modification(self): + import pelican + import pelican.plugins + old_path = pelican.__path__[:] + + # not existing path + path = os.path.join(self._PLUGIN_FOLDER, 'foo') + with tmp_namespace_path(path): + self.assertIn( + os.path.join(path, 'pelican'), + pelican.__path__) + # foo/pelican does not exist, so it won't propagate + self.assertNotIn( + os.path.join(path, 'pelican', 'plugins'), + pelican.plugins.__path__) + # verify that we restored path back + self.assertEqual(pelican.__path__, old_path) + + # existing path + with tmp_namespace_path(self._NS_PLUGIN_FOLDER): + self.assertIn( + os.path.join(self._NS_PLUGIN_FOLDER, 'pelican'), + pelican.__path__) + # /namespace_plugin/pelican exists, so it should be in + self.assertIn( + os.path.join(self._NS_PLUGIN_FOLDER, 'pelican', 'plugins'), + pelican.plugins.__path__) + self.assertEqual(pelican.__path__, old_path) + + def test_get_namespace_plugins(self): + # without plugins + ns_plugins = get_namespace_plugins() + self.assertEqual(len(ns_plugins), 0) + + # with plugin + with tmp_namespace_path(self._NS_PLUGIN_FOLDER): + ns_plugins = get_namespace_plugins() + self.assertEqual(len(ns_plugins), 1) + self.assertIn('pelican.plugins.ns_plugin', ns_plugins) + self.assertEqual( + ns_plugins['pelican.plugins.ns_plugin'].NAME, + 'namespace plugin') + + # should be back to 0 outside `with` + ns_plugins = get_namespace_plugins() + self.assertEqual(len(ns_plugins), 0) + + def test_load_plugins(self): + # no plugins + plugins = load_plugins({}) + self.assertEqual(len(plugins), 0) + + with tmp_namespace_path(self._NS_PLUGIN_FOLDER): + # with no `PLUGINS` setting, load namespace plugins + plugins = load_plugins({}) + self.assertEqual(len(plugins), 1, plugins) + self.assertEqual( + {'namespace plugin'}, + set(plugin.NAME for plugin in plugins)) + + # disable namespace plugins with `PLUGINS = []` + SETTINGS = { + 'PLUGINS': [] + } + plugins = load_plugins(SETTINGS) + self.assertEqual(len(plugins), 0, plugins) + + # using `PLUGINS` + + # normal plugin + SETTINGS = { + 'PLUGINS': ['normal_plugin'], + 'PLUGIN_PATHS': [self._NORMAL_PLUGIN_FOLDER] + } + plugins = load_plugins(SETTINGS) + self.assertEqual(len(plugins), 1, plugins) + self.assertEqual( + {'normal plugin'}, + set(plugin.NAME for plugin in plugins)) + + # namespace plugin short + SETTINGS = { + 'PLUGINS': ['ns_plugin'] + } + plugins = load_plugins(SETTINGS) + self.assertEqual(len(plugins), 1, plugins) + self.assertEqual( + {'namespace plugin'}, + set(plugin.NAME for plugin in plugins)) + + # namespace plugin long + SETTINGS = { + 'PLUGINS': ['pelican.plugins.ns_plugin'] + } + plugins = load_plugins(SETTINGS) + self.assertEqual(len(plugins), 1, plugins) + self.assertEqual( + {'namespace plugin'}, + set(plugin.NAME for plugin in plugins)) + + # normal and namespace plugin + SETTINGS = { + 'PLUGINS': ['normal_plugin', 'ns_plugin'], + 'PLUGIN_PATHS': [self._NORMAL_PLUGIN_FOLDER] + } + plugins = load_plugins(SETTINGS) + 
self.assertEqual(len(plugins), 2, plugins) + self.assertEqual( + {'normal plugin', 'namespace plugin'}, + set(plugin.NAME for plugin in plugins))
Support new namespace plugin format As discussed in https://github.com/getpelican/pelican-plugins/issues/425#issuecomment-542474603 and other comments in that issue.
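As a sketch, a namespace plugin under the new format looks like the dummy fixture added in the test patch above; the project layout shown here is illustrative, and the comments about `PLUGINS` values summarise the behaviour of this PR's `load_plugins`:

```python
# Layout of a namespace plugin project (mirrors the ns_plugin test fixture):
#
#   my-plugin/
#   └── pelican/
#       └── plugins/
#           └── ns_plugin/
#               └── __init__.py
#
# pelican/plugins/ns_plugin/__init__.py
NAME = 'namespace plugin'


def register():
    # Connect to pelican.plugins.signals here.
    pass


# pelicanconf.py possibilities with this PR:
#   PLUGINS not set                          -> all installed namespace plugins load
#   PLUGINS = []                             -> namespace plugins disabled
#   PLUGINS = ['ns_plugin']                  -> short name resolves to pelican.plugins.ns_plugin
#   PLUGINS = ['pelican.plugins.ns_plugin']  -> fully qualified name also works
```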
A couple of other thoughts… The new namespace plugin format will require Python 3. [Since Python 2 will soon be gone](https://pythonclock.org/), Pelican 4.2 is probably the last version that will support Python 2.7. It might be nice for Pelican to have a period of backwards-compatibility, with a deprecation notice, for existing plugins. I don't think this is critically important, but it might make the transition a bit smoother. Anyone have any input on this idea?
2019-10-28T01:58:48Z
[]
[]
getpelican/pelican
2,651
getpelican__pelican-2651
[ "2650" ]
01eb08c42b543450ee5e0e3de3854526708a6711
diff --git a/tasks.py b/tasks.py new file mode 100644 --- /dev/null +++ b/tasks.py @@ -0,0 +1,114 @@ +import os +from pathlib import Path +from shutil import which + +from invoke import task + +PKG_NAME = "pelican" +PKG_PATH = Path("pelican") +DOCS_PORT = os.environ.get("DOCS_PORT", 8000) +ACTIVE_VENV = os.environ.get("VIRTUAL_ENV", None) +VENV_HOME = Path(os.environ.get("WORKON_HOME", "~/virtualenvs")) +VENV_PATH = Path(ACTIVE_VENV) if ACTIVE_VENV else (VENV_HOME / PKG_NAME) +VENV = str(VENV_PATH.expanduser()) + +TOOLS = ["poetry", "pre-commit"] +POETRY = which("poetry") if which("poetry") else (VENV / Path("bin") / "poetry") +PRECOMMIT = ( + which("pre-commit") if which("pre-commit") else (VENV / Path("bin") / "pre-commit") +) + + +@task +def docbuild(c): + """Build documentation""" + c.run(f"{VENV}/bin/sphinx-build docs docs/_build") + + +@task(docbuild) +def docserve(c): + """Serve docs at http://localhost:$DOCS_PORT/ (default port is 8000)""" + from livereload import Server + + server = Server() + server.watch("docs/conf.py", lambda: docbuild(c)) + server.watch("CONTRIBUTING.rst", lambda: docbuild(c)) + server.watch("docs/*.rst", lambda: docbuild(c)) + server.serve(port=DOCS_PORT, root="docs/_build") + + +@task +def tests(c): + """Run the test suite""" + c.run(f"{VENV}/bin/pytest", pty=True) + + +@task +def black(c, check=False, diff=False): + """Run Black auto-formatter, optionally with --check or --diff""" + check_flag, diff_flag = "", "" + if check: + check_flag = "--check" + if diff: + diff_flag = "--diff" + c.run(f"{VENV}/bin/black {check_flag} {diff_flag} {PKG_PATH} tasks.py") + + +@task +def isort(c, check=False, diff=False): + check_flag, diff_flag = "", "" + if check: + check_flag = "-c" + if diff: + diff_flag = "--diff" + c.run( + f"{VENV}/bin/isort {check_flag} {diff_flag} --recursive {PKG_PATH}/* tasks.py" + ) + + +@task +def flake8(c): + c.run(f"{VENV}/bin/flake8 {PKG_PATH} tasks.py") + + +@task +def lint(c): + isort(c, check=True) + black(c, check=True) + flake8(c) + + +@task +def tools(c): + """Install tools in the virtual environment if not already on PATH""" + for tool in TOOLS: + if not which(tool): + c.run(f"{VENV}/bin/pip install {tool}") + + +@task +def precommit(c): + """Install pre-commit hooks to .git/hooks/pre-commit""" + c.run(f"{PRECOMMIT} install") + + +@task +def setup(c): + c.run(f"{VENV}/bin/pip install -U pip") + tools(c) + c.run(f"{POETRY} install") + precommit(c) + + +@task +def update_functional_tests(c): + """Update the generated functional test output""" + c.run( + f"bash -c 'LC_ALL=en_US.utf8 pelican -o {PKG_PATH}/tests/output/custom/ -s samples/pelican.conf.py samples/content/'" + ) + c.run( + f"bash -c 'LC_ALL=fr_FR.utf8 pelican -o {PKG_PATH}/tests/output/custom_locale/ -s samples/pelican.conf_FR.py samples/content/'" + ) + c.run( + f"bash -c 'LC_ALL=en_US.utf8 pelican -o {PKG_PATH}/tests/output/basic/ samples/content/'" + )
diff --git a/requirements/test.pip b/requirements/test.pip --- a/requirements/test.pip +++ b/requirements/test.pip @@ -1,5 +1,6 @@ # Tests mock +pytest # Optional Packages Markdown >= 3.1
Fix Pytest-incompatible test As far as I can tell, the only test that currently fails when running Pelican's test suite via `pytest` (5.2.2) is the `test_error_on_warning` test: ``` ――――――――――――――――――――――――――――― TestSuiteTest.test_error_on_warning ――――――――――――――――――――――――――――― self = <pelican.tests.test_testsuite.TestSuiteTest testMethod=test_error_on_warning> def test_error_on_warning(self): with self.assertRaises(UserWarning): > warnings.warn('test warning') E AssertionError: UserWarning not raised pelican/tests/test_testsuite.py:13: AssertionError ``` Who would like to earn a gold star by making this test compatible with `pytest`? ✨
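One possible runner-agnostic rewrite of that test, offered only as a sketch (it is not part of this PR's diff, which focuses on task automation and adding pytest to the test requirements):

```python
import unittest
import warnings


class TestSuiteTest(unittest.TestCase):
    def test_error_on_warning(self):
        # Force the "warnings are errors" policy locally instead of relying
        # on the test runner's global configuration, so the assertion holds
        # under both the stdlib runner and pytest.
        with warnings.catch_warnings():
            warnings.simplefilter('error')
            with self.assertRaises(UserWarning):
                warnings.warn('test warning')
```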
2019-11-09T17:53:33Z
[]
[]
getpelican/pelican
2,714
getpelican__pelican-2714
[ "1904" ]
4db9b944a237302b5558395038f6125a910c6a05
diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -302,6 +302,9 @@ def _parse_metadata(self, meta): """Return the dict containing document metadata""" formatted_fields = self.settings['FORMATTED_FIELDS'] + # prevent metadata extraction in fields + self._md.preprocessors.deregister('meta') + output = {} for name, value in meta.items(): name = name.lower()
diff --git a/pelican/tests/content/article_with_markdown_and_nested_metadata.md b/pelican/tests/content/article_with_markdown_and_nested_metadata.md new file mode 100644 --- /dev/null +++ b/pelican/tests/content/article_with_markdown_and_nested_metadata.md @@ -0,0 +1,5 @@ +Title: Article with markdown and nested summary metadata +Date: 2012-10-30 +Summary: Test: This metadata value looks like metadata + +This is some content. diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -239,6 +239,8 @@ def test_generate_context(self): ['Article title', 'published', 'Default', 'article'], ['Article with markdown and summary metadata multi', 'published', 'Default', 'article'], + ['Article with markdown and nested summary metadata', 'published', + 'Default', 'article'], ['Article with markdown and summary metadata single', 'published', 'Default', 'article'], ['Article with markdown containing footnotes', 'published', @@ -554,6 +556,7 @@ def test_article_order_by(self): 'Article title', 'Article with Nonconformant HTML meta tags', 'Article with an inline SVG', + 'Article with markdown and nested summary metadata', 'Article with markdown and summary metadata multi', 'Article with markdown and summary metadata single', 'Article with markdown containing footnotes', diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -645,6 +645,19 @@ def test_duplicate_tags_or_authors_are_removed(self): } self.assertDictHasSubset(metadata, expected) + def test_metadata_not_parsed_for_metadata(self): + settings = get_settings() + settings['FORMATTED_FIELDS'] = ['summary'] + + reader = readers.MarkdownReader(settings=settings) + content, metadata = reader.read( + _path('article_with_markdown_and_nested_metadata.md')) + expected = { + 'title': 'Article with markdown and nested summary metadata', + 'summary': '<p>Test: This metadata value looks like metadata</p>', + } + self.assertDictHasSubset(metadata, expected) + def test_empty_file(self): reader = readers.MarkdownReader(settings=get_settings()) content, metadata = reader.read(
Markdown Summary metadata is parsed for additional metadata

The Markdown parser forces the "meta" extension for parsing metadata headers. This parser object is reused when formatting header values for any header listed in the `FORMATTED_FIELDS` setting. Because header values like Summary are parsed for metadata, any metadata-like values are discarded from the final output.

Example:

```
Summary: Test: output
```

The header value of `Test: output` will be parsed as metadata, recognized as a header, and discarded from the final formatted Markdown output, resulting in an empty summary.
afaict, you found a bug in https://github.com/waylan/Python-Markdown/blob/master/markdown/extensions/meta.py#L27. You seem to want that plugin to disregard any colons after the first one, until a new line. That seems like it makes sense. You might also want to look at, and possibly even request clarification on, their practices: https://github.com/fletcher/MultiMarkdown/wiki/MultiMarkdown-Syntax-Guide#metadata

I think it's more due to the double parsing. When the full document is parsed, the summary meta is correct:

```
{"summary": "Test: output"}
```

Pelican then re-parses the meta value ("Test: output") as Markdown, which is nice for bold/italic/links/etc., but not great when your value looks like Markdown meta. The resulting render has an empty body, with some meta:

```
{"test": "output"}
```

Python-Markdown correctly parses the value on the first pass.

So, reparsing should be done without the `meta` extension?

Exactly, @avaris. I don't see a way to disable meta on the parser once the extension is loaded, so the reader might need a dedicated Markdown parser for use with `FORMATTED_FIELDS`. Working from #1573 could also potentially solve this (with some other long-term gains). How does `pelican_commonmark`'s metadata parser compare to the current core parser?

Hi Annika. Is this something you would be willing to help us fix/implement?
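The double-parse behaviour described in these comments can be reproduced with Python-Markdown alone, independent of Pelican. The following is a minimal sketch assuming Python-Markdown 3.x with the `meta` extension; it mirrors the behaviour the commenters describe rather than Pelican's exact code path.

```python
# Reproduce the issue: a metadata value that itself looks like metadata is
# swallowed when it is rendered a second time with the 'meta' extension on.
import markdown

md = markdown.Markdown(extensions=['meta'])

doc = "Title: Demo\nSummary: Test: output\n\nSome body text."
md.convert(doc)
summary = md.Meta['summary'][0]   # 'Test: output' -- the first pass is correct

md.reset()
html = md.convert(summary)        # re-render the summary value as Markdown
print(repr(html))                 # ''                   -- the body is empty
print(md.Meta)                    # {'test': ['output']} -- the value became metadata
```

Deregistering the meta preprocessor before formatting `FORMATTED_FIELDS` values, as the patch above does, avoids that second metadata pass.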
2020-04-12T09:45:43Z
[]
[]
getpelican/pelican
2715
getpelican__pelican-2715
[ "1196" ]
7bbd3dc6fbd31da5ce69a1347c5f72db73a5e6fc
diff --git a/pelican/generators.py b/pelican/generators.py --- a/pelican/generators.py +++ b/pelican/generators.py @@ -78,6 +78,14 @@ def __init__(self, context, settings, path, theme, output_path, custom_filters = self.settings['JINJA_FILTERS'] self.env.filters.update(custom_filters) + # get custom Jinja globals from user settings + custom_globals = self.settings['JINJA_GLOBALS'] + self.env.globals.update(custom_globals) + + # get custom Jinja tests from user settings + custom_tests = self.settings['JINJA_TESTS'] + self.env.tests.update(custom_tests) + signals.generator_init.send(self) def get_template(self, name): diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -120,6 +120,8 @@ def load_source(name, path): 'output_format': 'html5', }, 'JINJA_FILTERS': {}, + 'JINJA_GLOBALS': {}, + 'JINJA_TESTS': {}, 'JINJA_ENVIRONMENT': { 'trim_blocks': True, 'lstrip_blocks': True,
diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -1148,3 +1148,80 @@ def test_delete_existing_file_before_mkdir(self): self.assertTrue( os.path.isdir(os.path.join(self.temp_output, "static"))) self.assertTrue(os.path.isfile(self.endfile)) + + +class TestJinja2Environment(unittest.TestCase): + + def setUp(self): + self.temp_content = mkdtemp(prefix='pelicantests.') + self.temp_output = mkdtemp(prefix='pelicantests.') + self.old_locale = locale.setlocale(locale.LC_ALL) + locale.setlocale(locale.LC_ALL, str('C')) + + def tearDown(self): + rmtree(self.temp_content) + rmtree(self.temp_output) + locale.setlocale(locale.LC_ALL, self.old_locale) + + def _test_jinja2_helper(self, additional_settings, content, expected): + settings = get_settings() + settings['STATIC_PATHS'] = ['static'] + settings['TEMPLATE_PAGES'] = { + 'template/source.html': 'generated/file.html' + } + settings.update(additional_settings) + + generator = TemplatePagesGenerator( + context={'foo': 'foo', 'bar': 'bar'}, settings=settings, + path=self.temp_content, theme='', output_path=self.temp_output) + + # create a dummy template file + template_dir = os.path.join(self.temp_content, 'template') + template_path = os.path.join(template_dir, 'source.html') + os.makedirs(template_dir) + with open(template_path, 'w') as template_file: + template_file.write(content) + + writer = Writer(self.temp_output, settings=settings) + generator.generate_output(writer) + + output_path = os.path.join(self.temp_output, 'generated', 'file.html') + + # output file has been generated + self.assertTrue(os.path.exists(output_path)) + + # output content is correct + with open(output_path, 'r') as output_file: + self.assertEqual(output_file.read(), expected) + + def test_jinja2_filter(self): + """JINJA_FILTERS adds custom filters to Jinja2 environment""" + content = 'foo: {{ foo|custom_filter }}, bar: {{ bar|custom_filter }}' + settings = {'JINJA_FILTERS': {'custom_filter': lambda x: x.upper()}} + expected = 'foo: FOO, bar: BAR' + + self._test_jinja2_helper(settings, content, expected) + + def test_jinja2_test(self): + """JINJA_TESTS adds custom tests to Jinja2 environment""" + content = 'foo {{ foo is custom_test }}, bar {{ bar is custom_test }}' + settings = {'JINJA_TESTS': {'custom_test': lambda x: x == 'bar'}} + expected = 'foo False, bar True' + + self._test_jinja2_helper(settings, content, expected) + + def test_jinja2_global(self): + """JINJA_GLOBALS adds custom globals to Jinja2 environment""" + content = '{{ custom_global }}' + settings = {'JINJA_GLOBALS': {'custom_global': 'foobar'}} + expected = 'foobar' + + self._test_jinja2_helper(settings, content, expected) + + def test_jinja2_extension(self): + """JINJA_ENVIRONMENT adds extensions to Jinja2 environment""" + content = '{% set stuff = [] %}{% do stuff.append(1) %}{{ stuff }}' + settings = {'JINJA_ENVIRONMENT': {'extensions': ['jinja2.ext.do']}} + expected = '[1]' + + self._test_jinja2_helper(settings, content, expected)
Add configuration settings for custom tests and globals in jinja2 environment

Pelican currently has a setting for custom Jinja2 filters, but not for custom tests or custom environment namespace globals. This pull request adds both.
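As a usage illustration, here is a minimal sketch of how a site's pelicanconf.py might combine the existing JINJA_FILTERS setting with the JINJA_GLOBALS and JINJA_TESTS settings this pull request introduces. The setting names come from the diff above; the individual filter, global, and test bodies are made-up examples.

```python
# pelicanconf.py (sketch) -- the values below are illustrative only.

JINJA_FILTERS = {
    # usable in templates as {{ article.title|shout }}
    'shout': lambda text: text.upper(),
}

JINJA_GLOBALS = {
    # usable anywhere as {{ build_flavor }}
    'build_flavor': 'staging',
}

JINJA_TESTS = {
    # usable as {% if link is external %}
    'external': lambda url: url.startswith(('http://', 'https://')),
}
```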
It would be nice to have some unit tests for this feature, both to demonstrate its use, and to make sure it does not break in the future. Hi Kale. I think @al-tonio is right. Would you be willing to add some tests for your feature? I have rebased @kalefranz's changes onto master and added some tests (https://github.com/paperlessreceipts/pelican/compare/master...jinja_globals_tests) and can create a new pull request if this looks sensible... Hi @clj. Many thanks for the assistance. Looks sensible to me. Would you be so kind as to submit a new pull request? @clj: Any chance you might submit a new pull request with the changes you mentioned in this thread?
2020-04-12T12:50:04Z
[]
[]
getpelican/pelican
2716
getpelican__pelican-2716
[ "2552" ]
e7ef546661046a22f7c469186e1a97f98a6e8d24
diff --git a/pelican/log.py b/pelican/log.py --- a/pelican/log.py +++ b/pelican/log.py @@ -110,11 +110,13 @@ def filter(self, record): else: self._raised_messages.add(message_key) - # ignore LOG_FILTER records by templates when "debug" isn't enabled + # ignore LOG_FILTER records by templates or messages + # when "debug" isn't enabled logger_level = logging.getLogger().getEffectiveLevel() if logger_level > logging.DEBUG: - ignore_key = (record.levelno, record.msg) - if ignore_key in self._ignore: + template_key = (record.levelno, record.msg) + message_key = (record.levelno, record.getMessage()) + if (template_key in self._ignore or message_key in self._ignore): return False # check if we went over threshold
diff --git a/pelican/tests/test_log.py b/pelican/tests/test_log.py new file mode 100644 --- /dev/null +++ b/pelican/tests/test_log.py @@ -0,0 +1,78 @@ +import logging +import unittest +from collections import defaultdict + +from pelican import log +from pelican.tests.support import LogCountHandler + + +class TestLog(unittest.TestCase): + def setUp(self): + super().setUp() + self.logger = logging.getLogger(__name__) + self.handler = LogCountHandler() + self.logger.addHandler(self.handler) + + def tearDown(self): + self._reset_limit_filter() + self.logger.removeHandler(self.handler) + super().tearDown() + + def _reset_limit_filter(self): + log.LimitFilter._ignore = set() + log.LimitFilter._raised_messages = set() + log.LimitFilter._threshold = 5 + log.LimitFilter._group_count = defaultdict(int) + + def test_log_filter(self): + def do_logging(): + for i in range(5): + self.logger.warning('Log %s', i) + self.logger.warning('Another log %s', i) + # no filter + do_logging() + self.assertEqual( + self.handler.count_logs('Log \\d', logging.WARNING), + 5) + self.assertEqual( + self.handler.count_logs('Another log \\d', logging.WARNING), + 5) + self.handler.flush() + self._reset_limit_filter() + + # filter by template + log.LimitFilter._ignore.add((logging.WARNING, 'Log %s')) + do_logging() + self.assertEqual( + self.handler.count_logs('Log \\d', logging.WARNING), + 0) + self.assertEqual( + self.handler.count_logs('Another log \\d', logging.WARNING), + 5) + self.handler.flush() + self._reset_limit_filter() + + # filter by exact message + log.LimitFilter._ignore.add((logging.WARNING, 'Log 3')) + do_logging() + self.assertEqual( + self.handler.count_logs('Log \\d', logging.WARNING), + 4) + self.assertEqual( + self.handler.count_logs('Another log \\d', logging.WARNING), + 5) + self.handler.flush() + self._reset_limit_filter() + + # filter by both + log.LimitFilter._ignore.add((logging.WARNING, 'Log 3')) + log.LimitFilter._ignore.add((logging.WARNING, 'Another log %s')) + do_logging() + self.assertEqual( + self.handler.count_logs('Log \\d', logging.WARNING), + 4) + self.assertEqual( + self.handler.count_logs('Another log \\d', logging.WARNING), + 0) + self.handler.flush() + self._reset_limit_filter()
LOG_FILTER not working as it should?

Good evening folks,

I have a problem with LOG_FILTER. According to the docs, one can remove `TAG_SAVE_AS is set to False` by configuring `LOG_FILTER` this way:

```python
import logging
LOG_FILTER = [(logging.WARN, 'TAG_SAVE_AS is set to False')]
```

So to ignore `AUTHOR_SAVE_AS is set to False` and `CATEGORY_SAVE_AS is set to False`, it must be:

```python
import logging
LOG_FILTER = [
    (logging.WARN, 'AUTHOR_SAVE_AS is set to False'),
    (logging.WARN, 'CATEGORY_SAVE_AS is set to False')
]
```

Right? So this is what I did:

```bash
$ head pelicanconf.py -n 20
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from __future__ import unicode_literals
import datetime
import logging
import pelican
PELICAN_VERSION = pelican.__version__
[...]
LOG_FILTER = [
    (logging.WARN, 'AUTHOR_SAVE_AS is set to False'),
    (logging.WARN, 'CATEGORY_SAVE_AS is set to False')
]
AUTHOR_SAVE_AS = False
AUTHORS_SAVE_AS = False
TAG_SAVE_AS = False
TAGS_SAVE_AS = False
CATEGORY_SAVE_AS = False
CATEGORIES_SAVE_AS = False
ARCHIVES_SAVE_AS = False

$ make publish
pelican /mnt/c/Users/max/Code/mlcdf/content -o /mnt/c/Users/max/Code/mlcdf/output -s /mnt/c/Users/max/Code/mlcdf/publishconf.py
WARNING: CATEGORY_SAVE_AS is set to False
WARNING: AUTHOR_SAVE_AS is set to False
Done: Processed 2 articles, 0 drafts, 4 pages, 1 hidden page and 0 draft pages in 0.52 seconds.
```

Two things here:

- it didn't work: `AUTHOR_SAVE_AS is set to False` and `CATEGORY_SAVE_AS is set to False` are still logged.
- and, you may have noticed that, despite setting `TAG_SAVE_AS = False` and not filtering it, I did not see `TAG_SAVE_AS is set to False` in the logs.

Using the template to filter worked (but it will also filter out other logs matching the template, which is not what you would necessarily want):

```
import logging
LOG_FILTER = [
    (logging.WARN, '%s is set to %s'),
]
```

* Am I missing something?
* What's the intended behaviour here? Is there an actual bug in the code? Or is the code example in the docs just wrong?

I'm a bit lost. 🙃

Maxime
I checked the code and you're correct. Any templated log has to be filtered with the template, not individually. This needs fixing. Also, I don't get why `TAG_SAVE_AS is set to False` is not appearing. It should be. Wrote a PR that will fix the issue. IMHO `TAG_SAVE_AS is set to False` is not showing because you don't have any tag in the content frontmatter. > IMHO `TAG_SAVE_AS is set to False` is not showing because you don't have any tag in the content frontmatter. Well, this makes sense. Thanks!
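The matching rule that the patch above introduces can be summarised with plain `logging` objects: a LOG_FILTER entry now suppresses a record when it matches either the logging template or the fully rendered message. The following is a minimal sketch of that check using only the standard library, not Pelican's actual LimitFilter class.

```python
import logging

# One entry per tuple: (level, template-or-exact-message), as in pelicanconf.py.
LOG_FILTER = [
    (logging.WARNING, '%s is set to %s'),                     # template form
    (logging.WARNING, 'CATEGORY_SAVE_AS is set to False'),    # exact message form
]

record = logging.LogRecord(
    name='pelican', level=logging.WARNING, pathname='', lineno=0,
    msg='%s is set to %s', args=('CATEGORY_SAVE_AS', False), exc_info=None)

template_key = (record.levelno, record.msg)           # (30, '%s is set to %s')
message_key = (record.levelno, record.getMessage())   # (30, 'CATEGORY_SAVE_AS is set to False')

# With the fix, a record is ignored if either key is listed in LOG_FILTER.
print(template_key in LOG_FILTER or message_key in LOG_FILTER)   # True
```

Before the fix, only the template key was consulted, which is why filtering on the exact rendered message had no effect for templated warnings.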
2020-04-12T17:57:28Z
[]
[]
getpelican/pelican
2720
getpelican__pelican-2720
[ "2373" ]
2d590bb8c183ba22af4b7ed3241fd4f80069fa73
diff --git a/samples/pelican.conf.py b/samples/pelican.conf.py --- a/samples/pelican.conf.py +++ b/samples/pelican.conf.py @@ -40,13 +40,16 @@ # static paths will be copied without parsing their contents STATIC_PATHS = [ - 'pictures', + 'images', 'extra/robots.txt', ] # custom page generated with a jinja2 template TEMPLATE_PAGES = {'pages/jinja2_template.html': 'jinja2_template.html'} +# there is no other HTML content +READERS = {'html': None} + # code blocks with line numbers PYGMENTS_RST_OPTIONS = {'linenos': 'table'}
diff --git a/pelican/tests/output/basic/author/alexis-metaireau.html b/pelican/tests/output/basic/author/alexis-metaireau.html --- a/pelican/tests/output/basic/author/alexis-metaireau.html +++ b/pelican/tests/output/basic/author/alexis-metaireau.html @@ -74,7 +74,9 @@ <h1><a href="/oh-yeah.html" rel="bookmark" By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> <p>In <a href="/category/bar.html">bar</a>.</p> -<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p> +<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p>Translations: + <a href="/oh-yeah-fr.html" hreflang="fr">fr</a> + </footer><!-- /.post-info --> <div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! diff --git a/pelican/tests/output/basic/category/bar.html b/pelican/tests/output/basic/category/bar.html --- a/pelican/tests/output/basic/category/bar.html +++ b/pelican/tests/output/basic/category/bar.html @@ -34,7 +34,9 @@ <h1 class="entry-title"><a href="/oh-yeah.html">Oh yeah !</a></h1> By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> <p>In <a href="/category/bar.html">bar</a>.</p> -<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p> +<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p>Translations: + <a href="/oh-yeah-fr.html" hreflang="fr">fr</a> + </footer><!-- /.post-info --><div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
diff --git a/pelican/tests/output/basic/drafts/a-draft-article.html b/pelican/tests/output/basic/drafts/a-draft-article.html --- a/pelican/tests/output/basic/drafts/a-draft-article.html +++ b/pelican/tests/output/basic/drafts/a-draft-article.html @@ -31,8 +31,8 @@ <h1 class="entry-title"> <div class="entry-content"> <footer class="post-info"> - <abbr class="published" title="9999-12-31T23:59:59.999999+00:00"> - Published: + <abbr class="published" title="2011-05-08T15:58:00+00:00"> + Published: Sun 08 May 2011 </abbr> <p>In <a href="/category/misc.html">misc</a>.</p> diff --git a/pelican/tests/output/basic/feeds/all-fr.atom.xml b/pelican/tests/output/basic/feeds/all-fr.atom.xml --- a/pelican/tests/output/basic/feeds/all-fr.atom.xml +++ b/pelican/tests/output/basic/feeds/all-fr.atom.xml @@ -1,3 +1,4 @@ <?xml version="1.0" encoding="utf-8"?> <feed xmlns="http://www.w3.org/2005/Atom"><title>A Pelican Blog</title><link href="/" rel="alternate"></link><link href="/feeds/all-fr.atom.xml" rel="self"></link><id>/</id><updated>2012-02-29T00:00:00+00:00</updated><entry><title>Deuxième article</title><link href="/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+00:00</published><updated>2012-02-29T00:00:00+00:00</updated><author><name></name></author><id>tag:None,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</content><category term="misc"></category><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry></feed> \ No newline at end of file +</content><category term="misc"></category><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Trop bien !</title><link href="/oh-yeah-fr.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><updated>2010-10-20T10:14:00+00:00</updated><author><name></name></author><id>tag:None,2010-10-20:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</content><category term="misc"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/basic/feeds/all.atom.xml b/pelican/tests/output/basic/feeds/all.atom.xml --- a/pelican/tests/output/basic/feeds/all.atom.xml +++ b/pelican/tests/output/basic/feeds/all.atom.xml @@ -27,7 +27,8 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</content><category term="bar"></category><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+00:00</published><updated>2010-10-15T20:30:00+00:00</updated><author><name></name></author><id>tag:None,2010-10-15:/unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; +</content><category term="bar"></category><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Trop bien !</title><link href="/oh-yeah-fr.html" rel="alternate"></link><published>2010-10-20T10:14:00+00:00</published><updated>2010-10-20T10:14:00+00:00</updated><author><name></name></author><id>tag:None,2010-10-20:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</content><category term="misc"></category></entry><entry><title>Unbelievable !</title><link href="/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+00:00</published><updated>2010-10-15T20:30:00+00:00</updated><author><name></name></author><id>tag:None,2010-10-15:/unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; diff --git a/pelican/tests/output/basic/index.html b/pelican/tests/output/basic/index.html --- a/pelican/tests/output/basic/index.html +++ b/pelican/tests/output/basic/index.html @@ -187,7 +187,9 @@ <h1><a href="/oh-yeah.html" rel="bookmark" By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> <p>In <a href="/category/bar.html">bar</a>.</p> -<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p> +<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p>Translations: + <a href="/oh-yeah-fr.html" hreflang="fr">fr</a> + </footer><!-- /.post-info --> <div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
diff --git a/pelican/tests/output/basic/oh-yeah-fr.html b/pelican/tests/output/basic/oh-yeah-fr.html new file mode 100644 --- /dev/null +++ b/pelican/tests/output/basic/oh-yeah-fr.html @@ -0,0 +1,69 @@ +<!DOCTYPE html> +<html lang="fr"> +<head> + <meta charset="utf-8" /> + <meta name="generator" content="Pelican" /> + <title>Trop bien !</title> + <link rel="stylesheet" href="/theme/css/main.css" /> + <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> + <link rel="alternate" hreflang="en" href="/oh-yeah.html"> + +</head> + +<body id="index" class="home"> + <header id="banner" class="body"> + <h1><a href="/">A Pelican Blog</a></h1> + <nav><ul> + <li><a href="/tag/oh.html">Oh Oh Oh</a></li> + <li><a href="/override/">Override url/save_as</a></li> + <li><a href="/pages/this-is-a-test-page.html">This is a test page</a></li> + <li><a href="/category/bar.html">bar</a></li> + <li><a href="/category/cat1.html">cat1</a></li> + <li class="active"><a href="/category/misc.html">misc</a></li> + <li><a href="/category/yeah.html">yeah</a></li> + </ul></nav> + </header><!-- /#banner --> +<section id="content" class="body"> + <article> + <header> + <h1 class="entry-title"> + <a href="/oh-yeah-fr.html" rel="bookmark" + title="Permalink to Trop bien !">Trop bien !</a></h1> + </header> + + <div class="entry-content"> +<footer class="post-info"> + <abbr class="published" title="2010-10-20T10:14:00+00:00"> + Published: Wed 20 October 2010 + </abbr> + +<p>In <a href="/category/misc.html">misc</a>.</p> +Translations: + <a href="/oh-yeah.html" hreflang="en">en</a> + +</footer><!-- /.post-info --> <p>Et voila du contenu en français</p> + + </div><!-- /.entry-content --> + + </article> +</section> + <section id="extras" class="body"> + <div class="social"> + <h2>social</h2> + <ul> + <li><a href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate">atom feed</a></li> + + </ul> + </div><!-- /.social --> + </section><!-- /#extras --> + + <footer id="contentinfo" class="body"> + <address id="about" class="vcard body"> + Proudly powered by <a href="https://getpelican.com/">Pelican</a>, which takes great advantage of <a href="https://www.python.org/">Python</a>. 
+ </address><!-- /#about --> + + <p>The theme is by <a href="https://www.smashingmagazine.com/2009/08/designing-a-html-5-layout-from-scratch/">Smashing Magazine</a>, thanks!</p> + </footer><!-- /#contentinfo --> + +</body> +</html> \ No newline at end of file diff --git a/pelican/tests/output/basic/oh-yeah.html b/pelican/tests/output/basic/oh-yeah.html --- a/pelican/tests/output/basic/oh-yeah.html +++ b/pelican/tests/output/basic/oh-yeah.html @@ -6,6 +6,8 @@ <title>Oh yeah !</title> <link rel="stylesheet" href="/theme/css/main.css" /> <link href="/feeds/all.atom.xml" type="application/atom+xml" rel="alternate" title="A Pelican Blog Atom Feed" /> + <link rel="alternate" hreflang="fr" href="/oh-yeah-fr.html"> + </head> <body id="index" class="home"> @@ -39,7 +41,9 @@ <h1 class="entry-title"> By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> <p>In <a href="/category/bar.html">bar</a>.</p> -<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p> +<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p>Translations: + <a href="/oh-yeah-fr.html" hreflang="fr">fr</a> + </footer><!-- /.post-info --> <div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! diff --git a/pelican/tests/output/basic/pages/this-is-a-test-page.html b/pelican/tests/output/basic/pages/this-is-a-test-page.html --- a/pelican/tests/output/basic/pages/this-is-a-test-page.html +++ b/pelican/tests/output/basic/pages/this-is-a-test-page.html @@ -25,7 +25,8 @@ <h1><a href="/">A Pelican Blog</a></h1> <h1 class="entry-title">This is a test page</h1> <p>Just an image.</p> -<img alt="alternate text" src="|filename|/pictures/Fat_Cat.jpg" style="width: 600px; height: 450px;" /> +<img alt="alternate text" src="/pictures/Fat_Cat.jpg" style="width: 600px; height: 450px;" /> +<img alt="wrong path since 'images' folder does not exist" src="|filename|/images/Fat_Cat.jpg" style="width: 600px; height: 450px;" /> </section> <section id="extras" class="body"> diff --git a/pelican/tests/output/basic/pictures/Fat_Cat.jpg b/pelican/tests/output/basic/pictures/Fat_Cat.jpg new file mode 100644 Binary files /dev/null and b/pelican/tests/output/basic/pictures/Fat_Cat.jpg differ diff --git a/pelican/tests/output/basic/tag/bar.html b/pelican/tests/output/basic/tag/bar.html --- a/pelican/tests/output/basic/tag/bar.html +++ b/pelican/tests/output/basic/tag/bar.html @@ -86,7 +86,9 @@ <h1><a href="/oh-yeah.html" rel="bookmark" By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> <p>In <a href="/category/bar.html">bar</a>.</p> -<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p> +<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p>Translations: + <a href="/oh-yeah-fr.html" hreflang="fr">fr</a> + </footer><!-- /.post-info --> <div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! 
diff --git a/pelican/tests/output/basic/tag/yeah.html b/pelican/tests/output/basic/tag/yeah.html --- a/pelican/tests/output/basic/tag/yeah.html +++ b/pelican/tests/output/basic/tag/yeah.html @@ -34,7 +34,9 @@ <h1 class="entry-title"><a href="/oh-yeah.html">Oh yeah !</a></h1> By <a class="url fn" href="/author/alexis-metaireau.html">Alexis Métaireau</a> </address> <p>In <a href="/category/bar.html">bar</a>.</p> -<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p> +<p>tags: <a href="/tag/oh.html">oh</a> <a href="/tag/bar.html">bar</a> <a href="/tag/yeah.html">yeah</a> </p>Translations: + <a href="/oh-yeah-fr.html" hreflang="fr">fr</a> + </footer><!-- /.post-info --><div class="section" id="why-not"> <h2>Why not ?</h2> <p>After all, why not ? It's pretty simple to do it, and it will allow me to write my blogposts in rst ! diff --git a/pelican/tests/output/custom/drafts/a-draft-article.html b/pelican/tests/output/custom/drafts/a-draft-article.html --- a/pelican/tests/output/custom/drafts/a-draft-article.html +++ b/pelican/tests/output/custom/drafts/a-draft-article.html @@ -35,8 +35,8 @@ <h1 class="entry-title"> <div class="entry-content"> <footer class="post-info"> - <abbr class="published" title="2012-03-02T14:01:01+01:00"> - Published: Fri 02 March 2012 + <abbr class="published" title="2011-05-08T15:58:00+02:00"> + Published: Sun 08 May 2011 </abbr> <address class="vcard author"> diff --git a/pelican/tests/output/custom/feeds/all-fr.atom.xml b/pelican/tests/output/custom/feeds/all-fr.atom.xml --- a/pelican/tests/output/custom/feeds/all-fr.atom.xml +++ b/pelican/tests/output/custom/feeds/all-fr.atom.xml @@ -1,4 +1,4 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-03-02T14:01:01+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</content><category term="misc"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</content><category term="misc"></category><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry></feed> \ No newline at end of file +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-02-29T00:00:00+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" 
rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</content><category term="misc"></category><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</content><category term="misc"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom/feeds/all.atom.xml b/pelican/tests/output/custom/feeds/all.atom.xml --- a/pelican/tests/output/custom/feeds/all.atom.xml +++ b/pelican/tests/output/custom/feeds/all.atom.xml @@ -1,6 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> <feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><subtitle>A personal blog.</subtitle><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/filename_metadata-example.html" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/filename_metadata-example.html</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content><category term="misc"></category></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; </content><category term="misc"></category></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/second-article.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article.html</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; </content><category term="misc"></category><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; </content><category term="misc"></category><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link 
href="http://blog.notmyidea.org/a-markdown-powered-article.html" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/a-markdown-powered-article.html</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; @@ -28,7 +27,8 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</content><category term="bar"></category><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:/unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</content><category term="bar"></category><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</content><category term="misc"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/unbelievable.html" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:/unbelievable.html</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; diff --git a/pelican/tests/output/custom/feeds/all.rss.xml b/pelican/tests/output/custom/feeds/all.rss.xml --- a/pelican/tests/output/custom/feeds/all.rss.xml +++ b/pelican/tests/output/custom/feeds/all.rss.xml @@ -1,7 +1,6 @@ <?xml version="1.0" encoding="utf-8"?> <rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description>A personal blog.</description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/filename_metadata-example.html</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:/filename_metadata-example.html</guid><category>misc</category></item><item><title>Trop bien !</title><link>http://blog.notmyidea.org/oh-yeah-fr.html</link><description>&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 02 Mar 2012 14:01:01 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</guid><category>misc</category></item><item><title>Second article</title><link>http://blog.notmyidea.org/second-article.html</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:/filename_metadata-example.html</guid><category>misc</category></item><item><title>Second article</title><link>http://blog.notmyidea.org/second-article.html</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:/second-article.html</guid><category>misc</category><category>foo</category><category>bar</category><category>baz</category></item><item><title>Deuxième article</title><link>http://blog.notmyidea.org/second-article-fr.html</link><description>&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</guid><category>misc</category><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/a-markdown-powered-article.html</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/unbelievable.html"&gt;a root-relative link to unbelievable&lt;/a&gt; @@ -16,7 +15,8 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; 
YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:/oh-yeah.html</guid><category>bar</category><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:/oh-yeah.html</guid><category>bar</category><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Trop bien !</title><link>http://blog.notmyidea.org/oh-yeah-fr.html</link><description>&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:/oh-yeah-fr.html</guid><category>misc</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/unbelievable.html</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/a-markdown-powered-article.html"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; diff --git a/pelican/tests/output/custom/oh-yeah-fr.html b/pelican/tests/output/custom/oh-yeah-fr.html --- a/pelican/tests/output/custom/oh-yeah-fr.html +++ b/pelican/tests/output/custom/oh-yeah-fr.html @@ -37,8 +37,8 @@ <h1 class="entry-title"> <div class="entry-content"> <footer class="post-info"> - <abbr class="published" title="2012-03-02T14:01:01+01:00"> - Published: Fri 02 March 2012 + <abbr class="published" title="2010-10-20T10:14:00+02:00"> + Published: Wed 20 October 2010 </abbr> <address class="vcard author"> diff --git a/pelican/tests/output/custom/pages/this-is-a-test-page.html b/pelican/tests/output/custom/pages/this-is-a-test-page.html --- a/pelican/tests/output/custom/pages/this-is-a-test-page.html +++ b/pelican/tests/output/custom/pages/this-is-a-test-page.html @@ -30,6 +30,7 @@ <h1 class="entry-title">This is a test page</h1> <p>Just an image.</p> <img alt="alternate text" src="../pictures/Fat_Cat.jpg" style="width: 600px; height: 450px;" /> +<img alt="wrong path since 'images' folder does not exist" src="|filename|/images/Fat_Cat.jpg" style="width: 600px; height: 450px;" /> </section> <section id="extras" class="body"> diff --git a/pelican/tests/output/custom_locale/drafts/a-draft-article.html b/pelican/tests/output/custom_locale/drafts/a-draft-article.html --- a/pelican/tests/output/custom_locale/drafts/a-draft-article.html +++ b/pelican/tests/output/custom_locale/drafts/a-draft-article.html @@ -35,8 +35,8 @@ <h1 class="entry-title"> <div class="entry-content"> <footer class="post-info"> - <abbr class="published" title="2012-03-02T14:01:01+01:00"> - 
Published: 02 mars 2012 + <abbr class="published" title="2011-05-08T15:58:00+02:00"> + Published: 08 mai 2011 </abbr> <address class="vcard author"> diff --git a/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml b/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml --- a/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/all-fr.atom.xml @@ -1,4 +1,4 @@ <?xml version="1.0" encoding="utf-8"?> -<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-03-02T14:01:01+01:00</updated><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</content><category term="misc"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; -</content><category term="misc"></category><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry></feed> \ No newline at end of file +<feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link href="http://blog.notmyidea.org/feeds/all-fr.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2012-02-29T00:00:00+01:00</updated><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; +</content><category term="misc"></category><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</content><category term="misc"></category></entry></feed> \ No newline at end of file diff --git a/pelican/tests/output/custom_locale/feeds/all.atom.xml b/pelican/tests/output/custom_locale/feeds/all.atom.xml --- a/pelican/tests/output/custom_locale/feeds/all.atom.xml +++ b/pelican/tests/output/custom_locale/feeds/all.atom.xml @@ -1,6 +1,5 @@ <?xml version="1.0" encoding="utf-8"?> <feed xmlns="http://www.w3.org/2005/Atom"><title>Alexis' log</title><link href="http://blog.notmyidea.org/" rel="alternate"></link><link 
href="http://blog.notmyidea.org/feeds/all.atom.xml" rel="self"></link><id>http://blog.notmyidea.org/</id><updated>2013-11-17T23:29:00+01:00</updated><entry><title>FILENAME_METADATA example</title><link href="http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/" rel="alternate"></link><published>2012-11-30T00:00:00+01:00</published><updated>2012-11-30T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-11-30:/posts/2012/novembre/30/filename_metadata-example/</id><content type="html">&lt;p&gt;Some cool stuff!&lt;/p&gt; -</content><category term="misc"></category></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2012-03-02T14:01:01+01:00</published><updated>2012-03-02T14:01:01+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; </content><category term="misc"></category></entry><entry><title>Second article</title><link href="http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/posts/2012/février/29/second-article/</id><content type="html">&lt;p&gt;This is some article, in english&lt;/p&gt; </content><category term="misc"></category><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>Deuxième article</title><link href="http://blog.notmyidea.org/second-article-fr.html" rel="alternate"></link><published>2012-02-29T00:00:00+01:00</published><updated>2012-02-29T00:00:00+01:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</id><content type="html">&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; </content><category term="misc"></category><category term="foo"></category><category term="bar"></category><category term="baz"></category></entry><entry><title>A markdown powered article</title><link href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/" rel="alternate"></link><published>2011-04-20T00:00:00+02:00</published><updated>2011-04-20T00:00:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2011-04-20:/posts/2011/avril/20/a-markdown-powered-article/</id><content type="html">&lt;p&gt;You're mutually oblivious.&lt;/p&gt; @@ -28,7 +27,8 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</content><category term="bar"></category><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:/posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; +</content><category term="bar"></category><category term="oh"></category><category term="bar"></category><category term="yeah"></category></entry><entry><title>Trop bien !</title><link href="http://blog.notmyidea.org/oh-yeah-fr.html" rel="alternate"></link><published>2010-10-20T10:14:00+02:00</published><updated>2010-10-20T10:14:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-20:/oh-yeah-fr.html</id><content type="html">&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</content><category term="misc"></category></entry><entry><title>Unbelievable !</title><link href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/" rel="alternate"></link><published>2010-10-15T20:30:00+02:00</published><updated>2010-10-15T20:30:00+02:00</updated><author><name>Alexis Métaireau</name></author><id>tag:blog.notmyidea.org,2010-10-15:/posts/2010/octobre/15/unbelievable/</id><summary type="html">&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; diff --git a/pelican/tests/output/custom_locale/feeds/all.rss.xml b/pelican/tests/output/custom_locale/feeds/all.rss.xml --- a/pelican/tests/output/custom_locale/feeds/all.rss.xml +++ b/pelican/tests/output/custom_locale/feeds/all.rss.xml @@ -1,7 +1,6 @@ <?xml version="1.0" encoding="utf-8"?> <rss version="2.0"><channel><title>Alexis' log</title><link>http://blog.notmyidea.org/</link><description></description><lastBuildDate>Sun, 17 Nov 2013 23:29:00 +0100</lastBuildDate><item><title>FILENAME_METADATA example</title><link>http://blog.notmyidea.org/posts/2012/novembre/30/filename_metadata-example/</link><description>&lt;p&gt;Some cool stuff!&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:/posts/2012/novembre/30/filename_metadata-example/</guid><category>misc</category></item><item><title>Trop bien !</title><link>http://blog.notmyidea.org/oh-yeah-fr.html</link><description>&lt;p&gt;Et voila du contenu en français&lt;/p&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 02 Mar 2012 14:01:01 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-03-02:/oh-yeah-fr.html</guid><category>misc</category></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Fri, 30 Nov 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-11-30:/posts/2012/novembre/30/filename_metadata-example/</guid><category>misc</category></item><item><title>Second article</title><link>http://blog.notmyidea.org/posts/2012/f%C3%A9vrier/29/second-article/</link><description>&lt;p&gt;This is some article, in english&lt;/p&gt; </description><dc:creator 
xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:/posts/2012/février/29/second-article/</guid><category>misc</category><category>foo</category><category>bar</category><category>baz</category></item><item><title>Deuxième article</title><link>http://blog.notmyidea.org/second-article-fr.html</link><description>&lt;p&gt;Ceci est un article, en français.&lt;/p&gt; </description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 29 Feb 2012 00:00:00 +0100</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2012-02-29:/second-article-fr.html</guid><category>misc</category><category>foo</category><category>bar</category><category>baz</category></item><item><title>A markdown powered article</title><link>http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/</link><description>&lt;p&gt;You're mutually oblivious.&lt;/p&gt; &lt;p&gt;&lt;a href="http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/"&gt;a root-relative link to unbelievable&lt;/a&gt; @@ -16,7 +15,8 @@ as well as &lt;strong&gt;inline markup&lt;/strong&gt;.&lt;/p&gt; YEAH !&lt;/p&gt; &lt;img alt="alternate text" src="http://blog.notmyidea.org/pictures/Sushi.jpg" style="width: 600px; height: 450px;" /&gt; &lt;/div&gt; -</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:/posts/2010/octobre/20/oh-yeah/</guid><category>bar</category><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. Depends the needs.&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:/posts/2010/octobre/20/oh-yeah/</guid><category>bar</category><category>oh</category><category>bar</category><category>yeah</category></item><item><title>Trop bien !</title><link>http://blog.notmyidea.org/oh-yeah-fr.html</link><description>&lt;p&gt;Et voila du contenu en français&lt;/p&gt; +</description><dc:creator xmlns:dc="http://purl.org/dc/elements/1.1/">Alexis Métaireau</dc:creator><pubDate>Wed, 20 Oct 2010 10:14:00 +0200</pubDate><guid isPermaLink="false">tag:blog.notmyidea.org,2010-10-20:/oh-yeah-fr.html</guid><category>misc</category></item><item><title>Unbelievable !</title><link>http://blog.notmyidea.org/posts/2010/octobre/15/unbelievable/</link><description>&lt;p&gt;Or completely awesome. 
Depends the needs.&lt;/p&gt; &lt;p&gt;&lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a root-relative link to markdown-article&lt;/a&gt; &lt;a class="reference external" href="http://blog.notmyidea.org/posts/2011/avril/20/a-markdown-powered-article/"&gt;a file-relative link to markdown-article&lt;/a&gt;&lt;/p&gt; &lt;div class="section" id="testing-sourcecode-directive"&gt; diff --git a/pelican/tests/output/custom_locale/oh-yeah-fr.html b/pelican/tests/output/custom_locale/oh-yeah-fr.html --- a/pelican/tests/output/custom_locale/oh-yeah-fr.html +++ b/pelican/tests/output/custom_locale/oh-yeah-fr.html @@ -37,8 +37,8 @@ <h1 class="entry-title"> <div class="entry-content"> <footer class="post-info"> - <abbr class="published" title="2012-03-02T14:01:01+01:00"> - Published: 02 mars 2012 + <abbr class="published" title="2010-10-20T10:14:00+02:00"> + Published: 20 octobre 2010 </abbr> <address class="vcard author"> diff --git a/pelican/tests/output/custom_locale/pages/this-is-a-test-page.html b/pelican/tests/output/custom_locale/pages/this-is-a-test-page.html --- a/pelican/tests/output/custom_locale/pages/this-is-a-test-page.html +++ b/pelican/tests/output/custom_locale/pages/this-is-a-test-page.html @@ -30,6 +30,7 @@ <h1 class="entry-title">This is a test page</h1> <p>Just an image.</p> <img alt="alternate text" src="../pictures/Fat_Cat.jpg" style="width: 600px; height: 450px;" /> +<img alt="wrong path since 'images' folder does not exist" src="|filename|/images/Fat_Cat.jpg" style="width: 600px; height: 450px;" /> </section> <section id="extras" class="body"> diff --git a/samples/content/pages/test_page.rst b/samples/content/pages/test_page.rst --- a/samples/content/pages/test_page.rst +++ b/samples/content/pages/test_page.rst @@ -5,8 +5,12 @@ This is a test page Just an image. -.. image:: |filename|/pictures/Fat_Cat.jpg +.. image:: {static}/pictures/Fat_Cat.jpg :height: 450 px :width: 600 px :alt: alternate text +.. image:: |filename|/images/Fat_Cat.jpg + :height: 450 px + :width: 600 px + :alt: wrong path since 'images' folder does not exist
Fixing some warnings and errors in the sample content. The current sample content is not up to date with the current Pelican mechanisms. This will help newcomers better understand how Pelican works. * More valid articles. * More translations. * Images are now correctly displayed.
Looks like you need to [regenerate the sample output](http://docs.getpelican.com/en/stable/contribute.html#running-the-test-suite). PS: For the future, it's best to run tests locally before pushing. Alright, I didn't know that. I'll try to fix it. Hi @marespiaut. Would you be so kind as to re-generate the sample output and include it in your PR so we can get your contribution merged?
2020-04-14T19:54:20Z
[]
[]
getpelican/pelican
2725
getpelican__pelican-2725
[ "2249" ]
79095f7bd3ebed649523fcac23d39e37b39d5cc8
diff --git a/pelican/tools/pelican_import.py b/pelican/tools/pelican_import.py --- a/pelican/tools/pelican_import.py +++ b/pelican/tools/pelican_import.py @@ -634,7 +634,8 @@ def get_out_filename(output_path, filename, ext, kind, typename = '' kind = 'article' if dircat and (len(categories) > 0): - catname = slugify(categories[0], regex_subs=slug_subs) + catname = slugify( + categories[0], regex_subs=slug_subs, preserve_case=True) else: catname = '' out_filename = os.path.join(output_path, typename, @@ -643,7 +644,8 @@ def get_out_filename(output_path, filename, ext, kind, os.makedirs(os.path.join(output_path, typename, catname)) # option to put files in directories with categories names elif dircat and (len(categories) > 0): - catname = slugify(categories[0], regex_subs=slug_subs) + catname = slugify( + categories[0], regex_subs=slug_subs, preserve_case=True) out_filename = os.path.join(output_path, catname, filename + ext) if not os.path.isdir(os.path.join(output_path, catname)): os.mkdir(os.path.join(output_path, catname)) diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -222,7 +222,7 @@ def pelican_open(filename, mode='r', strip_crs=(sys.platform == 'win32')): yield content -def slugify(value, regex_subs=()): +def slugify(value, regex_subs=(), preserve_case=False): """ Normalizes string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. @@ -245,7 +245,8 @@ def slugify(value, regex_subs=()): value = re.sub(src, dst, value, flags=re.IGNORECASE) # convert to lowercase - value = value.lower() + if not preserve_case: + value = value.lower() # we want only ASCII chars value = value.encode('ascii', 'ignore').strip()
diff --git a/pelican/tests/test_importer.py b/pelican/tests/test_importer.py --- a/pelican/tests/test_importer.py +++ b/pelican/tests/test_importer.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- import locale import os @@ -138,7 +138,7 @@ def test_dircat(self): index = 0 for post in test_posts: name = post[2] - category = slugify(post[5][0], regex_subs=subs) + category = slugify(post[5][0], regex_subs=subs, preserve_case=True) name += '.md' filename = os.path.join(category, name) out_name = fnames[index] @@ -215,7 +215,7 @@ def test_custom_posts_put_in_own_dir_and_catagory_sub_dir(self): for post in test_posts: name = post[2] kind = post[8] - category = slugify(post[5][0], regex_subs=subs) + category = slugify(post[5][0], regex_subs=subs, preserve_case=True) name += '.md' filename = os.path.join(kind, category, name) out_name = fnames[index] diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -122,6 +122,12 @@ def test_slugify(self): for value, expected in samples: self.assertEqual(utils.slugify(value, regex_subs=subs), expected) + self.assertEqual(utils.slugify('Cat', regex_subs=subs), 'cat') + self.assertEqual( + utils.slugify('Cat', regex_subs=subs, preserve_case=False), 'cat') + self.assertEqual( + utils.slugify('Cat', regex_subs=subs, preserve_case=True), 'Cat') + def test_slugify_substitute(self): samples = (('C++ is based on C', 'cpp-is-based-on-c'),
preserve_case option for slugification of category names (fix #2248)
I have amended the commit message just to restart a completely failing Travis run. Losing time because of these false error messages on 2 of 3 PRs is a shame. I haven't looked further, but what happens when a set of case-sensitive categories hits a case-insensitive file system? Although I haven't done tests, I think it changes nothing from a case-insensitive file system's point of view. Thoughts on this PR from @getpelican/reviewers? (especially @avaris) It'd be nice to add a test or two. @fgallaire: Would you mind adding some corresponding tests? 😊 @justinmayer The tests are run with preserve_case enabled as it will be the default option. Do you want more? @fgallaire I meant just a couple of basic tests for `slugify` to directly test that option. Something like: ```python slugify('Foo') == 'foo' slugify('Foo', preserve_case=False) == 'foo' slugify('Foo', preserve_case=True) == 'Foo' ``` in [here](https://github.com/getpelican/pelican/blob/master/pelican/tests/test_utils.py#L123) @fgallaire: Would you mind adding a few tests as @avaris suggested so we can merge your contribution? Hi @fgallaire. Any chance you could add the tests @avaris mentioned so we can merge your pull request?
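To make the effect of the change concrete, here is a small hedged sketch (not part of the PR itself): with `preserve_case=True`, the category directory created by the importer keeps its capitalization. The regex substitutions are written out inline only so the calls are self-contained; they are an approximation of Pelican's configured `SLUG_REGEX_SUBSTITUTIONS`, not the exact defaults.

```python
from pelican.utils import slugify

# Approximate stand-in for the configured SLUG_REGEX_SUBSTITUTIONS.
subs = [(r'[^\w\s-]', ''),    # drop anything that is not a word char, whitespace, or '-'
        (r'(?u)\A\s*', ''),   # strip leading whitespace
        (r'(?u)\s*\Z', ''),   # strip trailing whitespace
        (r'[-\s]+', '-')]     # collapse whitespace/hyphens into a single '-'

slugify('My Category', regex_subs=subs)                      # 'my-category'
slugify('My Category', regex_subs=subs, preserve_case=True)  # 'My-Category'
```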
2020-04-15T18:49:15Z
[]
[]
getpelican/pelican
2731
getpelican__pelican-2731
[ "1207" ]
075ca53bd266fb3802ad0e10101a48fd63c1477b
diff --git a/pelican/contents.py b/pelican/contents.py --- a/pelican/contents.py +++ b/pelican/contents.py @@ -92,16 +92,18 @@ def __init__(self, content, metadata=None, settings=None, if not hasattr(self, 'slug'): if (settings['SLUGIFY_SOURCE'] == 'title' and hasattr(self, 'title')): - self.slug = slugify( - self.title, - regex_subs=settings.get('SLUG_REGEX_SUBSTITUTIONS', [])) + value = self.title elif (settings['SLUGIFY_SOURCE'] == 'basename' and source_path is not None): - basename = os.path.basename( - os.path.splitext(source_path)[0]) + value = os.path.basename(os.path.splitext(source_path)[0]) + else: + value = None + if value is not None: self.slug = slugify( - basename, - regex_subs=settings.get('SLUG_REGEX_SUBSTITUTIONS', [])) + value, + regex_subs=settings.get('SLUG_REGEX_SUBSTITUTIONS', []), + preserve_case=settings.get('SLUGIFY_PRESERVE_CASE', False), + use_unicode=settings.get('SLUGIFY_USE_UNICODE', False)) self.source_path = source_path self.relative_source_path = self.get_relative_source_path() diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -155,6 +155,8 @@ def load_source(name, path): ], 'INTRASITE_LINK_REGEX': '[{|](?P<what>.*?)[|}]', 'SLUGIFY_SOURCE': 'title', + 'SLUGIFY_USE_UNICODE': False, + 'SLUGIFY_PRESERVE_CASE': False, 'CACHE_CONTENT': False, 'CONTENT_CACHING_LAYER': 'reader', 'CACHE_PATH': 'cache', diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py --- a/pelican/urlwrappers.py +++ b/pelican/urlwrappers.py @@ -34,15 +34,16 @@ def slug(self): if self._slug is None: class_key = '{}_REGEX_SUBSTITUTIONS'.format( self.__class__.__name__.upper()) - if class_key in self.settings: - self._slug = slugify( - self.name, - regex_subs=self.settings[class_key]) - else: - self._slug = slugify( - self.name, - regex_subs=self.settings.get( - 'SLUG_REGEX_SUBSTITUTIONS', [])) + regex_subs = self.settings.get( + class_key, + self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])) + preserve_case = self.settings.get('SLUGIFY_PRESERVE_CASE', False) + self._slug = slugify( + self.name, + regex_subs=regex_subs, + preserve_case=preserve_case, + use_unicode=self.settings.get('SLUGIFY_USE_UNICODE', False) + ) return self._slug @slug.setter @@ -61,8 +62,18 @@ def __hash__(self): return hash(self.slug) def _normalize_key(self, key): - subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []) - return slugify(key, regex_subs=subs) + class_key = '{}_REGEX_SUBSTITUTIONS'.format( + self.__class__.__name__.upper()) + regex_subs = self.settings.get( + class_key, + self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])) + use_unicode = self.settings.get('SLUGIFY_USE_UNICODE', False) + preserve_case = self.settings.get('SLUGIFY_PRESERVE_CASE', False) + return slugify( + key, + regex_subs=regex_subs, + preserve_case=preserve_case, + use_unicode=use_unicode) def __eq__(self, other): if isinstance(other, self.__class__): diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -222,7 +222,7 @@ def pelican_open(filename, mode='r', strip_crs=(sys.platform == 'win32')): yield content -def slugify(value, regex_subs=(), preserve_case=False): +def slugify(value, regex_subs=(), preserve_case=False, use_unicode=False): """ Normalizes string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. @@ -230,28 +230,36 @@ def slugify(value, regex_subs=(), preserve_case=False): Took from Django sources. 
""" - # TODO Maybe steal again from current Django 1.5dev - value = Markup(value).striptags() - # value must be unicode per se import unicodedata - from unidecode import unidecode - value = unidecode(value) - if isinstance(value, bytes): - value = value.decode('ascii') - # still unicode - value = unicodedata.normalize('NFKD', value) + import unidecode + + def normalize_unicode(text): + # normalize text by compatibility composition + # see: https://en.wikipedia.org/wiki/Unicode_equivalence + return unicodedata.normalize('NFKC', text) + + # strip tags from value + value = Markup(value).striptags() + + # normalization + value = normalize_unicode(value) + + if not use_unicode: + # ASCII-fy + value = unidecode.unidecode(value) + # perform regex substitutions for src, dst in regex_subs: - value = re.sub(src, dst, value, flags=re.IGNORECASE) + value = re.sub( + normalize_unicode(src), + normalize_unicode(dst), + value, + flags=re.IGNORECASE) - # convert to lowercase if not preserve_case: value = value.lower() - # we want only ASCII chars - value = value.encode('ascii', 'ignore').strip() - # but Pelican should generally use only unicode - return value.decode('ascii') + return value.strip() def copy(source, destination, ignores=None):
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py --- a/pelican/tests/test_contents.py +++ b/pelican/tests/test_contents.py @@ -135,6 +135,32 @@ def test_slug(self): page = Page(**page_kwargs) self.assertEqual(page.slug, 'foo') + # test slug from title with unicode and case + + inputs = ( + # (title, expected, preserve_case, use_unicode) + ('指導書', 'zhi-dao-shu', False, False), + ('指導書', 'Zhi-Dao-Shu', True, False), + ('指導書', '指導書', False, True), + ('指導書', '指導書', True, True), + ('Çığ', 'cig', False, False), + ('Çığ', 'Cig', True, False), + ('Çığ', 'çığ', False, True), + ('Çığ', 'Çığ', True, True), + ) + + settings = get_settings() + page_kwargs = self._copy_page_kwargs() + page_kwargs['settings'] = settings + + for title, expected, preserve_case, use_unicode in inputs: + settings['SLUGIFY_PRESERVE_CASE'] = preserve_case + settings['SLUGIFY_USE_UNICODE'] = use_unicode + page_kwargs['metadata']['title'] = title + page = Page(**page_kwargs) + self.assertEqual(page.slug, expected, + (title, preserve_case, use_unicode)) + def test_defaultlang(self): # If no lang is given, default to the default one. page = Page(**self.page_kwargs) diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -128,6 +128,45 @@ def test_slugify(self): self.assertEqual( utils.slugify('Cat', regex_subs=subs, preserve_case=True), 'Cat') + def test_slugify_use_unicode(self): + + samples = ( + ('this is a test', 'this-is-a-test'), + ('this is a test', 'this-is-a-test'), + ('this → is ← a ↑ test', 'this-is-a-test'), + ('this--is---a test', 'this-is-a-test'), + ('unicode測試許功蓋,你看到了嗎?', 'unicode測試許功蓋你看到了嗎'), + ('Çığ', 'çığ') + ) + + settings = read_settings() + subs = settings['SLUG_REGEX_SUBSTITUTIONS'] + + for value, expected in samples: + self.assertEqual( + utils.slugify(value, regex_subs=subs, use_unicode=True), + expected) + + # check with preserve case + for value, expected in samples: + self.assertEqual( + utils.slugify('Çığ', regex_subs=subs, + preserve_case=True, use_unicode=True), + 'Çığ') + + # check normalization + samples = ( + ('大飯原発4号機、18日夜起動へ', '大飯原発4号機18日夜起動へ'), + ( + '\N{LATIN SMALL LETTER C}\N{COMBINING CEDILLA}', + '\N{LATIN SMALL LETTER C WITH CEDILLA}' + ) + ) + for value, expected in samples: + self.assertEqual( + utils.slugify(value, regex_subs=subs, use_unicode=True), + expected) + def test_slugify_substitute(self): samples = (('C++ is based on C', 'cpp-is-based-on-c'),
Add support for unicode slugs I want to use Pelican in a fully localized environment, where even the slugs would make use of unicode characters like õäöü. This patch helps: there is an additional option to slugify, which defaults to OFF at the moment (as there is no direct access to settings?). I felt that this approach is better than using the current configurable substitution option.
Hi Martin. Very sorry for the long delay in reviewing this pull request. It would appear that the Travis CI build failed. Would you be willing to rebase on current master, ensure all tests pass, and push a squashed commit to this branch so we can review it? @avaris: Could you take a look and offer your take on whether there is sufficient value here for inclusion in Pelican? We could provide a setting for not ASCII-fying the slugs, so it's probably worth having. But this needs docs, setting and tests. I can work on it.
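For concreteness, a rough sketch of how the patched `slugify` above behaves with the new flag; the inputs and outputs mirror the assertions in the test patch, and the regex substitutions are spelled out inline as an approximation of Pelican's defaults, so treat this as illustrative rather than authoritative.

```python
from pelican.utils import slugify

# Approximate stand-in for Pelican's default SLUG_REGEX_SUBSTITUTIONS.
subs = [(r'[^\w\s-]', ''), (r'(?u)\A\s*', ''), (r'(?u)\s*\Z', ''), (r'[-\s]+', '-')]

slugify('Çığ', regex_subs=subs)                       # 'cig'          (ASCII-fied)
slugify('Çığ', regex_subs=subs, use_unicode=True)     # 'çığ'          (Unicode kept)
slugify('指導書', regex_subs=subs)                     # 'zhi-dao-shu'  (transliterated)
slugify('指導書', regex_subs=subs, use_unicode=True)   # '指導書'
```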
2020-04-19T17:45:44Z
[]
[]
getpelican/pelican
2747
getpelican__pelican-2747
[ "2428" ]
e6df35330205aa6bdbe56cd40e641fab4958af8c
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -61,7 +61,7 @@ def setup(app): # overrides for wide tables in RTD theme - app.add_stylesheet('theme_overrides.css') # path relative to _static + app.add_css_file('theme_overrides.css') # path relative to _static # -- Options for LaTeX output ------------------------------------------------- diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -696,7 +696,7 @@ def path_metadata(full_path, source_path, settings=None): # Enforce a trailing slash when checking for parent directories. # This prevents false positives when one file or directory's name # is a prefix of another's. - dirpath = os.path.join(path, '') + dirpath = posixize_path(os.path.join(path, '')) if source_path == path or source_path.startswith(dirpath): metadata.update(meta) diff --git a/pelican/tools/pelican_import.py b/pelican/tools/pelican_import.py --- a/pelican/tools/pelican_import.py +++ b/pelican/tools/pelican_import.py @@ -728,8 +728,9 @@ def download_attachments(output_path, urls): # Generate percent-encoded URL scheme, netloc, path, query, fragment = urlsplit(url) - path = quote(path) - url = urlunsplit((scheme, netloc, path, query, fragment)) + if scheme != 'file': + path = quote(path) + url = urlunsplit((scheme, netloc, path, query, fragment)) if not os.path.exists(full_path): os.makedirs(full_path) diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -27,8 +27,10 @@ def sanitised_join(base_directory, *parts): - joined = os.path.abspath(os.path.join(base_directory, *parts)) - if not joined.startswith(os.path.abspath(base_directory)): + joined = posixize_path( + os.path.abspath(os.path.join(base_directory, *parts))) + base = posixize_path(os.path.abspath(base_directory)) + if not joined.startswith(base): raise RuntimeError( "Attempted to break out of output directory to {}".format( joined @@ -391,10 +393,9 @@ def get_relative_path(path): def path_to_url(path): """Return the URL corresponding to a given path.""" - if os.sep == '/': - return path - else: - return '/'.join(split_all(path)) + if path is not None: + path = posixize_path(path) + return path def posixize_path(rel_path): diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -1,5 +1,6 @@ import logging import os +from posixpath import join as posix_join from urllib.parse import urljoin from feedgenerator import Atom1Feed, Rss201rev2Feed, get_tag_uri @@ -25,7 +26,7 @@ def __init__(self, output_path, settings=None): # See Content._link_replacer for details if self.settings['RELATIVE_URLS']: - self.urljoiner = os.path.join + self.urljoiner = posix_join else: self.urljoiner = lambda base, url: urljoin( base if base.endswith('/') else base + '/', url) diff --git a/tasks.py b/tasks.py --- a/tasks.py +++ b/tasks.py @@ -24,7 +24,7 @@ @task def docbuild(c): """Build documentation""" - c.run(f"{VENV_BIN}/sphinx-build docs docs/_build") + c.run(f"{VENV_BIN}/sphinx-build -W docs docs/_build") @task(docbuild)
diff --git a/pelican/tests/support.py b/pelican/tests/support.py --- a/pelican/tests/support.py +++ b/pelican/tests/support.py @@ -160,6 +160,19 @@ def locale_available(locale_): return True +def can_symlink(): + res = True + try: + with temporary_folder() as f: + os.symlink( + f, + os.path.join(f, 'symlink') + ) + except OSError: + res = False + return res + + def get_settings(**kwargs): """Provide tweaked setting dictionaries for testing diff --git a/pelican/tests/test_cache.py b/pelican/tests/test_cache.py --- a/pelican/tests/test_cache.py +++ b/pelican/tests/test_cache.py @@ -1,17 +1,11 @@ import os from shutil import rmtree from tempfile import mkdtemp +from unittest.mock import MagicMock from pelican.generators import ArticlesGenerator, PagesGenerator from pelican.tests.support import get_context, get_settings, unittest -try: - from unittest.mock import MagicMock -except ImportError: - try: - from mock import MagicMock - except ImportError: - MagicMock = False CUR_DIR = os.path.dirname(__file__) CONTENT_DIR = os.path.join(CUR_DIR, 'content') @@ -131,7 +125,6 @@ def sorted_titles(items): self.assertEqual(uncached_pages, cached_pages) self.assertEqual(uncached_hidden_pages, cached_hidden_pages) - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_article_object_caching(self): """Test Article objects caching at the generator level""" settings = self._get_cache_enabled_settings() @@ -162,7 +155,6 @@ def test_article_object_caching(self): """ self.assertEqual(generator.readers.read_file.call_count, 6) - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_article_reader_content_caching(self): """Test raw article content caching at the reader level""" settings = self._get_cache_enabled_settings() @@ -185,7 +177,6 @@ def test_article_reader_content_caching(self): for reader in readers.values(): self.assertEqual(reader.read.call_count, 0) - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_article_ignore_cache(self): """Test that all the articles are read again when not loading cache @@ -212,7 +203,6 @@ def test_article_ignore_cache(self): generator.readers.read_file.call_count, orig_call_count) - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_page_object_caching(self): """Test Page objects caching at the generator level""" settings = self._get_cache_enabled_settings() @@ -238,7 +228,6 @@ def test_page_object_caching(self): """ self.assertEqual(generator.readers.read_file.call_count, 1) - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_page_reader_content_caching(self): """Test raw page content caching at the reader level""" settings = self._get_cache_enabled_settings() @@ -262,7 +251,6 @@ def test_page_reader_content_caching(self): for reader in readers.values(): self.assertEqual(reader.read.call_count, 0) - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_page_ignore_cache(self): """Test that all the pages are read again when not loading cache diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -1,22 +1,17 @@ import locale import os +import sys from shutil import copy, rmtree from tempfile import mkdtemp +from unittest.mock import MagicMock from pelican.generators import (ArticlesGenerator, Generator, PagesGenerator, PelicanTemplateNotFound, StaticGenerator, TemplatePagesGenerator) -from pelican.tests.support import get_context, get_settings, unittest +from pelican.tests.support import (can_symlink, 
get_context, get_settings, + unittest) from pelican.writers import Writer -try: - from unittest.mock import MagicMock -except ImportError: - try: - from mock import MagicMock - except ImportError: - MagicMock = False - CUR_DIR = os.path.dirname(__file__) CONTENT_DIR = os.path.join(CUR_DIR, 'content') @@ -198,7 +193,6 @@ def distill_articles(articles): return [[article.title, article.status, article.category.name, article.template] for article in articles] - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_generate_feeds(self): settings = get_settings() settings['CACHE_PATH'] = self.temp_cache @@ -218,7 +212,6 @@ def test_generate_feeds(self): generator.generate_feeds(writer) self.assertFalse(writer.write_feed.called) - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_generate_feeds_override_url(self): settings = get_settings() settings['CACHE_PATH'] = self.temp_cache @@ -334,7 +327,6 @@ def test_do_not_use_folder_as_category(self): categories_expected = ['default', 'yeah', 'test', 'zhi-dao-shu'] self.assertEqual(sorted(categories), sorted(categories_expected)) - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_direct_templates_save_as_url_default(self): settings = get_settings() @@ -352,7 +344,6 @@ def test_direct_templates_save_as_url_default(self): template_name='archives', page_name='archives', url="archives.html") - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_direct_templates_save_as_url_modified(self): settings = get_settings() @@ -373,7 +364,6 @@ def test_direct_templates_save_as_url_modified(self): page_name='archives/index', url="archives/") - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_direct_templates_save_as_false(self): settings = get_settings() @@ -398,7 +388,6 @@ def test_per_article_template(self): self.assertIn(custom_template, self.articles) self.assertIn(standard_template, self.articles) - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_period_in_timeperiod_archive(self): """ Test that the context of a generated period_archive is passed @@ -1022,7 +1011,6 @@ def test_copy_one_file(self): with open(self.endfile) as f: self.assertEqual(f.read(), "staticcontent") - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_file_update_required_when_dest_does_not_exist(self): staticfile = MagicMock() staticfile.source_path = self.startfile @@ -1032,7 +1020,6 @@ def test_file_update_required_when_dest_does_not_exist(self): update_required = self.generator._file_update_required(staticfile) self.assertTrue(update_required) - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_dest_and_source_mtimes_are_equal(self): staticfile = MagicMock() staticfile.source_path = self.startfile @@ -1045,7 +1032,6 @@ def test_dest_and_source_mtimes_are_equal(self): isnewer = self.generator._source_is_newer(staticfile) self.assertFalse(isnewer) - @unittest.skipUnless(MagicMock, 'Needs Mock module') def test_source_is_newer(self): staticfile = MagicMock() staticfile.source_path = self.startfile @@ -1097,6 +1083,7 @@ def test_output_file_exists_and_is_newer(self): self.generator.generate_output(None) self.assertTrue(os.path.samefile(self.startfile, self.endfile)) + @unittest.skipUnless(can_symlink(), 'No symlink privilege') def test_can_symlink_when_hardlink_not_possible(self): self.settings['STATIC_CREATE_LINKS'] = True with open(self.startfile, "w") as f: @@ -1104,40 +1091,29 @@ def test_can_symlink_when_hardlink_not_possible(self): os.mkdir(os.path.join(self.temp_output, "static")) 
self.generator.fallback_to_symlinks = True self.generator.generate_context() - try: - self.generator.generate_output(None) - except OSError as e: - # On Windows, possibly others, due to not holding symbolic link - # privilege - self.skipTest(e) + self.generator.generate_output(None) self.assertTrue(os.path.islink(self.endfile)) + @unittest.skipUnless(can_symlink(), 'No symlink privilege') def test_existing_symlink_is_considered_up_to_date(self): self.settings['STATIC_CREATE_LINKS'] = True with open(self.startfile, "w") as f: f.write("staticcontent") os.mkdir(os.path.join(self.temp_output, "static")) - try: - os.symlink(self.startfile, self.endfile) - except OSError as e: - # On Windows, possibly others - self.skipTest(e) + os.symlink(self.startfile, self.endfile) staticfile = MagicMock() staticfile.source_path = self.startfile staticfile.save_as = self.endfile requires_update = self.generator._file_update_required(staticfile) self.assertFalse(requires_update) + @unittest.skipUnless(can_symlink(), 'No symlink privilege') def test_invalid_symlink_is_overwritten(self): self.settings['STATIC_CREATE_LINKS'] = True with open(self.startfile, "w") as f: f.write("staticcontent") os.mkdir(os.path.join(self.temp_output, "static")) - try: - os.symlink("invalid", self.endfile) - except OSError as e: - # On Windows, possibly others - self.skipTest(e) + os.symlink("invalid", self.endfile) staticfile = MagicMock() staticfile.source_path = self.startfile staticfile.save_as = self.endfile @@ -1147,8 +1123,18 @@ def test_invalid_symlink_is_overwritten(self): self.generator.generate_context() self.generator.generate_output(None) self.assertTrue(os.path.islink(self.endfile)) - self.assertEqual(os.path.realpath(self.endfile), - os.path.realpath(self.startfile)) + + # os.path.realpath is broken on Windows before python3.8 for symlinks. + # This is a (ugly) workaround. 
+ # see: https://bugs.python.org/issue9949 + if os.name == 'nt' and sys.version_info < (3, 8): + def get_real_path(path): + return os.readlink(path) if os.path.islink(path) else path + else: + get_real_path = os.path.realpath + + self.assertEqual(get_real_path(self.endfile), + get_real_path(self.startfile)) def test_delete_existing_file_before_mkdir(self): with open(self.startfile, "w") as f: diff --git a/pelican/tests/test_importer.py b/pelican/tests/test_importer.py --- a/pelican/tests/test_importer.py +++ b/pelican/tests/test_importer.py @@ -1,6 +1,7 @@ import locale import os import re +from posixpath import join as posix_join from pelican.settings import DEFAULT_CONFIG from pelican.tests.support import (mute, skipIfNoExecutable, temporary_folder, @@ -448,5 +449,5 @@ def test_download_attachments(self): self.assertEqual(1, len(locations)) directory = locations[0] self.assertTrue( - directory.endswith(os.path.join('content', 'article.rst')), + directory.endswith(posix_join('content', 'article.rst')), directory) diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -1,16 +1,10 @@ import os +from unittest.mock import patch from pelican import readers from pelican.tests.support import get_settings, unittest from pelican.utils import SafeDatetime -try: - from unittest.mock import patch -except ImportError: - try: - from mock import patch - except ImportError: - patch = False CUR_DIR = os.path.dirname(__file__) CONTENT_PATH = os.path.join(CUR_DIR, 'content') @@ -125,7 +119,6 @@ def test_readfile_path_metadata_explicit_date_implicit_modified(self): self.assertDictHasSubset(page.metadata, expected) - @unittest.skipUnless(patch, 'Needs Mock module') def test_find_empty_alt(self): with patch('pelican.readers.logger') as log_mock: content = ['<img alt="" src="test-image.png" width="300px" />', diff --git a/pelican/tests/test_rstdirectives.py b/pelican/tests/test_rstdirectives.py --- a/pelican/tests/test_rstdirectives.py +++ b/pelican/tests/test_rstdirectives.py @@ -1,15 +1,8 @@ -from pelican.tests.support import unittest +from unittest.mock import Mock -try: - from unittest.mock import Mock -except ImportError: - try: - from mock import Mock - except ImportError: - Mock = False +from pelican.tests.support import unittest [email protected](Mock, 'Needs Mock module') class Test_abbr_role(unittest.TestCase): def call_it(self, text): from pelican.rstdirectives import abbr_role diff --git a/pelican/tests/test_server.py b/pelican/tests/test_server.py --- a/pelican/tests/test_server.py +++ b/pelican/tests/test_server.py @@ -25,11 +25,10 @@ def setUp(self): os.chdir(self.temp_output) def tearDown(self): - rmtree(self.temp_output) os.chdir(self.old_cwd) + rmtree(self.temp_output) def test_get_path_that_exists(self): - handler = ComplexHTTPRequestHandler(MockRequest(), ('0.0.0.0', 8888), self.server) handler.base_path = self.temp_output diff --git a/pelican/tests/test_settings.py b/pelican/tests/test_settings.py --- a/pelican/tests/test_settings.py +++ b/pelican/tests/test_settings.py @@ -136,6 +136,8 @@ def test_deprecated_dir_setting(self): settings['ARTICLE_DIR'] settings['PAGE_DIR'] + # locale.getdefaultlocale() is broken on Windows + # See: https://bugs.python.org/issue37945 @unittest.skipIf(platform == 'win32', "Doesn't work on Windows") def test_default_encoding(self): # Test that the default locale is set if not specified in settings diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py 
--- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -757,35 +757,32 @@ def test_turkish_locale(self): class TestSanitisedJoin(unittest.TestCase): - @unittest.skipIf(platform == 'win32', - "Different filesystem root on Windows") def test_detect_parent_breakout(self): with self.assertRaisesRegex( RuntimeError, - "Attempted to break out of output directory to /foo/test"): + "Attempted to break out of output directory to " + "(.*?:)?/foo/test"): # (.*?:)? accounts for Windows root utils.sanitised_join( "/foo/bar", "../test" ) - @unittest.skipIf(platform == 'win32', - "Different filesystem root on Windows") def test_detect_root_breakout(self): with self.assertRaisesRegex( RuntimeError, - "Attempted to break out of output directory to /test"): + "Attempted to break out of output directory to " + "(.*?:)?/test"): # (.*?:)? accounts for Windows root utils.sanitised_join( "/foo/bar", "/test" ) - @unittest.skipIf(platform == 'win32', - "Different filesystem root on Windows") def test_pass_deep_subpaths(self): self.assertEqual( utils.sanitised_join( "/foo/bar", "test" ), - os.path.join("/foo/bar", "test") + utils.posixize_path( + os.path.abspath(os.path.join("/foo/bar", "test"))) ) diff --git a/requirements/test.pip b/requirements/test.pip --- a/requirements/test.pip +++ b/requirements/test.pip @@ -1,6 +1,5 @@ # Tests Pygments==2.6.1 -mock pytest==5.3.5 pytest-cov pytest-xdist
Travis: Add Windows testing support Experimental Windows support has been enabled on Travis-CI. The PR implements testing Pelican on Windows for Python 2.7 and 3.4 to 3.7. It also adds testing for Python 3.7 on Linux. Docs building and flake8 tests are not run on Windows. Python installations on Windows (at least on Travis) are not recognized by tox, so the nosetest command is run directly. The testing suite does not currently pass on Windows (and generates over 1,000 lines of output to tell you so), so the Windows jobs are classified as "allowable failures" on Travis. The hope is that this is one of the first steps to getting the test suite to pass cleanly on Windows. Closes #2427. c.f. #2383.
Looks like `pandoc` isn't installed in the linux environments: ``` SKIP: [u'pandoc', u'--version'] executable not found ``` There should be a way to reduce the duplication... @avaris : Duplication can be reduced, but that requires a bunch of Bash scripting, and I work in Python on Windows (so no Bash). I've started the work [here](https://github.com/MinchinWeb/pelican/blob/windows-tests-3/.travis.yml), but my Bash skills are falling short. @justinmayer : The current set up is working. Is it ready to merge? Thanks for your work on this, @MinchinWeb. I'm curious whether it makes sense to merge this, or instead wait until after potentially switching our CI/CD to GitHub Actions (#2726). What do you think? I'm always leery of putting things, especially open source work, off until "some day". (By way of personal example, I started moving my Pelican plugins over to the common repo/organization, but it's been stalled for maybe a year?) My thought would be to do this now, assuming this still works. It has the benefit of making sure anything added to Pelican before the move to GitHub Actions doesn't break Windows (or break it more). As well, it makes it clear that to reach feature parity, the GitHub Actions' CI pipeline should support Windows as well. But I guess that also depends on how fast the switch to GitHub Actions will take. Is it a week long project, or a six month project? The longer the transition is likely to take, the more value there is in merging it now. For what it's worth, I choose Python because Pelican supported Windows. Jekyll and Ruby was the other option I was considering at the time, and one of the packages Jekyll depended on had explicitly decided not to support Windows. Just my two cents. Whatever is the solution, the sooner it gets done, the less work it will be. I just submitted #2739 because the project's `tasks.py` was hardcorded to Linux and so failed hard trying to get set up for Windows. Also, working on #2738, I get 32 failing tests, but it's not obvious which were failing because of Windows, and which because my pull request had broken things. (I think only 12 of them are due to Windows, and most of those due to the changes in #2738...)
2020-05-01T18:45:06Z
[]
[]
getpelican/pelican
2750
getpelican__pelican-2750
[ "2400" ]
e6df35330205aa6bdbe56cd40e641fab4958af8c
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -24,8 +24,7 @@ from pelican.readers import Readers from pelican.server import ComplexHTTPRequestHandler, RootedHTTPServer from pelican.settings import read_settings -from pelican.utils import (clean_output_dir, file_watcher, - folder_watcher, maybe_pluralize) +from pelican.utils import (FileSystemWatcher, clean_output_dir, maybe_pluralize) from pelican.writers import Writer try: @@ -381,65 +380,36 @@ def get_instance(args): return cls(settings), settings -def autoreload(watchers, args, old_static, reader_descs, excqueue=None): +def autoreload(args, excqueue=None): + print(' --- AutoReload Mode: Monitoring `content`, `theme` and' + ' `settings` for changes. ---') + pelican, settings = get_instance(args) + watcher = FileSystemWatcher(args.settings, Readers, settings) + sleep = False while True: try: - # Check source dir for changed files ending with the given - # extension in the settings. In the theme dir is no such - # restriction; all files are recursively checked if they - # have changed, no matter what extension the filenames - # have. - modified = {k: next(v) for k, v in watchers.items()} + # Don't sleep first time, but sleep afterwards to reduce cpu load + if sleep: + time.sleep(0.5) + else: + sleep = True + + modified = watcher.check() if modified['settings']: pelican, settings = get_instance(args) - - # Adjust static watchers if there are any changes - new_static = settings.get("STATIC_PATHS", []) - - # Added static paths - # Add new watchers and set them as modified - new_watchers = set(new_static).difference(old_static) - for static_path in new_watchers: - static_key = '[static]%s' % static_path - watchers[static_key] = folder_watcher( - os.path.join(pelican.path, static_path), - [''], - pelican.ignore_files) - modified[static_key] = next(watchers[static_key]) - - # Removed static paths - # Remove watchers and modified values - old_watchers = set(old_static).difference(new_static) - for static_path in old_watchers: - static_key = '[static]%s' % static_path - watchers.pop(static_key) - modified.pop(static_key) - - # Replace old_static with the new one - old_static = new_static + watcher.update_watchers(settings) if any(modified.values()): print('\n-> Modified: {}. re-generating...'.format( ', '.join(k for k, v in modified.items() if v))) - - if modified['content'] is None: - logger.warning( - 'No valid files found in content for ' - + 'the active readers:\n' - + '\n'.join(reader_descs)) - - if modified['theme'] is None: - logger.warning('Empty theme folder. Using `basic` ' - 'theme.') - pelican.run() - except KeyboardInterrupt as e: - logger.warning("Keyboard interrupt, quitting.") + except KeyboardInterrupt: if excqueue is not None: - excqueue.put(traceback.format_exception_only(type(e), e)[-1]) - return + excqueue.put(None) + return + raise except Exception as e: if (args.verbosity == logging.DEBUG): @@ -449,10 +419,8 @@ def autoreload(watchers, args, old_static, reader_descs, excqueue=None): else: raise logger.warning( - 'Caught exception "%s". Reloading.', e) - - finally: - time.sleep(.5) # sleep to avoid cpu load + 'Caught exception:\n"%s".', e, + exc_info=settings.get('DEBUG', False)) def listen(server, port, output, excqueue=None): @@ -476,8 +444,10 @@ def listen(server, port, output, excqueue=None): return except KeyboardInterrupt: - print("\nKeyboard interrupt received. 
Shutting down server.") httpd.socket.close() + if excqueue is not None: + return + raise def main(argv=None): @@ -492,37 +462,11 @@ def main(argv=None): try: pelican, settings = get_instance(args) - readers = Readers(settings) - reader_descs = sorted( - { - '%s (%s)' % (type(r).__name__, ', '.join(r.file_extensions)) - for r in readers.readers.values() - if r.enabled - } - ) - - watchers = {'content': folder_watcher(pelican.path, - readers.extensions, - pelican.ignore_files), - 'theme': folder_watcher(pelican.theme, - [''], - pelican.ignore_files), - 'settings': file_watcher(args.settings)} - - old_static = settings.get("STATIC_PATHS", []) - for static_path in old_static: - # use a prefix to avoid possible overriding of standard watchers - # above - watchers['[static]%s' % static_path] = folder_watcher( - os.path.join(pelican.path, static_path), - [''], - pelican.ignore_files) - if args.autoreload and args.listen: excqueue = multiprocessing.Queue() p1 = multiprocessing.Process( target=autoreload, - args=(watchers, args, old_static, reader_descs, excqueue)) + args=(args, excqueue)) p2 = multiprocessing.Process( target=listen, args=(settings.get('BIND'), settings.get('PORT'), @@ -532,26 +476,19 @@ def main(argv=None): exc = excqueue.get() p1.terminate() p2.terminate() - logger.critical(exc) + if exc is not None: + logger.critical(exc) elif args.autoreload: - print(' --- AutoReload Mode: Monitoring `content`, `theme` and' - ' `settings` for changes. ---') - autoreload(watchers, args, old_static, reader_descs) + autoreload(args) elif args.listen: listen(settings.get('BIND'), settings.get('PORT'), settings.get("OUTPUT_PATH")) else: - if next(watchers['content']) is None: - logger.warning( - 'No valid files found in content for ' - + 'the active readers:\n' - + '\n'.join(reader_descs)) - - if next(watchers['theme']) is None: - logger.warning('Empty theme folder. Using `basic` theme.') - + watcher = FileSystemWatcher(args.settings, Readers, settings) + watcher.check() pelican.run() - + except KeyboardInterrupt: + logger.warning('Keyboard interrupt received. Exiting.') except Exception as e: logger.critical('%s', e) diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -734,60 +734,167 @@ def order_content(content_list, order_by='slug'): return content_list -def folder_watcher(path, extensions, ignores=[]): - '''Generator for monitoring a folder for modifications. - - Returns a boolean indicating if files are changed since last check. 
- Returns None if there are no matching files in the folder''' - - def file_times(path): - '''Return `mtime` for each file in path''' - - for root, dirs, files in os.walk(path, followlinks=True): - dirs[:] = [x for x in dirs if not x.startswith(os.curdir)] - - for f in files: - valid_extension = f.endswith(tuple(extensions)) - file_ignored = any( - fnmatch.fnmatch(f, ignore) for ignore in ignores - ) - if valid_extension and not file_ignored: - try: - yield os.stat(os.path.join(root, f)).st_mtime - except OSError as e: - logger.warning('Caught Exception: %s', e) - - LAST_MTIME = 0 - while True: - try: - mtime = max(file_times(path)) - if mtime > LAST_MTIME: - LAST_MTIME = mtime - yield True - except ValueError: - yield None +class FileSystemWatcher: + def __init__(self, settings_file, reader_class, settings=None): + self.watchers = { + 'settings': FileSystemWatcher.file_watcher(settings_file) + } + + self.settings = None + self.reader_class = reader_class + self._extensions = None + self._content_path = None + self._theme_path = None + self._ignore_files = None + + if settings is not None: + self.update_watchers(settings) + + def update_watchers(self, settings): + new_extensions = set(self.reader_class(settings).extensions) + new_content_path = settings.get('PATH', '') + new_theme_path = settings.get('THEME', '') + new_ignore_files = set(settings.get('IGNORE_FILES', [])) + + extensions_changed = new_extensions != self._extensions + content_changed = new_content_path != self._content_path + theme_changed = new_theme_path != self._theme_path + ignore_changed = new_ignore_files != self._ignore_files + + # Refresh content watcher if related settings changed + if extensions_changed or content_changed or ignore_changed: + self.add_watcher('content', + new_content_path, + new_extensions, + new_ignore_files) + + # Refresh theme watcher if related settings changed + if theme_changed or ignore_changed: + self.add_watcher('theme', + new_theme_path, + [''], + new_ignore_files) + + # Watch STATIC_PATHS + old_static_watchers = set(key + for key in self.watchers + if key.startswith('[static]')) + + for path in settings.get('STATIC_PATHS', []): + key = '[static]{}'.format(path) + if ignore_changed or (key not in self.watchers): + self.add_watcher( + key, + os.path.join(new_content_path, path), + [''], + new_ignore_files) + if key in old_static_watchers: + old_static_watchers.remove(key) + + # cleanup removed static watchers + for key in old_static_watchers: + del self.watchers[key] + + # update values + self.settings = settings + self._extensions = new_extensions + self._content_path = new_content_path + self._theme_path = new_theme_path + self._ignore_files = new_ignore_files + + def check(self): + '''return a key:watcher_status dict for all watchers''' + result = {key: next(watcher) for key, watcher in self.watchers.items()} + + # Various warnings + if result.get('content') is None: + reader_descs = sorted( + { + '%s (%s)' % (type(r).__name__, ', '.join(r.file_extensions)) + for r in self.reader_class(self.settings).readers.values() + if r.enabled + } + ) + logger.warning( + 'No valid files found in content for the active readers:\n' + + '\n'.join(reader_descs)) + + if result.get('theme') is None: + logger.warning('Empty theme folder. 
Using `basic` theme.') + + return result + + def add_watcher(self, key, path, extensions=[''], ignores=[]): + watcher = self.get_watcher(path, extensions, ignores) + if watcher is not None: + self.watchers[key] = watcher + + def get_watcher(self, path, extensions=[''], ignores=[]): + '''return a watcher depending on path type (file or folder)''' + if not os.path.exists(path): + logger.warning("Watched path does not exist: %s", path) + return None + + if os.path.isdir(path): + return self.folder_watcher(path, extensions, ignores) else: - yield False + return self.file_watcher(path) + + @staticmethod + def folder_watcher(path, extensions, ignores=[]): + '''Generator for monitoring a folder for modifications. + Returns a boolean indicating if files are changed since last check. + Returns None if there are no matching files in the folder''' -def file_watcher(path): - '''Generator for monitoring a file for modifications''' - LAST_MTIME = 0 - while True: - if path: + def file_times(path): + '''Return `mtime` for each file in path''' + + for root, dirs, files in os.walk(path, followlinks=True): + dirs[:] = [x for x in dirs if not x.startswith(os.curdir)] + + for f in files: + valid_extension = f.endswith(tuple(extensions)) + file_ignored = any( + fnmatch.fnmatch(f, ignore) for ignore in ignores + ) + if valid_extension and not file_ignored: + try: + yield os.stat(os.path.join(root, f)).st_mtime + except OSError as e: + logger.warning('Caught Exception: %s', e) + + LAST_MTIME = 0 + while True: try: - mtime = os.stat(path).st_mtime - except OSError as e: - logger.warning('Caught Exception: %s', e) - continue - - if mtime > LAST_MTIME: - LAST_MTIME = mtime - yield True + mtime = max(file_times(path)) + if mtime > LAST_MTIME: + LAST_MTIME = mtime + yield True + except ValueError: + yield None else: yield False - else: - yield None + + @staticmethod + def file_watcher(path): + '''Generator for monitoring a file for modifications''' + LAST_MTIME = 0 + while True: + if path: + try: + mtime = os.stat(path).st_mtime + except OSError as e: + logger.warning('Caught Exception: %s', e) + continue + + if mtime > LAST_MTIME: + LAST_MTIME = mtime + yield True + else: + yield False + else: + yield None def set_date_tzinfo(d, tz_name=None):
diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -10,6 +10,7 @@ from pelican import utils from pelican.generators import TemplatePagesGenerator +from pelican.readers import Readers from pelican.settings import read_settings from pelican.tests.support import (LoggedTestCase, get_article, locale_available, unittest) @@ -361,47 +362,91 @@ def test_process_translations(self): self.assertNotIn(a_arts[4], b_arts[5].translations) self.assertNotIn(a_arts[5], b_arts[4].translations) - def test_watchers(self): - # Test if file changes are correctly detected - # Make sure to handle not getting any files correctly. - - dirname = os.path.join(os.path.dirname(__file__), 'content') - folder_watcher = utils.folder_watcher(dirname, ['rst']) - - path = os.path.join(dirname, 'article_with_metadata.rst') - file_watcher = utils.file_watcher(path) - - # first check returns True - self.assertEqual(next(folder_watcher), True) - self.assertEqual(next(file_watcher), True) - - # next check without modification returns False - self.assertEqual(next(folder_watcher), False) - self.assertEqual(next(file_watcher), False) - - # after modification, returns True - t = time.time() - os.utime(path, (t, t)) - self.assertEqual(next(folder_watcher), True) - self.assertEqual(next(file_watcher), True) - - # file watcher with None or empty path should return None - self.assertEqual(next(utils.file_watcher('')), None) - self.assertEqual(next(utils.file_watcher(None)), None) - - empty_path = os.path.join(os.path.dirname(__file__), 'empty') - try: - os.mkdir(empty_path) - os.mkdir(os.path.join(empty_path, "empty_folder")) - shutil.copy(__file__, empty_path) - - # if no files of interest, returns None - watcher = utils.folder_watcher(empty_path, ['rst']) - self.assertEqual(next(watcher), None) - except OSError: - self.fail("OSError Exception in test_files_changed test") - finally: - shutil.rmtree(empty_path, True) + def test_filesystemwatcher(self): + def create_file(name, content): + with open(name, 'w') as f: + f.write(content) + + # disable logger filter + from pelican.utils import logger + logger.disable_filter() + + # create a temp "project" dir + root = mkdtemp() + content_path = os.path.join(root, 'content') + static_path = os.path.join(root, 'content', 'static') + config_file = os.path.join(root, 'config.py') + theme_path = os.path.join(root, 'mytheme') + + # populate + os.mkdir(content_path) + os.mkdir(theme_path) + create_file(config_file, + 'PATH = "content"\n' + 'THEME = "mytheme"\n' + 'STATIC_PATHS = ["static"]') + + t = time.time() - 1000 # make sure it's in the "past" + os.utime(config_file, (t, t)) + settings = read_settings(config_file) + + watcher = utils.FileSystemWatcher(config_file, Readers, settings) + # should get a warning for static not not existing + self.assertLogCountEqual(1, 'Watched path does not exist: .*static') + + # create it and update config + os.mkdir(static_path) + watcher.update_watchers(settings) + # no new warning + self.assertLogCountEqual(1, 'Watched path does not exist: .*static') + + # get modified values + modified = watcher.check() + # empty theme and content should raise warnings + self.assertLogCountEqual(1, 'No valid files found in content') + self.assertLogCountEqual(1, 'Empty theme folder. 
Using `basic` theme') + + self.assertIsNone(modified['content']) # empty + self.assertIsNone(modified['theme']) # empty + self.assertIsNone(modified['[static]static']) # empty + self.assertTrue(modified['settings']) # modified, first time + + # add a content, add file to theme and check again + create_file(os.path.join(content_path, 'article.md'), + 'Title: test\n' + 'Date: 01-01-2020') + + create_file(os.path.join(theme_path, 'dummy'), + 'test') + + modified = watcher.check() + # no new warning + self.assertLogCountEqual(1, 'No valid files found in content') + self.assertLogCountEqual(1, 'Empty theme folder. Using `basic` theme') + + self.assertIsNone(modified['[static]static']) # empty + self.assertFalse(modified['settings']) # not modified + self.assertTrue(modified['theme']) # modified + self.assertTrue(modified['content']) # modified + + # change config, remove static path + create_file(config_file, + 'PATH = "content"\n' + 'THEME = "mytheme"\n' + 'STATIC_PATHS = []') + + settings = read_settings(config_file) + watcher.update_watchers(settings) + + modified = watcher.check() + self.assertNotIn('[static]static', modified) # should be gone + self.assertTrue(modified['settings']) # modified + self.assertFalse(modified['content']) # not modified + self.assertFalse(modified['theme']) # not modified + + # cleanup + logger.enable_filter() + shutil.rmtree(root) def test_clean_output_dir(self): retention = ()
"pelican -lr" fails on Windows Hi @justinmayer ! I just installed pelican from the [latest commit](https://github.com/getpelican/pelican/commit/8f68551454157aab139f0c59b772fc0b4c635c42) and found `pelican -lr` to not be working on Windows. Either works, on the other hand (`pelican -l` or `pelican -r`). I get the following output: ~~~ $ pelican -lrD DEBUG: Pelican version: 3.7.2.dev0 DEBUG: Python version: 3.6.2 DEBUG: Adding current directory to system path DEBUG: Temporarily adding PLUGIN_PATHS to system path DEBUG: Restoring system path CRITICAL: TypeError: can't pickle generator objects Traceback (most recent call last): File "c:\users\jonathan hale\appdata\local\programs\python\python36\lib\runpy. py", line 193, in _run_module_as_main "__main__", mod_spec) File "c:\users\jonathan hale\appdata\local\programs\python\python36\lib\runpy. py", line 85, in _run_code exec(code, run_globals) File "C:\Users\Jonathan Hale\AppData\Local\Programs\Python\Python36\Scripts\pe lican.exe\__main__.py", line 9, in <module> File "c:\users\jonathan hale\appdata\local\programs\python\python36\lib\site-p ackages\pelican\__init__.py", line 563, in main p1.start() File "c:\users\jonathan hale\appdata\local\programs\python\python36\lib\multip rocessing\process.py", line 105, in start self._popen = self._Popen(self) File "c:\users\jonathan hale\appdata\local\programs\python\python36\lib\multip rocessing\context.py", line 223, in _Popen return _default_context.get_context().Process._Popen(process_obj) File "c:\users\jonathan hale\appdata\local\programs\python\python36\lib\multip rocessing\context.py", line 322, in _Popen return Popen(process_obj) File "c:\users\jonathan hale\appdata\local\programs\python\python36\lib\multip rocessing\popen_spawn_win32.py", line 65, in __init__ reduction.dump(process_obj, to_child) File "c:\users\jonathan hale\appdata\local\programs\python\python36\lib\multip rocessing\reduction.py", line 60, in dump ForkingPickler(file, protocol).dump(obj) TypeError: can't pickle generator objects Traceback (most recent call last): File "<string>", line 1, in <module> File "c:\users\jonathan hale\appdata\local\programs\python\python36\lib\multip rocessing\spawn.py", line 99, in spawn_main new_handle = reduction.steal_handle(parent_pid, pipe_handle) File "c:\users\jonathan hale\appdata\local\programs\python\python36\lib\multip rocessing\reduction.py", line 87, in steal_handle _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) PermissionError: [WinError 5] Zugriff verweigert ~~~ ("Zugriff verweigert" means "Access denied") This is on a basic `pelican-quickstart` project, no additional plugins, content or custom theme. Though this may be important for #2162 . Cheers, Jonathan.
Hi Jonathan. I don't use Windows, so I unfortunately won't be of much assistance. Did you modify the settings file in any way? No, this is a untouched quickstart project. The error refers to "pickle", which could be related to content caching, which if memory serves should be disabled by default. You could try explicitly disabling caching, but beyond that I'm out of ideas at the moment. Disabling caching with ~~~ CACHE_CONTENT = False LOAD_CONTENT_CACHE = False ~~~ or `--ignore-cache` both did not seem to help. I will see if I can find something later today. Debugging this results in an exception in multiprocessing/spawn.py around the following code: ~~~python def _check_not_importing_main(): if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable.''') ~~~ Any ideas? (Adding that "idiom" did not fully fix the error. Now it raises the error in [p1.start()](https://github.com/getpelican/pelican/blob/master/pelican/__init__.py#L563) ) Alright, so it looks like on windows it uses pickle to serialize objects and send them to the child process. With it the arguments [passed with watchers](https://github.com/getpelican/pelican/blob/master/pelican/__init__.py#L558), which contains `folder watchers`, which are generator objects that are not pickleable, that's why it gives us the > CRITICAL: TypeError: can't pickle generator objects Now, should this be handled? ~I have no experience with pickle, it looks a bit as if I manually would serialize/deserialize every attribute... for Generator and every subclass? 🤔~ Generator objects are not related to `Generator` class in `generator.py`. Instead they are some python specific thing. In this case they probably hold a native Windows resource that can not be ~simply~ trivially serialized/deserialized. (I'm guessing on linux the serialization is handled properly, or the handle to the file watching "thing" is easily shared between processes.... ?) Alright, so my suggestion is to have a `autoreload_process` function which finds its own watchers and then calls `autoreload` rather than passing `autoreload` directly to `multiprocess.Process(target=here)`. That would omit the watchers argument and we would not need to serialize it. I sketched this out and it *would* work, but is rather ugly. Potentially better idea: ~~~python if args.autoreload and args.listen: excqueue = multiprocessing.Queue() p1 = multiprocessing.Process(target=autoreload, ...) p2 = multiprocessing.Process(target=listen, ...) p1.start() p2.start() exc = excqueue.get() p1.terminate() p2.terminate() logger.critical(exc) ~~~ This currently spawns two processes, one for listen, one for autoreload. How about having one of the two be done by the current process? Or maybe use threads instead? (Or are they not as portable? 🤔 ) For any of both I could create a pullrequest once we made a decision. The real error might be `CRITICAL: TypeError: can't pickle generator objects` due to `watchers` containing generator objects. `multiprocessing` uses `fork` on Unix systems, but on Windows it runs a new process and pickles the arguments. 
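A minimal sketch of the workaround suggested above, using made-up names (`autoreload_process`, `folder_watcher`) rather than Pelican's real API: only a picklable argument crosses the process boundary, and the unpicklable generator is created inside the child process.

```python
import multiprocessing


def folder_watcher():
    # Stand-in for Pelican's generator-based folder watcher.
    while True:
        yield False


def autoreload_process(settings_path):
    # The child receives only a plain string; the generator is built here,
    # inside the child, so nothing unpicklable has to be serialized.
    watchers = {'content': folder_watcher()}
    print(settings_path, next(watchers['content']))


if __name__ == '__main__':
    p = multiprocessing.Process(target=autoreload_process,
                                args=('pelicanconf.py',))
    p.start()
    p.join()
```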
It looks like pickling is the real issue like @segevfiner said. I did some looking and found [this](https://web.archive.org/web/20160112050840/http://metaoptimize.com/blog/2009/12/22/why-cant-you-pickle-generators-in-python-workaround-pattern-for-saving-training-state/) workaround, and [this](https://peadrop.com/blog/2009/12/29/why-you-cannot-pickle-generators/) response. The response was written by a CPython dev who recommends refactoring the generator into an iterator if custom `__getstate__` and `__setstate__` methods can't be used. I'm new to writing iterators, but looking at `file_watcher` and `folder_watcher`, it doesn't look like it would be too difficult to refactor the generators into iterators. Another option is refactoring so that you only start the generators in the subprocess. Passing the subprocess the configuration for how to start them. This should be quite sinpler to do I think. That's a far better idea, I'll try that before trying to write iterators. I couldn't get it to work by starting the generators in the subprocess, so I tried the iterator method. It worked (ish), but it turns out that you can't pickle `dict_keys` objects either. Looks like this is gonna be a pain :/ Any news on this? @danieleteti In a comment above I mentioned I have some changes for something that could work, but it's quite a while ago, so I don't remember any details. I hope I still have those changes lying around in case someone wants me to PR those. Since back in August the interest in this issue was pretty low I opted for `pelican -Dr` and a local server in the `output/` folder instead. That works just as fine. Yes, currently I'm working in the same way (`pelican -Dr` and `python -m http.server` in the `output/` folder) but would be nice, expecially for newcomers, to have something that *just works* instead of a workaround. Do we have any progress on this? I don't know if this helps anyone else, but I have 2 scripts that contain the (pelican -Dr) and the (python -m http.server) parts build.bat and server.bat. While the extra command windows are a bit annoying, I find that "start build.bat" and "start server.bat" accomplishes what I need. Hi @jackdewinter. As with most other projects, if there were any updates regarding this issue, presumably they would be posted here already. To summarize, I don't believe any Pelican maintainers currently use Windows — and perhaps don't even have access to computers running Windows. Ergo, it's up to the community to decide whether a reasonably elegant solution can be devised. If and when that day comes, we would be happy to review said contributions. Until that time, I think some further documentation is warranted, if only to explain that adding both `--autoreload` and `--listen` to a single `pelican` invocation on Windows does not currently behave as the user might expect. I've taken the liberty of doing that, in the most succinct way possible, via: 9fdcb90 I'm a hugo user, but I prefer python, so I thought I would give this a chance. The fact that this issue is 1.5 years old and not fixed is really kind of surprising. No one has to support windows of course, but i just thought I would mention that the experience on Windows surely is discouraging many potential users. Best wishes, looks like a cool tool from the outside at least... I'm a former pelican user... for it's lack of interest in supporting windows, I'm switching to Hugo... I would to use pelican because it's python, but it's not possible. 
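The other idea floated here, refactoring the generator into an iterator class, could look roughly like the following hypothetical sketch; an instance only carries plain attributes, so it can be pickled and passed to a spawned child process.

```python
import os


class FolderWatcher:
    """Hypothetical iterator-style folder watcher (not Pelican's actual code)."""

    def __init__(self, path):
        self.path = path
        self.last_mtime = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Collect modification times of all files under the watched path.
        mtimes = [os.stat(os.path.join(root, name)).st_mtime
                  for root, _, files in os.walk(self.path)
                  for name in files]
        if not mtimes:
            return None      # nothing to watch
        newest = max(mtimes)
        if newest > self.last_mtime:
            self.last_mtime = newest
            return True      # something changed since the last check
        return False
```

For what it's worth, the patch that was eventually merged (shown above in this entry) takes the other route: the `FileSystemWatcher` is constructed inside the autoreload process, so no watcher ever needs to be pickled.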
@thetanil / @danieleteti: Honestly, folks, I find your comments to be disheartening. At the very least, they are inaccurate and show a lack of understanding as how open-source projects work, and at worst, they exhibit a lack of consideration as to how such comments can hurt the feelings of those who donate, for free, their time and effort to maintain said open-source projects. Contrary to the casually-flung supposition, as far as I am aware, the experience on Windows is not discouraging potential users in significant numbers. In fact, many Windows users happily use Pelican to publish their sites, every day, without any problems whatsoever. Presumably they do so in the same way that I have personally done for years, which is to regenerate in one terminal pane and serve the output in another. This works beautifully and has been the primary way folks have used Pelican for a very long time. Then one day someone contributed a pull request that added the ability to serve output from within Pelican. That pull request was reviewed and merged. Yay. Then @Squareys discovered that combining that feature with the regenerate option on Windows resulted in an error. He not only helpfully reported it, but he spent a considerable amount of time working on a potential solution. I regret that he did not receive timely feedback from anyone on his work. Personally, at the time I had recently relocated and had other priorities to attend to. To suggest that Pelican maintainers lack interest in supporting Windows is not only false, it’s insulting. If someone submits a pull request that addresses a problem, includes tests, and meets the community’s standards, it is merged. This issue is no different in that respect. Contributions that address this issue are welcome. In short, please take care with your words. Greater attention to accuracy and empathy in this case would have spared some hurt feelings. Do any Pelican users here on Windows use the [live-reload Invoke task](https://github.com/getpelican/pelican/pull/2526)? If so, how well does it work for you? If you use Windows and haven't tried it, would you mind giving it a try and reporting the results here? @justinmayer I really tried to write that comment in the least "choosy begger" way I could. I work open source projects as well, I know how you feel. I think when we work on these projects we also get blind spots because we know how it works. I will try again this way. My experience as a new user: Find some recommendation, go to the web site, read some docs, follow the quick-start. It doesn't work. Search it up. Land here. So from my point of view, as a Windows user I did everything right and I still need to build hacky stuff myself to make it work. That's fine, it's open source, I could fix it myself, I understand that. I just thought maybe it's a blind spot and I should at least mention it since the issue is so old. Just to be clear and perhaps more irritating, I am not even suggesting a technical change. If there was just some advice on the [install docs](https://docs.getpelican.com/en/stable/install.html) *Windows Users:* - Don't use `make devserver` - Don't use pelican -lr - instead do x/y And I have to say, even after reading through this issue, as a new user I am unsure what x/y should be. I see there are a couple workarounds, but I do not know what is correct or best. Now _those_ are useful and actionable suggestions. I agree that should be made clearer. Let's collectively figure out a way to do that. 
Since I personally use Invoke rather than Make, I didn't even realize that `pelican -lr` was being called when users run [`make devserver`](https://github.com/getpelican/pelican/blob/6f0743b340cfaa98ced38fc41727c8ea9f1a52f8/pelican/tools/templates/Makefile.jinja2#L110-L115). Windows users who use Make should indeed be warned that task will not behave properly. As for what Windows users _should_ do, I think the first step is determining whether the aforementioned Invoke `livereload` task works properly on Windows. Anyone care to try and report back? `invoke livereload` is working for me with Windows 10, Python 3.7, Pelican 4.2.0, livereload 2.6.1 and the output of a fresh pelican-quickstart. (I did get tripped up for a bit on md/rst files in `content/` not triggering the refresh- #2604. https://github.com/lepture/python-livereload/pull/203 is still open, but there are workarounds in that issue. I just removed the wildcard from my local `tasks.py` for now since I don't use subdirectories in `content/`) i tried notes: - no invoke command. not in pelican docs. found it and installed - no livereload module. not in docs. found it installed. - then I got a traceback not implemented. i think it's cause my content folder is messed up locally - ran out of time. this is just fyi https://docs.getpelican.com/en/stable/publish.html#invoke @Squareys: Could you possibly take a look and see whether you still have the changes you worked on related to this issue? They might come in handy. Thanks, Jonathan!
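For illustration only (this is not part of the issue thread): a minimal, hypothetical sketch of the failure discussed above, where the `folder_watcher` generator is a simplified stand-in for Pelican's real watcher. Under the `spawn` start method, the only one available on Windows, `multiprocessing` pickles the child process arguments, and generator objects cannot be pickled; under the Unix default `fork`, nothing is pickled, which is why the problem does not show up there.

```python
import multiprocessing


def folder_watcher(path):
    # Simplified stand-in for Pelican's folder_watcher generator.
    while True:
        yield path


def child(watchers):
    print(next(watchers['content']))


if __name__ == '__main__':
    # 'spawn' is the only start method on Windows; it pickles the target's args.
    multiprocessing.set_start_method('spawn')
    watchers = {'content': folder_watcher('content')}
    p = multiprocessing.Process(target=child, args=(watchers,))
    p.start()  # fails here: TypeError: cannot pickle 'generator' object
    p.join()
```

This is why the suggestions above revolve around not passing the watchers across the process boundary at all, either by having the child build its own watchers or by keeping one of the two tasks in the parent process.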
2020-05-09T13:30:40Z
[]
[]
getpelican/pelican
2753
getpelican__pelican-2753
[ "2596" ]
e6df35330205aa6bdbe56cd40e641fab4958af8c
diff --git a/pelican/log.py b/pelican/log.py --- a/pelican/log.py +++ b/pelican/log.py @@ -2,6 +2,7 @@ import os import sys from collections import defaultdict +from collections.abc import Mapping __all__ = [ 'init' @@ -18,9 +19,10 @@ def format(self, record): record.__dict__['customlevelname'] = customlevel # format multiline messages 'nicely' to make it clear they are together record.msg = record.msg.replace('\n', '\n | ') - record.args = tuple(arg.replace('\n', '\n | ') if - isinstance(arg, str) else - arg for arg in record.args) + if not isinstance(record.args, Mapping): + record.args = tuple(arg.replace('\n', '\n | ') if + isinstance(arg, str) else + arg for arg in record.args) return super().format(record) def formatException(self, ei):
diff --git a/pelican/tests/support.py b/pelican/tests/support.py --- a/pelican/tests/support.py +++ b/pelican/tests/support.py @@ -195,6 +195,15 @@ def count_logs(self, msg=None, level=None): (level is None or l.levelno == level) ]) + def count_formatted_logs(self, msg=None, level=None): + return len([ + l + for l + in self.buffer + if (msg is None or re.search(msg, self.format(l))) and + (level is None or l.levelno == level) + ]) + class LoggedTestCase(unittest.TestCase): """A test case that captures log messages.""" diff --git a/pelican/tests/test_log.py b/pelican/tests/test_log.py --- a/pelican/tests/test_log.py +++ b/pelican/tests/test_log.py @@ -1,6 +1,7 @@ import logging import unittest from collections import defaultdict +from contextlib import contextmanager from pelican import log from pelican.tests.support import LogCountHandler @@ -11,6 +12,7 @@ def setUp(self): super().setUp() self.logger = logging.getLogger(__name__) self.handler = LogCountHandler() + self.handler.setFormatter(log.get_formatter()) self.logger.addHandler(self.handler) def tearDown(self): @@ -24,55 +26,107 @@ def _reset_limit_filter(self): log.LimitFilter._threshold = 5 log.LimitFilter._group_count = defaultdict(int) + @contextmanager + def reset_logger(self): + try: + yield None + finally: + self._reset_limit_filter() + self.handler.flush() + + def test_log_formatter(self): + counter = self.handler.count_formatted_logs + with self.reset_logger(): + # log simple case + self.logger.warning('Log %s', 'test') + self.assertEqual( + counter('Log test', logging.WARNING), + 1) + + with self.reset_logger(): + # log multiline message + self.logger.warning('Log\n%s', 'test') + # Log + # | test + self.assertEqual( + counter('Log', logging.WARNING), + 1) + self.assertEqual( + counter(' | test', logging.WARNING), + 1) + + with self.reset_logger(): + # log multiline argument + self.logger.warning('Log %s', 'test1\ntest2') + # Log test1 + # | test2 + self.assertEqual( + counter('Log test1', logging.WARNING), + 1) + self.assertEqual( + counter(' | test2', logging.WARNING), + 1) + + with self.reset_logger(): + # log single list + self.logger.warning('Log %s', ['foo', 'bar']) + self.assertEqual( + counter(r"Log \['foo', 'bar'\]", logging.WARNING), + 1) + + with self.reset_logger(): + # log single dict + self.logger.warning('Log %s', {'foo': 1, 'bar': 2}) + self.assertEqual( + # dict order is not guaranteed + counter(r"Log {'.*': \d, '.*': \d}", logging.WARNING), + 1) + def test_log_filter(self): def do_logging(): for i in range(5): self.logger.warning('Log %s', i) self.logger.warning('Another log %s', i) # no filter - do_logging() - self.assertEqual( - self.handler.count_logs('Log \\d', logging.WARNING), - 5) - self.assertEqual( - self.handler.count_logs('Another log \\d', logging.WARNING), - 5) - self.handler.flush() - self._reset_limit_filter() + with self.reset_logger(): + do_logging() + self.assertEqual( + self.handler.count_logs('Log \\d', logging.WARNING), + 5) + self.assertEqual( + self.handler.count_logs('Another log \\d', logging.WARNING), + 5) # filter by template - log.LimitFilter._ignore.add((logging.WARNING, 'Log %s')) - do_logging() - self.assertEqual( - self.handler.count_logs('Log \\d', logging.WARNING), - 0) - self.assertEqual( - self.handler.count_logs('Another log \\d', logging.WARNING), - 5) - self.handler.flush() - self._reset_limit_filter() + with self.reset_logger(): + log.LimitFilter._ignore.add((logging.WARNING, 'Log %s')) + do_logging() + self.assertEqual( + self.handler.count_logs('Log \\d', 
logging.WARNING), + 0) + self.assertEqual( + self.handler.count_logs('Another log \\d', logging.WARNING), + 5) # filter by exact message - log.LimitFilter._ignore.add((logging.WARNING, 'Log 3')) - do_logging() - self.assertEqual( - self.handler.count_logs('Log \\d', logging.WARNING), - 4) - self.assertEqual( - self.handler.count_logs('Another log \\d', logging.WARNING), - 5) - self.handler.flush() - self._reset_limit_filter() + with self.reset_logger(): + log.LimitFilter._ignore.add((logging.WARNING, 'Log 3')) + do_logging() + self.assertEqual( + self.handler.count_logs('Log \\d', logging.WARNING), + 4) + self.assertEqual( + self.handler.count_logs('Another log \\d', logging.WARNING), + 5) # filter by both - log.LimitFilter._ignore.add((logging.WARNING, 'Log 3')) - log.LimitFilter._ignore.add((logging.WARNING, 'Another log %s')) - do_logging() - self.assertEqual( - self.handler.count_logs('Log \\d', logging.WARNING), - 4) - self.assertEqual( - self.handler.count_logs('Another log \\d', logging.WARNING), - 0) - self.handler.flush() - self._reset_limit_filter() + with self.reset_logger(): + log.LimitFilter._ignore.add((logging.WARNING, 'Log 3')) + log.LimitFilter._ignore.add((logging.WARNING, 'Another log %s')) + do_logging() + self.assertEqual( + self.handler.count_logs('Log \\d', logging.WARNING), + 4) + self.assertEqual( + self.handler.count_logs('Another log \\d', logging.WARNING), + 0)
fix log formatting of iterable objects If the logged object is a dictionary (or any other iterable object), the single argument is expanded into one argument per item in the object by `BaseFormatter` in an attempt to prettify the message. This results in an invalid message format with an unexpected number of arguments. ``` import logging logger = logging.getLogger(__name__) logger.debug('my dict: %s', {'here': 'is', 'my': 'dict'}) ``` Which results in the following error: ``` Traceback (most recent call last): File "/usr/lib/python2.7/logging/__init__.py", line 868, in emit msg = self.format(record) File "/usr/lib/python2.7/logging/__init__.py", line 741, in format return fmt.format(record) File "/usr/local/lib/python2.7/dist-packages/pelican/log.py", line 34, in format return super(BaseFormatter, self).format(record) File "/usr/lib/python2.7/logging/__init__.py", line 465, in format record.message = record.getMessage() File "/usr/lib/python2.7/logging/__init__.py", line 329, in getMessage msg = msg % self.args TypeError: not all arguments converted during string formatting ``` Introduced by https://github.com/getpelican/pelican/commit/dd76c7158f7e05b0d203818d3fe18bea26e48c3f in #2438. "Solution": try to prettify arguments only if they form a tuple, as in `logger.debug('my message: %s and %s', 'foo', 'bar')`. This requires review and thorough testing.
Fair enough, but this won't be an issue for "any iterable", [at best any `Mapping` as the only argument](https://github.com/python/cpython/blob/3.7/Lib/logging/__init__.py#L291-L310). `logging` singles out `Mapping`s because string formatting handles it specially.
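To make the special case concrete, here is a short illustrative snippet (not taken from the thread). When the sole argument is a dict, `logging` stores it unchanged as `record.args`, so both `%s` and `%(name)s` interpolation keep working; rebuilding `record.args` as a tuple, as the old formatter did, reduces the mapping to its keys and breaks the interpolation, which is exactly what the `Mapping` guard in the patch avoids.

```python
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("demo")

# A single dict argument is kept as record.args, so these both render fine:
log.debug("my dict: %s", {"here": "is", "my": "dict"})
log.warning("served %(path)s with status %(status)s",
            {"path": "/index.html", "status": 200})

# The old formatter turned record.args into a tuple; for a mapping that keeps
# only the keys, and the interpolation then fails as in the traceback above:
try:
    "my dict: %s" % tuple({"here": "is", "my": "dict"})
except TypeError as exc:
    print(exc)  # not all arguments converted during string formatting
```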
2020-05-09T17:05:53Z
[]
[]
getpelican/pelican
2818
getpelican__pelican-2818
[ "2817" ]
e4d9c41a77b45cb7ff6b8d1732623990adf65931
diff --git a/pelican/plugins/_utils.py b/pelican/plugins/_utils.py --- a/pelican/plugins/_utils.py +++ b/pelican/plugins/_utils.py @@ -53,6 +53,9 @@ def load_legacy_plugin(plugin, plugin_paths): if spec is None: raise ImportError('Cannot import plugin `{}`'.format(plugin)) else: + # Avoid loading the same plugin twice + if spec.name in sys.modules: + return sys.modules[spec.name] # create module object from spec mod = importlib.util.module_from_spec(spec) # place it into sys.modules cache
diff --git a/pelican/tests/test_plugins.py b/pelican/tests/test_plugins.py --- a/pelican/tests/test_plugins.py +++ b/pelican/tests/test_plugins.py @@ -131,6 +131,17 @@ def get_plugin_names(plugins): 'normal subpackage plugin'}, get_plugin_names(plugins)) + # ensure normal plugins are loaded only once + SETTINGS = { + 'PLUGINS': ['normal_plugin'], + 'PLUGIN_PATHS': [self._NORMAL_PLUGIN_FOLDER], + } + plugins = load_plugins(SETTINGS) + for plugin in load_plugins(SETTINGS): + # The second load_plugins() should return the same plugin + # objects as the first one + self.assertIn(plugin, plugins) + # namespace plugin short SETTINGS = { 'PLUGINS': ['ns_plugin']
Plugins run twice in autoreload mode <!-- Hi there! Thank you for discovering and submitting an issue. Before you submit this, let’s make sure of a few things. Please make sure the following boxes are ticked if they are correct. If not, please try and fulfill them first. --> <!-- Checked checkbox should look like this: [x] --> - [x] I have read the [Filing Issues](https://docs.getpelican.com/en/latest/contribute.html#filing-issues) and subsequent “How to Get Help” sections of the documentation. - [x] I have searched the [issues](https://github.com/getpelican/pelican/issues?q=is%3Aissue) (including closed ones) and believe that this is not a duplicate. <!-- Once the above boxes are checked, if you are able to fill in the following list with your information, it would be very helpful for maintainers. --> - **OS version and name**: Ubuntu 18.04.5 LTS (WSL) - **Python version**: 3.8.6 - **Pelican version**: e4d9c41a ## To reproduce Create these files: ``` . ├── content │ └── 1.rst ├── pelicanconf.py └── plugins └── test_plugin.py ``` ```rst .. content/1.rst #### test #### :date: 1970-01-01 :modified: 1970-01-01 TEST ``` ```python # pelicanconf.py PLUGINS = [ 'test_plugin', ] PLUGIN_PATHS = [ 'plugins', ] ``` ```python # plugins/test_plugin.py import logging from pelican import signals logger = logging.getLogger(__name__) def test_function(content): logger.info('test plugin loaded') test = content._content test += 'TEST' content._content = test logger.info(content._content) def register(): signals.content_object_init.connect(test_function) ``` Then run: ```console $ pelican -rD --logs-dedup-min-level DEBUG content [Not showing unrelated logs] -> test plugin loaded -> <p>TEST</p> | TEST -> test plugin loaded -> <p>TEST</p> | TESTTEST $ pelican -D --logs-dedup-min-level DEBUG content [Not showing unrelated logs] -> test plugin loaded -> <p>TEST</p> | TEST ``` The plugin runs twice with `-r`, but only once without `-r`. ## The casue of the issue After a bisect, I found that commit ed1eca16 introduced this issue. ```console $ git checkout ed1eca16^ $ pelican -rD --logs-dedup-min-level DEBUG content [Not showing unrelated logs] -> test plugin loaded -> <p>TEST</p> | TEST $ git checkout ed1eca16 $ pelican -rD --logs-dedup-min-level DEBUG content [Not showing unrelated logs] -> test plugin loaded -> <p>TEST</p> | TEST -> test plugin loaded -> <p>TEST</p> | TESTTEST ``` After adding this line, I found that the plugin was registered twice: ```diff diff --git a/pelican/contents.py b/pelican/contents.py index 594cd3b5..ff991be9 100644 --- a/pelican/contents.py +++ b/pelican/contents.py @@ -139,6 +139,7 @@ class Content(object): if 'summary' in metadata: self._summary = metadata['summary'] + logger.info(str(signals.content_object_init.receivers)) signals.content_object_init.send(self) def __str__(self): ``` ```console $ pelican -rD --logs-dedup-min-level DEBUG content [Not showing unrelated logs] -> {140542259499216: <weakref at 0x7fd28a2b8b80; to 'function' at 0x7fd28b7410d0 (test_function)>, 140542259600400: <weakref at 0x7fd28b7525e0; to 'function' at 0x7fd28b759c10 (test_function)>} -> test plugin loaded -> <p>TEST</p> | TEST -> test plugin loaded -> <p>TEST</p> | TESTTEST ``` But why? 
The reason: ```python >>> # Implementation of ed1eca16^ >>> import sys >>> sys.path.insert(0, 'plugins') >>> plugin = __import__('test_plugin', globals(), locals(), str('module')) >>> plugin_ = __import__('test_plugin', globals(), locals(), str('module')) >>> plugin is plugin_ True >>> >>> # Implementation of ed1eca16 >>> import importlib >>> spec = importlib.machinery.PathFinder.find_spec('test_plugin', ['plugins']) >>> plugin = importlib.util.module_from_spec(spec) >>> plugin_ = importlib.util.module_from_spec(spec) >>> plugin is plugin_ False ``` ## How to fix To fix it, simply avoid loading the same plugin twice: ```diff diff --git a/pelican/plugins/_utils.py b/pelican/plugins/_utils.py index 4e6ec3c5..699192d3 100644 --- a/pelican/plugins/_utils.py +++ b/pelican/plugins/_utils.py @@ -53,6 +53,11 @@ def load_legacy_plugin(plugin, plugin_paths): if spec is None: raise ImportError('Cannot import plugin `{}`'.format(plugin)) else: + # Avoid loading the same plugin twice + try: + return sys.modules[spec.name] + except KeyError: + pass # create module object from spec mod = importlib.util.module_from_spec(spec) # place it into sys.modules cache ``` I will create a pull request after writing some tests for it.
Thanks. I would expect old module to be deleted/garbage collected since it is overridden in the `sys.modules`. Returning module if it is already present makes sense, but `try/except` is a bit unnecessary. A simple `if` would do: ```python if spec.name in sys.modules: return sys.modules[spec.name] ``` > Thanks. I would expect old module to be deleted/garbage collected since it is overridden in the `sys.modules`. Returning module if it is already present makes sense, but `try/except` is a bit unnecessary. A simple `if` would do: > > ```python > if spec.name in sys.modules: > return sys.modules[spec.name] > ``` I used `try/except` to make it consistent with: https://github.com/getpelican/pelican/blob/e4d9c41a77b45cb7ff6b8d1732623990adf65931/pelican/plugins/_utils.py#L65-L69 I will use `if` instead in my commit.
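For context, a small sketch of the pattern the merged fix follows; the `load_once` helper is a hypothetical name, not Pelican's API. The point is to consult `sys.modules` before executing the spec, so a second load returns the already-registered module object and the plugin's `register()` hooks are not connected twice.

```python
import importlib.machinery
import importlib.util
import sys


def load_once(name, paths):
    """Hypothetical helper: at most one module object per plugin name."""
    spec = importlib.machinery.PathFinder.find_spec(name, paths)
    if spec is None:
        raise ImportError('Cannot import plugin `{}`'.format(name))
    if spec.name in sys.modules:       # second call: reuse the cached module
        return sys.modules[spec.name]
    mod = importlib.util.module_from_spec(spec)
    sys.modules[spec.name] = mod       # cache before executing, as importlib does
    spec.loader.exec_module(mod)
    return mod


# load_once('test_plugin', ['plugins']) is load_once('test_plugin', ['plugins'])  -> True
```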
2020-10-31T12:45:43Z
[]
[]
getpelican/pelican
2926
getpelican__pelican-2926
[ "2825" ]
f862d64b7a492f2af24e1228790a1d75371a1112
diff --git a/pelican/readers.py b/pelican/readers.py --- a/pelican/readers.py +++ b/pelican/readers.py @@ -571,8 +571,9 @@ def read_file(self, base_path, path, content_class=Page, fmt=None, content, reader_metadata = self.get_cached_data(path, (None, None)) if content is None: content, reader_metadata = reader.read(path) + reader_metadata = _filter_discardable_metadata(reader_metadata) self.cache_data(path, (content, reader_metadata)) - metadata.update(_filter_discardable_metadata(reader_metadata)) + metadata.update(reader_metadata) if content: # find images with empty alt
diff --git a/pelican/tests/content/article_with_markdown_and_empty_tags.md b/pelican/tests/content/article_with_markdown_and_empty_tags.md new file mode 100644 --- /dev/null +++ b/pelican/tests/content/article_with_markdown_and_empty_tags.md @@ -0,0 +1,4 @@ +Title: Article with markdown and empty tags +Tags: + +This is some content. diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py --- a/pelican/tests/test_generators.py +++ b/pelican/tests/test_generators.py @@ -265,6 +265,8 @@ def test_generate_context(self): ['This is a super article !', 'published', 'yeah', 'article'], ['This is a super article !', 'published', 'Default', 'article'], ['Article with an inline SVG', 'published', 'Default', 'article'], + ['Article with markdown and empty tags', 'published', 'Default', + 'article'], ['This is an article with category !', 'published', 'yeah', 'article'], ['This is an article with multiple authors!', 'published', @@ -569,6 +571,7 @@ def test_article_order_by(self): 'Article title', 'Article with Nonconformant HTML meta tags', 'Article with an inline SVG', + 'Article with markdown and empty tags', 'Article with markdown and nested summary metadata', 'Article with markdown and summary metadata multi', 'Article with markdown and summary metadata single', diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py --- a/pelican/tests/test_readers.py +++ b/pelican/tests/test_readers.py @@ -18,6 +18,7 @@ class ReaderTest(unittest.TestCase): def read_file(self, path, **kwargs): # Isolate from future API changes to readers.read_file + r = readers.Readers(settings=get_settings(**kwargs)) return r.read_file(base_path=CONTENT_PATH, path=path) @@ -795,6 +796,23 @@ def test_typogrify_dashes_config(self): self.assertEqual(page.content, expected) self.assertEqual(page.title, expected_title) + def test_metadata_has_no_discarded_data(self): + md_filename = 'article_with_markdown_and_empty_tags.md' + + r = readers.Readers(cache_name='cache', settings=get_settings( + CACHE_CONTENT=True)) + page = r.read_file(base_path=CONTENT_PATH, path=md_filename) + + __, cached_metadata = r.get_cached_data( + _path(md_filename), (None, None)) + + expected = { + 'title': 'Article with markdown and empty tags' + } + self.assertEqual(cached_metadata, expected) + self.assertNotIn('tags', page.metadata) + self.assertDictHasSubset(page.metadata, expected) + class HTMLReaderTest(ReaderTest): def test_article_with_comments(self):
Reader level cache results in TypeError with empty metadata in Markdown - [x] I have read the [Filing Issues](https://docs.getpelican.com/en/latest/contribute.html#filing-issues) and subsequent “How to Get Help” sections of the documentation. - [x] I have searched the [issues](https://github.com/getpelican/pelican/issues?q=is%3Aissue) (including closed ones) and believe that this is not a duplicate. <!-- Once the above boxes are checked, if you are able to fill in the following list with your information, it would be very helpful for maintainers. --> - **Python version**: 3.8.2 - **Pelican version**: 4.5.3 ## Issue Metadata that should be discarded ([`_DISCARD`](https://github.com/getpelican/pelican/blob/8033162ba4393db60791b201fb100d1be0f04431/pelican/readers.py#L27-L31)) is stored in the cache and upon retrieval new `object()` instance is created and [fails filtration](https://github.com/getpelican/pelican/blob/8033162ba4393db60791b201fb100d1be0f04431/pelican/readers.py#L575). Relevant information for reproducing is below. Note the empty `Tag:` metadata. First run is without cache and succeeds. Second run retrieves from cache and errors due to not being filtered. ```console $ cat content/article.md Title: asd Tags: Content here $ cat pelicanconf.py DEFAULT_DATE = 'fs' CACHE_CONTENT = True LOAD_CONTENT_CACHE = True CONTENT_CACHING_LAYER= 'reader' $ pelican content -s pelicanconf.py WARNING: Feeds generated without SITEURL set properly may not be valid WARNING: No timezone information specified in the settings. Assuming your timezone is UTC for feed generation. Check https://docs.getpelican.com/en/latest/settings.html#TIMEZONE for more information WARNING: Watched path does not exist: /home/avaris/ws/test/content/images Done: Processed 1 article, 0 drafts, 0 pages, 0 hidden pages and 0 draft pages in 0.11 seconds. $ pelican content -s pelicanconf.py WARNING: Feeds generated without SITEURL set properly may not be valid WARNING: No timezone information specified in the settings. Assuming your timezone is UTC for feed generation. Check https://docs.getpelican.com/en/latest/settings.html#TIMEZONE for more information WARNING: Watched path does not exist: /home/avaris/ws/test/content/images CRITICAL: 'object' object is not iterable $ pelican content -s pelicanconf.py -D DEBUG: Pelican version: 4.5.3 DEBUG: Python version: 3.8.2 [snip] CRITICAL: 'object' object is not iterable Traceback (most recent call last): File "/home/avaris/.venvs/ptest/bin/pelican", line 8, in <module> sys.exit(main()) File "/home/avaris/.venvs/ptest/lib/python3.8/site-packages/pelican/__init__.py", line 523, in main pelican.run() File "/home/avaris/.venvs/ptest/lib/python3.8/site-packages/pelican/__init__.py", line 109, in run p.generate_context() File "/home/avaris/.venvs/ptest/lib/python3.8/site-packages/pelican/generators.py", line 659, in generate_context for tag in article.tags: TypeError: 'object' object is not iterable ```
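The identity-based sentinel explains why the error only shows up on the second (cached) run. The snippet below is a simplified illustration rather than Pelican's actual code: the `_filter_discardable_metadata` shown here is a stand-in that assumes an `is not` identity check, and the pickle round trip stands in for the on-disk reader cache.

```python
import pickle

_DISCARD = object()


def _filter_discardable_metadata(metadata):
    # Simplified stand-in: drop values that are the _DISCARD sentinel.
    return {k: v for k, v in metadata.items() if v is not _DISCARD}


metadata = {'title': 'asd', 'tags': _DISCARD}

# First run: the identity check matches and 'tags' is dropped.
print(_filter_discardable_metadata(metadata))   # {'title': 'asd'}

# Cached run: after the round trip the sentinel is a *different* object()
# instance, so it slips through the filter and later breaks
# `for tag in article.tags`.
restored = pickle.loads(pickle.dumps(metadata))
print(_filter_discardable_metadata(restored))   # {'title': 'asd', 'tags': <object object at ...>}
```

Filtering before caching, as proposed in the discussion below, sidesteps the problem because the sentinel never reaches the cache in the first place.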
Hi @avaris, is this fix still wanted? I was looking into the [code](https://github.com/getpelican/pelican/blob/4.7.0/pelican/readers.py#L571-L575) and it seems fairly easy to fix. From: ```python content, reader_metadata = self.get_cached_data(path, (None, None)) if content is None: content, reader_metadata = reader.read(path) self.cache_data(path, (content, reader_metadata)) metadata.update(_filter_discardable_metadata(reader_metadata)) ``` To: ```python content, reader_metadata = self.get_cached_data(path, (None, None)) if content is None: content, reader_metadata = reader.read(path) reader_metadata = _filter_discardable_metadata(reader_metadata) # discard before caching self.cache_data(path, (content, reader_metadata)) metadata.update(reader_metadata) ``` I was wondering if it's OK for me to submit a PR for it, plus unit tests covering that scenario. Hi @jonasborges, yes, this is still needed and you are more than welcome to submit a PR for it :). Thanks. Your suggested change is correct.
2021-10-01T23:27:53Z
[]
[]
getpelican/pelican
2940
getpelican__pelican-2940
[ "2938" ]
fe19f1abb661a666af9fc194d02b86a7bb37252d
diff --git a/pelican/__init__.py b/pelican/__init__.py --- a/pelican/__init__.py +++ b/pelican/__init__.py @@ -1,4 +1,5 @@ import argparse +import json import logging import multiprocessing import os @@ -24,7 +25,7 @@ from pelican.plugins._utils import get_plugin_name, load_plugins from pelican.readers import Readers from pelican.server import ComplexHTTPRequestHandler, RootedHTTPServer -from pelican.settings import coerce_overrides, read_settings +from pelican.settings import read_settings from pelican.utils import (FileSystemWatcher, clean_output_dir, maybe_pluralize) from pelican.writers import Writer @@ -259,16 +260,29 @@ def __call__(self, parser, namespace, values, option_string): parser.exit() -class ParseDict(argparse.Action): +class ParseOverrides(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): - d = {} - if values: - for item in values: - split_items = item.split("=", 1) - key = split_items[0].strip() - value = split_items[1].strip() - d[key] = value - setattr(namespace, self.dest, d) + overrides = {} + for item in values: + try: + k, v = item.split("=", 1) + except ValueError: + raise ValueError( + 'Extra settings must be specified as KEY=VALUE pairs ' + f'but you specified {item}' + ) + try: + overrides[k] = json.loads(v) + except json.decoder.JSONDecodeError: + raise ValueError( + f'Invalid JSON value: {v}. ' + 'Values specified via -e / --extra-settings flags ' + 'must be in JSON notation. ' + 'Use -e KEY=\'"string"\' to specify a string value; ' + '-e KEY=null to specify None; ' + '-e KEY=false (or true) to specify False (or True).' + ) + setattr(namespace, self.dest, overrides) def parse_arguments(argv=None): @@ -366,13 +380,13 @@ def parse_arguments(argv=None): parser.add_argument('-e', '--extra-settings', dest='overrides', help='Specify one or more SETTING=VALUE pairs to ' - 'override settings. If VALUE contains spaces, ' - 'add quotes: SETTING="VALUE". Values other than ' - 'integers and strings can be specified via JSON ' - 'notation. (e.g., SETTING=none)', + 'override settings. VALUE must be in JSON notation: ' + 'specify string values as SETTING=\'"some string"\'; ' + 'booleans as SETTING=true or SETTING=false; ' + 'None as SETTING=null.', nargs='*', - action=ParseDict - ) + action=ParseOverrides, + default={}) args = parser.parse_args(argv) @@ -385,6 +399,8 @@ def parse_arguments(argv=None): def get_config(args): + """Builds a config dictionary based on supplied `args`. 
+ """ config = {} if args.path: config['PATH'] = os.path.abspath(os.path.expanduser(args.path)) @@ -409,7 +425,7 @@ def get_config(args): if args.bind is not None: config['BIND'] = args.bind config['DEBUG'] = args.verbosity == logging.DEBUG - config.update(coerce_overrides(args.overrides)) + config.update(args.overrides) return config diff --git a/pelican/settings.py b/pelican/settings.py --- a/pelican/settings.py +++ b/pelican/settings.py @@ -1,7 +1,6 @@ import copy import importlib.util import inspect -import json import locale import logging import os @@ -659,25 +658,3 @@ def configure_settings(settings): continue # setting not specified, nothing to do return settings - - -def coerce_overrides(overrides): - if overrides is None: - return {} - coerced = {} - types_to_cast = {int, str, bool} - for k, v in overrides.items(): - if k not in DEFAULT_CONFIG: - logger.warning('Override for unknown setting %s, ignoring', k) - continue - setting_type = type(DEFAULT_CONFIG[k]) - if setting_type not in types_to_cast: - coerced[k] = json.loads(v) - else: - try: - coerced[k] = setting_type(v) - except ValueError: - logger.debug('ValueError for %s override with %s, try to ' - 'load as json', k, v) - coerced[k] = json.loads(v) - return coerced
diff --git a/pelican/tests/test_cli.py b/pelican/tests/test_cli.py new file mode 100644 --- /dev/null +++ b/pelican/tests/test_cli.py @@ -0,0 +1,72 @@ +import unittest + +from pelican import get_config, parse_arguments + + +class TestParseOverrides(unittest.TestCase): + def test_flags(self): + for flag in ['-e', '--extra-settings']: + args = parse_arguments([flag, 'k=1']) + self.assertDictEqual(args.overrides, {'k': 1}) + + def test_parse_multiple_items(self): + args = parse_arguments('-e k1=1 k2=2'.split()) + self.assertDictEqual(args.overrides, {'k1': 1, 'k2': 2}) + + def test_parse_valid_json(self): + json_values_python_values_map = { + '""': '', + 'null': None, + '"string"': 'string', + '["foo", 12, "4", {}]': ['foo', 12, '4', {}] + } + for k, v in json_values_python_values_map.items(): + args = parse_arguments(['-e', 'k=' + k]) + self.assertDictEqual(args.overrides, {'k': v}) + + def test_parse_invalid_syntax(self): + invalid_items = ['k= 1', 'k =1', 'k', 'k v'] + for item in invalid_items: + with self.assertRaises(ValueError): + parse_arguments(f'-e {item}'.split()) + + def test_parse_invalid_json(self): + invalid_json = { + '', 'False', 'True', 'None', 'some other string', + '{"foo": bar}', '[foo]' + } + for v in invalid_json: + with self.assertRaises(ValueError): + parse_arguments(['-e ', 'k=' + v]) + + +class TestGetConfigFromArgs(unittest.TestCase): + def test_overrides_known_keys(self): + args = parse_arguments([ + '-e', + 'DELETE_OUTPUT_DIRECTORY=false', + 'OUTPUT_RETENTION=["1.txt"]', + 'SITENAME="Title"' + ]) + config = get_config(args) + config_must_contain = { + 'DELETE_OUTPUT_DIRECTORY': False, + 'OUTPUT_RETENTION': ['1.txt'], + 'SITENAME': 'Title' + } + self.assertDictEqual(config, {**config, **config_must_contain}) + + def test_overrides_non_default_type(self): + args = parse_arguments([ + '-e', + 'DISPLAY_PAGES_ON_MENU=123', + 'PAGE_TRANSLATION_ID=null', + 'TRANSLATION_FEED_RSS_URL="someurl"' + ]) + config = get_config(args) + config_must_contain = { + 'DISPLAY_PAGES_ON_MENU': 123, + 'PAGE_TRANSLATION_ID': None, + 'TRANSLATION_FEED_RSS_URL': 'someurl' + } + self.assertDictEqual(config, {**config, **config_must_contain}) diff --git a/pelican/tests/test_settings.py b/pelican/tests/test_settings.py --- a/pelican/tests/test_settings.py +++ b/pelican/tests/test_settings.py @@ -7,7 +7,7 @@ from pelican.settings import (DEFAULT_CONFIG, DEFAULT_THEME, _printf_s_to_format_field, - coerce_overrides, configure_settings, + configure_settings, handle_deprecated_settings, read_settings) from pelican.tests.support import unittest @@ -304,18 +304,3 @@ def test_deprecated_slug_substitutions_from_file(self): [(r'C\+\+', 'cpp')] + self.settings['SLUG_REGEX_SUBSTITUTIONS']) self.assertNotIn('SLUG_SUBSTITUTIONS', settings) - - def test_coerce_overrides(self): - overrides = coerce_overrides({ - 'ARTICLE_EXCLUDES': '["testexcl"]', - 'READERS': '{"foo": "bar"}', - 'STATIC_EXCLUDE_SOURCES': 'true', - 'THEME_STATIC_DIR': 'theme', - }) - expected = { - 'ARTICLE_EXCLUDES': ["testexcl"], - 'READERS': {"foo": "bar"}, - 'STATIC_EXCLUDE_SOURCES': True, - 'THEME_STATIC_DIR': 'theme', - } - self.assertDictEqual(overrides, expected)
Booleans specified with -e / --extra-settings do not evaluate to False <!-- Hi there! Thank you for discovering and submitting an issue. Before you submit this, let’s make sure of a few things. Please make sure the following boxes are ticked if they are correct. If not, please try and fulfill them first. --> <!-- Checked checkbox should look like this: [x] --> - [x] I have read the [Filing Issues](https://docs.getpelican.com/en/latest/contribute.html#filing-issues) and subsequent “How to Get Help” sections of the documentation. - [x] I have searched the [issues](https://github.com/getpelican/pelican/issues?q=is%3Aissue) (including closed ones) and believe that this is not a duplicate. <!-- Once the above boxes are checked, if you are able to fill in the following list with your information, it would be very helpful for maintainers. --> - **OS version and name**: macOS Mojave - **Python version**: 3.9.7 - **Pelican version**: 4.7.1 - **Link to theme**: default produced by `pelican-quickstart` - **Links to plugins**: default produced by `pelican-quickstart` - **Link to your site**: – - **Link to your source**: https://github.com/r4victor/pelican/tree/boolean_overrides_demo/website - **Link to a [Gist](https://gist.github.com/) with the contents of your settings file**: https://github.com/r4victor/pelican/blob/boolean_overrides_demo/website/pelicanconf.py ## Issue 1. Pick any boolean settings parameter that is set to `True` by default or in `pelicanconf.py`. 2. Try to override it to `False` with `-e / --extra-settings`. 3. Observe that parameter is still `True`. For example, I set `DELETE_OUTPUT_DIRECTORY = True` in `pelicanconf.py` and then run: ``` pelican --debug -e DELETE_OUTPUT_DIRECTORY=false ``` I see how the old files in the `output` directory are deleted. Here's the debug log: https://github.com/r4victor/pelican/blob/boolean_overrides_demo/website/debug.txt Setting `-e param=False` or `-e param=0` doesn't work as well. The only way is to set the parameter to an empty string like `-e param=`. This is definitely not what users are supposed to do according to the docs (https://docs.getpelican.com/en/latest/settings.html#settings). It seems the problem was introduced when fixing issue #2789 and originates in the [`coerce_overrides()`](https://github.com/getpelican/pelican/blob/bb10d286a6ac2f283d332f7e80c5655b9997f827/pelican/settings.py#L664) function in `settings.py`: ```python def coerce_overrides(overrides): if overrides is None: return {} coerced = {} types_to_cast = {int, str, bool} for k, v in overrides.items(): if k not in DEFAULT_CONFIG: logger.warning('Override for unknown setting %s, ignoring', k) continue setting_type = type(DEFAULT_CONFIG[k]) if setting_type not in types_to_cast: coerced[k] = json.loads(v) else: try: coerced[k] = setting_type(v) except ValueError: logger.debug('ValueError for %s override with %s, try to ' 'load as json', k, v) coerced[k] = json.loads(v) return coerced ``` So, when `types_to_cast` is `bool`, setting `-e k=v` results in `coerced[k] = bool(v)`, but in Python `bool(v)` is True for any string except for the empty string. `bool("false")` is `True`. The solution would be to evaluate `"false", "False", "0", ""` (something else?) to `False`, and everything else to `True`.
Hi Victor. Many thanks for reporting this problem and posting such a detailed analysis. Much appreciated! 👏 Since you seem to have a good handle on a potential solution, would you consider submitting a PR that addresses the problem, preferably including first a commit with a (failing) test and then another that fixes it? That would _really_ help us out! cc: @jwodder @sabaini Hi, Justin. Sure, I'll work on it. I now realized that `coerce_overrides()` and thus `-e` / `--extra-settings` are completely broken. It's not just booleans. To parse a value of some parameter, `coerce_overrides()` does `coerced[k] = json.loads(v)`. But to avoid parsing string parameters, it also looks at the type of the parameter's default value and so does `coerced[k] = str(v)` if it's str. (It does the same for `int`, and for `bool` before my PR for no good reason). The problem with this is that in Pelican a settings parameter can take values of different Python types. For instance, `PAGE_TRANSLATION_ID` is `'slug'` by default but can be `None` or `False`. Many other parameters are nullable strings. `DEFAULT_PAGINATION` can be `False` or int. All those parameters are parsed incorrectly. With default `PAGINATED_TEMPLATES` run `pelican -e DEFAULT_PAGINATION=123`. You'll get one blog entry per page because Pelican sees that `DEFAULT_PAGINATION` is `False` in defaults and parses `123` as `True`. Then `Paginator(... per_page=True)` works as `Paginator(... per_page=1)`, but that's another story. The same for strings and so on. My PR doesn't affect this behaviour. Another observation is that though we allowed non-json booleans like `False` and `True` for boolean types, it's still not possible to do `pelican -e JINJA_ENVIRONMENT='{"trim_blocks": True}'`. You'll get `JSONDecodeError`. I see two possible solutions: 1. Associate each parameter with a type (maybe a compound type). Then parse each type accordingly. 2. Give up on custom values like `True` and `False` and rely completely on `json.loads()`. Show users a useful error message on `JSONDecodeError`. I'm 100% for the second solution since it's much simpler and predictable. I wouldn't be afraid of breaking code that relies on `True` and `False` since it was so broken anyway. I should note that relying on `json.loads()` will force users to quote string parameters like so: `pelican -e k='"some string"'`. We'll parse `'null'` as `None` and `'"some string"'` as `'some string'`, but parsing `'some string'` will throw an error. If you want to accept `'some string'` literally, you need to handle `'null'`, and this again means you need to introduce some kind of type system for parameters. I'd go with `json.loads()` + informative error messages + overview in the docs.
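A brief illustration of the two points above (not from the thread): casting the raw CLI string can never produce `False`, whereas `json.loads` handles booleans, numbers, `null`, lists and quoted strings uniformly, at the cost of an extra layer of shell quoting for plain strings.

```python
import json

# Plain casting cannot express False: any non-empty string is truthy.
print(bool("false"), bool("False"), bool("0"))   # True True True

# JSON notation is unambiguous:
print(json.loads("false"))           # False
print(json.loads("123"))             # 123
print(json.loads("null"))            # None
print(json.loads('["testexcl"]'))    # ['testexcl']
print(json.loads('"slug"'))          # slug

# So a string value needs quoting on the command line, e.g.
#   pelican -e SITENAME='"My Site"' DELETE_OUTPUT_DIRECTORY=false
```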
2021-10-20T12:20:15Z
[]
[]
getpelican/pelican
3002
getpelican__pelican-3002
[ "2982" ]
e8d6318e93fecacebca41b2013622b374bd2ad34
diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -412,10 +412,7 @@ def posixize_path(rel_path): class _HTMLWordTruncator(HTMLParser): - _word_regex = re.compile(r"(({SBC})({SBC}|-|')*)|{DBC}".format( - # SBC means Latin-like characters. A word contains a few characters. - # ASCII |Extended Latin | Cyrillic - SBC="[0-9a-zA-Z]|[\u00C0-\u024f]|[\u0400-\u04FF]", + _word_regex = re.compile(r"{DBC}|(\w[\w'-]*)".format( # DBC means CJK-like characters. An character can stand for a word. DBC=("([\u4E00-\u9FFF])|" # CJK Unified Ideographs "([\u3400-\u4DBF])|" # CJK Unified Ideographs Extension A
diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -230,6 +230,11 @@ def test_truncate_html_words(self): 'Ты мелькнула, ты предстала, Снова сердце задрожало,', 3 ), 'Ты мелькнула, ты' + ' …') + self.assertEqual( + utils.truncate_html_words( + 'Trong đầm gì đẹp bằng sen', 4 + ), + 'Trong đầm gì đẹp' + ' …') # Words enclosed or intervaled by HTML tags. self.assertEqual(
SUMMARY_MAX_LENGTH is applied in a wrong way to Vietnamese content <!-- Hi there! Thank you for discovering and submitting an issue. Before you submit this, let’s make sure of a few things. Please make sure the following boxes are ticked if they are correct. If not, please try and fulfill them first. --> <!-- Checked checkbox should look like this: [x] --> - [x] I have read the [Filing Issues](https://docs.getpelican.com/en/latest/contribute.html#filing-issues) and subsequent “How to Get Help” sections of the documentation. - [x] I have searched the [issues](https://github.com/getpelican/pelican/issues?q=is%3Aissue) (including closed ones) and believe that this is not a duplicate. <!-- Once the above boxes are checked, if you are able to fill in the following list with your information, it would be very helpful for maintainers. --> - **OS version and name**: Ubuntu 21.10 - **Python version**: 3.10.2 - **Pelican version**: 4.7.2 - **Link to theme**: N/A, I use my self-made theme - **Links to plugins**: None - **Link to your site**: https://manhhomienbienthuy.github.io/ - **Link to your source**: It‘s not published. - **Link to a [Gist](https://gist.github.com/) with the contents of your settings file**: <!-- If your source is not accessible, put Gist link here --> https://gist.github.com/manhhomienbienthuy/b989f84c7551d9ff13549e260cfb9450 ## Issue <!-- Now feel free to write your issue. Please avoid vague phrases like “[…] doesn’t work”. Be descriptive! Thanks again 🙌 ❤️ --> When a site contains articles in Vietnamese, the summary on index page generated by pelican will be much shorter than SUMMARY_MAX_LENGTH. I use default setting for SUMMARY_MAX_LENGTH = 50, but it's obviously less than 50 words (only about 35-36 words). But the biggest problem is that, sometimes the last word is broken in the middle and only some first characters remaining. I copied generated results here, so that you can have a direct compare: ``` Bài toán FizzBuzz thì quá kinh điển rồi, có lẽ ai học lập trình cũng đã từng làm quen với bài toán này ít nhất một lần. Trong bài viết này, tôi sẽ tổng hợp một s […] ``` When I used version 4.6.0, it had no problems. Here is the result ``` Bài toán FizzBuzz thì quá kinh điển rồi, có lẽ ai học lập trình cũng đã từng làm quen với bài toán này ít nhất một lần. Trong bài viết này, tôi sẽ tổng hợp một số các khác nhau để giải bài toán này với ngôn ngữ Python. Mục […] ```
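For illustration (not part of the report), the behaviour can be reproduced by counting regex matches with the old explicit character ranges versus the `\w`-based pattern from the patch above. Vietnamese letters such as ầ and ẹ sit in the Latin Extended Additional block (U+1E00–U+1EFF), which the old ranges did not cover, so each such letter split a word into several "words", exhausting the 50-word budget early and sometimes cutting a word in half.

```python
import re

# Old pattern: explicit ASCII / Extended Latin / Cyrillic ranges.
OLD = re.compile(r"(([0-9a-zA-Z]|[\u00C0-\u024f]|[\u0400-\u04FF])"
                 r"([0-9a-zA-Z]|[\u00C0-\u024f]|[\u0400-\u04FF]|-|')*)")
# New pattern (as in the patch): any Unicode word character.
NEW = re.compile(r"\w[\w'-]*")

text = "Trong đầm gì đẹp bằng sen"
print(sum(1 for _ in OLD.finditer(text)))   # 9 -- 'đầm', 'đẹp', 'bằng' each split in two
print(sum(1 for _ in NEW.finditer(text)))   # 6 -- one match per word
```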
2022-05-02T13:18:29Z
[]
[]
getpelican/pelican
3124
getpelican__pelican-3124
[ "3110" ]
86f62d0a92ad78df36aac9f5837d4f2715535421
diff --git a/pelican/utils.py b/pelican/utils.py --- a/pelican/utils.py +++ b/pelican/utils.py @@ -155,7 +155,9 @@ def __repr__(self): def __get__(self, obj, objtype): '''Support instance methods.''' - return partial(self.__call__, obj) + fn = partial(self.__call__, obj) + fn.cache = self.cache + return fn def deprecated_attribute(old, new, since=None, remove=None, doc=None):
diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py --- a/pelican/tests/test_utils.py +++ b/pelican/tests/test_utils.py @@ -860,3 +860,34 @@ def test_pass_deep_subpaths(self): utils.posixize_path( os.path.abspath(os.path.join("/foo/bar", "test"))) ) + + +class TestMemoized(unittest.TestCase): + def test_memoized(self): + class Container: + def _get(self, key): + pass + + @utils.memoized + def get(self, key): + return self._get(key) + + container = Container() + + with unittest.mock.patch.object( + container, "_get", side_effect=lambda x: x + ) as get_mock: + self.assertEqual("foo", container.get("foo")) + get_mock.assert_called_once_with("foo") + + get_mock.reset_mock() + self.assertEqual("foo", container.get("foo")) + get_mock.assert_not_called() + + self.assertEqual("bar", container.get("bar")) + get_mock.assert_called_once_with("bar") + + get_mock.reset_mock() + container.get.cache.clear() + self.assertEqual("bar", container.get("bar")) + get_mock.assert_called_once_with("bar")
More flexible page content testing for unit tests - [x] I have searched the [issues](https://github.com/getpelican/pelican/issues?q=is%3Aissue) (including closed ones) and believe that this is not a duplicate. - [x] I have searched the [documentation](https://docs.getpelican.com/) and believe that my question is not covered. - [ ] I am willing to lend a hand to help implement this feature. <!-- optional but encouraged --> ## Feature Request I am currently working on automated tests for a Pelican plugin. While documentation on this appears to be quite sparse and I took some of the other plugins for inspiration, especially testing the rendered page content itself seems to impose some issues for me. The plugin will transform the source files to support custom types of blocks. To test this (and some intermediate transformation steps), I tried using `Content.content`, but this appears to be impossible for now, as this will do hard caching: https://github.com/getpelican/pelican/blob/b473280eac576705bc39b8151d27a1e05040d0e8/pelican/contents.py#L386-L396 While I could use `Content._content` as in the plugin itself, the high-level API feels much more suitable for testing purposes. For this reason, it would be nice to somehow be able to reset the cached value of `pelican.contents.Content.get_content` (which uses `pelican.utils.memoized`) like Python allows for `functools.lru_cache` and `functools.cached_property` for example.
I did not test it but changing the `__get__` in `memoized`: https://github.com/getpelican/pelican/blob/4db5c7ca4b4903d6fbebc406f922419a6a2bb283/pelican/utils.py#L156-L158 to something like: ```python def get(self, obj, objtype): fn = partial(self.__call__, obj) fn.cache = self.cache return fn ``` should (in theory) allow for something like ```python assert some_article.content == "foo" some_article._content = "bar" some_article.get_content.cache.clear() assert some_article.content == "bar" ``` Thanks for the snippet. I just gave it a quick try by modifying my local *pelican* distribution and it seems to work as intended. @FriedrichFroebel: Glad to hear that the suggestion from @avaris appears to address your target use case. Would you be willing to submit a pull request that implements these changes?
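For readers unfamiliar with the decorator in question, here is a simplified, hypothetical stand-in for `pelican.utils.memoized` showing where the suggested `.cache` attribute gets attached; the real implementation may differ in details beyond the `__get__` shown in the patch above.

```python
from functools import partial


class memoized:
    """Simplified stand-in for pelican.utils.memoized."""

    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        if args not in self.cache:
            self.cache[args] = self.func(*args)
        return self.cache[args]

    def __get__(self, obj, objtype):
        fn = partial(self.__call__, obj)
        fn.cache = self.cache   # the suggested change: expose the cache on the bound callable
        return fn


# A test could then invalidate the cached value:
#   article._content = 'bar'
#   article.get_content.cache.clear()
#   assert article.content == 'bar'
```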
2023-04-24T16:47:59Z
[]
[]