metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "aaditgupta21/FlareStocks",
"score": 3
} |
#### File: FlareStocks/data/extract_data.py
```python
import os
import requests
import bs4
import pickle
import time
print("Starting")
start = time.time()
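# Scrape the current S&P 500 ticker list from Wikipedia, cache it in tickers.pkl,
# then download each company's metadata and end-of-day prices (from 2019-01-02 on)
# from the Tiingo API into per-symbol folders under stocks/.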
def get_tickers():
resp = requests.get(
'http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
table = bs4.BeautifulSoup(resp.text, 'lxml').find(
'table', {'class': 'wikitable sortable'})
symbols = []
for row in table.findAll('tr')[1:]:
symbol = row.findAll('td')[0].text
symbols.append(symbol.replace('\n', ''))
with open("tickers.pkl", "wb") as f:
pickle.dump(symbols, f)
return symbols
def get_data():
if not os.path.exists("tickers.pkl"):
symbols = get_tickers()
else:
with open("tickers.pkl", "rb") as f:
symbols = pickle.load(f)
if not os.path.exists('stocks'):
os.makedirs('stocks')
for symbol in symbols:
if not os.path.exists('stocks/{}'.format(symbol)):
os.makedirs('stocks/{}'.format(symbol))
if not os.path.exists('stocks/{}/meta.csv'.format(symbol)):
try:
headers = {
'Content-Type': 'application/json',
'Authorization': 'Token <PASSWORD>'
}
desc = requests.get(
"https://api.tiingo.com/tiingo/daily/{}?".format(symbol.replace('.', '-')), headers=headers)
with open('stocks/{}/desc.pkl'.format(symbol), "wb") as f:
pickle.dump(desc.json(), f)
print(symbol + ' dumping')
except Exception:
print(symbol + ' not found')
try:
headers = {
'Content-Type': 'application/json',
'Authorization': 'Token <PASSWORD>472b6a4f263ed'
}
eod = requests.get("https://api.tiingo.com/tiingo/daily/{}/prices?startDate=2019-01-02".format(
symbol.replace('.', '-')), headers=headers)
with open('stocks/{}/{}.pkl'.format(symbol, symbol), "wb") as f:
pickle.dump(eod.json(), f)
except Exception:
print(symbol + ' not found')
if __name__ == '__main__':
get_data()
file_list = os.listdir("./stocks")
file_list.sort()
print(f'Total time = {(time.time() - start) / 60} minutes')
print("Done")
``` |
{
"source": "aaditgupta21/Fotography",
"score": 3
} |
#### File: Fotography/cruddy/query.py
```python
from cruddy.model import Users
# SQLAlchemy extract all users from database
def users_all():
table = Users.query.all()
json_ready = [peep.read() for peep in table]
return json_ready
# SQLAlchemy extract users from database matching term
def users_ilike(term):
"""filter Users table by term into JSON list (ordered by User.name)"""
term = "%{}%".format(term) # "ilike" is case insensitive and requires wrapped %term%
table = Users.query.order_by(Users.name).filter((Users.name.ilike(term)) | (Users.email.ilike(term)))
return [peep.read() for peep in table]
# SQLAlchemy extract single user from database matching ID
def user_by_id(userid):
"""finds User in table matching userid """
return Users.query.filter_by(userID=userid).first()
# SQLAlchemy extract single user from database matching email
def user_by_email(email):
"""finds User in table matching email """
return Users.query.filter_by(email=email).first()
```
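The `ilike` filter used above relies on SQL `LIKE` semantics with `%` wildcards, which is why the search term is wrapped before filtering. Below is a minimal, self-contained sketch of the same pattern using plain SQLAlchemy (1.4+ style imports) and an in-memory SQLite database; the `User` model and sample rows are illustrative, not the project's `cruddy.model.Users`.
```python
# Illustrative sketch only: a throwaway model and in-memory database.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    email = Column(String)

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([User(name='Ada', email='ada@example.com'),
                 User(name='Grace', email='grace@example.com')])
session.commit()

term = "%{}%".format("ad")  # wrap the search term in % wildcards
matches = session.query(User).filter(User.name.ilike(term) | User.email.ilike(term))
print([u.name for u in matches])  # case-insensitive match -> ['Ada']
```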
#### File: aaditgupta21/Fotography/main.py
```python
import json
# import app as app
from flask import render_template, redirect, request, url_for, send_from_directory
from flask_login import login_required
from __init__ import app, login_manager
from cruddy.app_crud import app_crud
from cruddy.app_crud_api import app_crud_api
from cruddy.login import login, logout, authorize
# from uploady.app_upload import app_upload
from notey.app_notes import app_notes
from events.app_events import app_events
# app.register_blueprint(app_upload)
app.register_blueprint(app_crud)
app.register_blueprint(app_crud_api)
app.register_blueprint(app_notes)
app.register_blueprint(app_events)
# create a Flask instance
# connects default URL to render index.html
@app.route('/logout/', methods=["GET", "POST"])
@login_required
def main_logout():
logout()
return redirect(url_for('index'))
@login_manager.unauthorized_handler
def unauthorized():
"""Redirect unauthorized users to Login page."""
app.config['NEXT_PAGE'] = request.endpoint
return redirect(url_for('main_login'))
# if login url, show phones table only
@app.route('/login/', methods=["GET", "POST"])
def main_login():
# obtains form inputs and fulfills login requirements
if request.form:
email = request.form.get("email")
password = request.form.get("password")
if login(email, password):
if (email == "<EMAIL>") and (password == "<PASSWORD>"): # this can be replaced with whatever login is needed
return redirect(url_for('crud.crud'))
else:
return redirect(url_for('crud.crud_view'))
# if not logged in, show the login page
return render_template("login.html")
@app.route('/authorize/', methods=["GET", "POST"])
def main_authorize():
error_msg = ""
# check form inputs and creates user
if request.form:
# validation should be in HTML
user_name = request.form.get("user_name")
email = request.form.get("email")
password1 = request.form.get("password1")
password2 = request.form.get("password2") # password should be verified
if password1 == password2:
if authorize(user_name, email, password1):
return redirect(url_for('main_login'))
else:
error_msg = "Passwords do not match"
# show the auth user page if the above fails for some reason
return render_template("authorize.html", error_msg=error_msg)
@app.route('/')
def index():
return render_template("index.html")
@app.route('/about/')
def about():
return render_template("about.html")
@app.route('/calendar')
def calendar():
return render_template("calendar.html")
@app.route('/activity')
def activity():
return render_template("activity.html")
@app.route('/generator')
def generator():
return render_template("generator.html")
@app.route('/shop')
def shop():
return render_template("shop.html")
if __name__ == "__main__":
app.run(
debug=True,
host="0.0.0.0",
port=5000
)
``` |
{
"source": "aadithpm/code-a-day",
"score": 3
} |
#### File: code-a-day/py/A Rule Of Divisibility By 13.py
```python
def thirt(n):
seq = [1,10,9,12,3,4]
n = list(int(i) for i in reversed(str(n)))
if len(seq) < len(n):
compute1 = [i for i in seq[0:len(n)-len(seq)]]
seq.extend(compute1)
compute1 = sum(i * j for i,j in zip(n,seq))
compute1 = list(int(i) for i in reversed(str(compute1)))
compute2 = sum(i * j for i,j in zip(compute1,seq))
if compute1 == compute2:
return compute2
else:
compute1 = list(int(i) for i in reversed(str(compute2)))
return sum(i * j for i,j in zip(compute1,seq))
```
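The weights `[1, 10, 9, 12, 3, 4]` used by `thirt` are the remainders of successive powers of 10 modulo 13 (10^0 through 10^5), which repeat with period 6; that is why the weighted digit sum preserves the remainder modulo 13. The following sketch (not from the repository) generates the weights with `itertools.cycle`, so numbers of any length are covered, and repeats until the sum becomes stationary.
```python
# Sketch only: an alternative to the hand-extended weight list above.
from itertools import cycle, islice

def thirt_cycled(n):
    digits = [int(d) for d in reversed(str(n))]
    weights = islice(cycle([1, 10, 9, 12, 3, 4]), len(digits))
    total = sum(d * w for d, w in zip(digits, weights))
    # repeat until the weighted digit sum no longer changes
    return total if total == n else thirt_cycled(total)

print(thirt_cycled(1234567))  # 1234567 -> 178 -> 87 -> 87
```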
#### File: code-a-day/py/Can You Get The Loop.py
```python
def loop_size(node):
watcher = node
player = node.next
size = 0
while watcher != player:
watcher = watcher.next
player = player.next.next
size = size + 1
size = 1
watcher = watcher.next
while watcher != player:
size = size + 1
watcher = watcher.next
return size
```
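`loop_size` is Floyd's tortoise-and-hare cycle detection followed by a second walk that counts the cycle length from the meeting point. A minimal harness for it is sketched below; the `Node` class is an assumption standing in for the kata's node type, which only needs a `next` attribute.
```python
# Build a chain with a tail of 2 nodes followed by a cycle of length 3.
class Node:
    def __init__(self):
        self.next = None

nodes = [Node() for _ in range(5)]
for a, b in zip(nodes, nodes[1:]):
    a.next = b
nodes[-1].next = nodes[2]

print(loop_size(nodes[0]))  # expected: 3
```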
#### File: code-a-day/py/Consecutive Strings.py
```python
def longest_consec(strarr, k):
if k < 1 or k > len(strarr) or len(strarr) == 0:
return ''
cur_len = 0
cur_string = ""
for idx, val in enumerate(strarr[:len(strarr) - k + 1]):
temp_str = ''.join(strarr[idx: idx + k])
if len(temp_str) > cur_len:
cur_string = temp_str
cur_len = len(temp_str)
return cur_string
```
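A quick usage check for `longest_consec`, worked through from the code above: with `k = 2`, the five consecutive pairs are joined and the longest concatenation is kept.
```python
# Assumes longest_consec from the block above is in scope.
print(longest_consec(["zone", "abigail", "theta", "form", "libe", "zas"], 2))
# -> "abigailtheta" (length 12, the longest of the consecutive 2-string joins)
```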
#### File: code-a-day/py/Help Your Granny.py
```python
import math
def tour(friends, friend_towns, home_to_town_distances):
d = 0
last_d = 0
friend_towns = [[friend,town] for friend,town in friend_towns if friend in friends]
for f,t in friend_towns:
if t in home_to_town_distances:
_d = home_to_town_distances[t]
if d == 0:
d += _d
last_d = _d
else:
d += math.sqrt(_d ** 2 - last_d ** 2)
last_d = _d
d += home_to_town_distances[t]
return int(math.floor(d))
```
#### File: code-a-day/py/Remove String Spaces.py
```python
def no_space(x):
return "".join(x.split())
```
#### File: code-a-day/py/Strip Comments.py
```python
import re
def solution(string,markers):
words = [i for i in string.split("\n")]
for marker in markers:
reg = re.compile(r"\s*\{}(.*)".format(marker))
for i, word in enumerate(words):
word = re.sub(reg, "", word)
words[i] = word
return '\n'.join(words)
```
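A usage check for `solution`: for each marker, everything from the marker to the end of its line is removed, together with the whitespace immediately before it.
```python
# Assumes solution from the block above is in scope.
text = "apples, pears # and bananas\ngrapes\nbananas !apples"
print(solution(text, ["#", "!"]))
# apples, pears
# grapes
# bananas
```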
#### File: code-a-day/py/Sum Of Positive.py
```python
def positive_sum(arr):
# Your code here
sum = 0
for number in arr:
if number > 0:
sum += number
return sum
```
#### File: code-a-day/py/Weight For Weight.py
```python
from operator import itemgetter
def digits(number):
s = 0
while number > 0:
s += number % 10
number = number // 10  # integer division so the loop terminates correctly under Python 3
return s
def order_weight(strng):
strng = [[i, digits(int(i))] for i in strng.split()]
strng.sort(key = itemgetter(1, 0))
return ' '.join([i[0] for i in strng])
``` |
{
"source": "aadithpm/sos-notebook",
"score": 2
} |
#### File: src/sos_notebook/converter.py
```python
import argparse
import re
import sys
import time
from io import StringIO
from queue import Empty  # raised by iopub_channel.get_msg() on timeout (used in run_cell)
import nbformat
from nbconvert.exporters import Exporter
from nbconvert.preprocessors.execute import ExecutePreprocessor, CellExecutionError
from nbformat.v4 import new_code_cell, new_markdown_cell, new_notebook, output_from_msg
from sos.converter import extract_workflow
from sos.syntax import SOS_SECTION_HEADER
from sos.utils import env
#
# Converter from Notebook
#
def get_notebook_to_script_parser():
parser = argparse.ArgumentParser('sos convert FILE.ipynb FILE.sos (or --to sos)',
description='''Export Jupyter notebook with a SoS kernel to a
.sos file. The cells are presented in the .sos file as
cell structure lines, which will be ignored if executed
in batch mode ''')
return parser
# This class cannot be defined in .kernel because it would cause a
# weird problem with unittest being unable to resolve __main__
class SoS_Exporter(Exporter):
def __init__(self, config=None, **kwargs):
self.output_extension = '.sos'
self.output_mimetype = 'text/x-sos'
Exporter.__init__(self, config, **kwargs)
def from_notebook_cell(self, cell, fh, idx=0):
# in non-all mode, markdown cells are ignored because they can be mistakenly
# treated as markdown content of an action or script #806
if cell.cell_type != "code":
return
#
# Non-sos code cells are also ignored
if 'kernel' in cell.metadata and cell.metadata['kernel'] not in ('sos', 'SoS', None):
return
lines = cell.source.split('\n')
valid_cell = False
for idx, line in enumerate(lines):
if valid_cell or (line.startswith('%include') or line.startswith('%from')):
fh.write(line + '\n')
elif SOS_SECTION_HEADER.match(line):
valid_cell = True
# look retrospectively for comments
c = idx - 1
comment = ''
while c >= 0 and lines[c].startswith('#'):
comment = lines[c] + '\n' + comment
c -= 1
fh.write(comment + line + '\n')
# other content, namely non-%include lines before the section header, is ignored
if valid_cell:
fh.write('\n')
return idx
def from_notebook_node(self, nb, resources, **kwargs):
#
cells = nb.cells
with StringIO() as fh:
fh.write('#!/usr/bin/env sos-runner\n')
fh.write('#fileformat=SOS1.0\n\n')
idx = 0
for cell in cells:
idx = self.from_notebook_cell(cell, fh, idx)
content = fh.getvalue()
resources['output_extension'] = '.sos'
return content, resources
def notebook_to_script(notebook_file, sos_file, args=None, unknown_args=None):
'''
Convert an IPython notebook to SoS format.
'''
if unknown_args:
raise ValueError(f'Unrecognized parameter {unknown_args}')
exporter = SoS_Exporter()
notebook = nbformat.read(notebook_file, nbformat.NO_CONVERT)
output, _ = exporter.from_notebook_node(notebook, {})
if not sos_file:
sys.stdout.write(output)
elif isinstance(sos_file, str):
with open(sos_file, 'w') as sos:
sos.write(output)
env.logger.info(f'SoS script saved to {sos_file}')
else:
sos_file.write(output)
#
# Converter to Notebook
#
def get_script_to_notebook_parser():
parser = argparse.ArgumentParser('sos convert FILE.sos FILE._ipynb (or --to ipynb)',
description='''Convert a sos script to Jupyter notebook (.ipynb)
so that it can be opened by Jupyter notebook.''')
return parser
def add_cell(cells, content, cell_type, cell_count, metainfo):
# if a section consists of all report lines, report it as a markdown cell
if not content:
return
if cell_type not in ('code', 'markdown'):
env.logger.warning(
f'Unrecognized cell type {cell_type}, code assumed.')
if cell_type == 'markdown' and any(x.strip() and not x.startswith('#! ') for x in content):
env.logger.warning(
'Markdown lines not starting with #!, code cell assumed.')
cell_type = 'code'
#
if cell_type == 'markdown':
cells.append(new_markdown_cell(source=''.join([x[3:] for x in content]).strip(),
metadata=metainfo))
else:
cells.append(
new_code_cell(
# remove any trailing blank lines...
source=''.join(content).strip(),
execution_count=cell_count,
metadata=metainfo)
)
class SoS_ExecutePreprocessor(ExecutePreprocessor):
def __init__(self, filename, *args, **kwargs):
super(SoS_ExecutePreprocessor, self).__init__(*args, **kwargs)
self._filename = filename
def _prepare_meta(self, cell):
meta = {}
run_notebook = re.search(
r'^%sosrun($|\s)|^%sossave($|\s)|^%preview\s.*(-w|--workflow).*$', cell.source, re.MULTILINE)
if run_notebook:
meta['workflow'] = self._workflow
if re.search(r'^%toc\s/', cell.source, re.MULTILINE):
meta['toc'] = self._toc
meta['path'] = self._filename
meta['use_panel'] = False
meta['rerun'] = False
# ID is dynamically generated by the frontend and does not exist
# in the backend for batch mode
meta['cell_id'] = 0
meta['batch_mode'] = True
meta['cell_kernel'] = cell.metadata.kernel
return meta
def run_cell(self, cell, cell_index=0):
# sos is the additional meta information sent to kernel
content = dict(code=cell.source, silent=False, store_history=False,
user_expressions='',
allow_stdin=False, stop_on_error=False,
sos=self._prepare_meta(cell))
msg = self.kc.session.msg('execute_request', content)
self.kc.shell_channel.send(msg)
msg_id = msg['header']['msg_id']
# the rest is copied from https://github.com/jupyter/nbconvert/blob/master/nbconvert/preprocessors/execute.py
# because we only need to change the first line
# msg_id = self.kc.execute(cell.source)
self.log.debug("Executing cell:\n%s", cell.source)
exec_reply = self._wait_for_reply(msg_id, cell)
outs = cell.outputs = []
while True:
try:
# We've already waited for execute_reply, so all output
# should already be waiting. However, on slow networks, like
# in certain CI systems, waiting < 1 second might miss messages.
# So long as the kernel sends a status:idle message when it
# finishes, we won't actually have to wait this long, anyway.
msg = self.kc.iopub_channel.get_msg(timeout=self.iopub_timeout)
except Empty:
self.log.warning("Timeout waiting for IOPub output")
if self.raise_on_iopub_timeout:
raise RuntimeError("Timeout waiting for IOPub output")
else:
break
if msg['parent_header'].get('msg_id') != msg_id:
# not an output from our execution
continue
msg_type = msg['msg_type']
self.log.debug("output: %s", msg_type)
content = msg['content']
# set the prompt number for the input and the output
if 'execution_count' in content:
cell['execution_count'] = content['execution_count']
if msg_type == 'status':
if content['execution_state'] == 'idle':
break
else:
continue
elif msg_type == 'execute_input':
continue
elif msg_type == 'clear_output':
outs[:] = []
# clear display_id mapping for this cell
for display_id, cell_map in self._display_id_map.items():
if cell_index in cell_map:
cell_map[cell_index] = []
continue
elif msg_type.startswith('comm'):
continue
display_id = None
if msg_type in {'execute_result', 'display_data', 'update_display_data'}:
display_id = msg['content'].get(
'transient', {}).get('display_id', None)
if display_id:
self._update_display_id(display_id, msg)
if msg_type == 'update_display_data':
# update_display_data doesn't get recorded
continue
try:
out = output_from_msg(msg)
except ValueError:
self.log.error("unhandled iopub msg: " + msg_type)
continue
if display_id:
# record output index in:
# _display_id_map[display_id][cell_idx]
cell_map = self._display_id_map.setdefault(display_id, {})
output_idx_list = cell_map.setdefault(cell_index, [])
output_idx_list.append(len(outs))
outs.append(out)
return exec_reply, outs
def _scan_table_of_content(self, nb):
cells = nb.cells
TOC = ''
for cell in cells:
if cell.cell_type == "markdown":
for line in cell.source.splitlines():
if re.match('^#+ ', line):
TOC += line + '\n'
return TOC
def preprocess(self, nb, *args, **kwargs):
self._workflow = extract_workflow(nb)
self._toc = self._scan_table_of_content(nb)
return super(SoS_ExecutePreprocessor, self).preprocess(nb, *args, **kwargs)
def script_to_notebook(script_file, notebook_file, args=None, unknown_args=None):
'''
Convert a SoS script to an IPython notebook (.ipynb) so that it can be opened
by Jupyter notebook.
'''
if unknown_args:
raise ValueError(f'Unrecognized parameter {unknown_args}')
cells = []
cell_count = 1
cell_type = 'code'
metainfo = {}
content = []
with open(script_file) as script:
first_block = True
for line in script:
if line.startswith('#') and first_block:
if line.startswith('#!'):
continue
if line.startswith('#fileformat='):
if not line[12:].startswith('SOS'):
raise RuntimeError(
f'{script_file} is not a SoS script according to #fileformat line.')
continue
first_block = False
mo = SOS_SECTION_HEADER.match(line)
if mo:
# get rid of empty content
if not any(x.strip() for x in content):
content = []
if content:
add_cell(cells, content, cell_type, cell_count, metainfo)
cell_type = 'code'
cell_count += 1
metainfo = {'kernel': 'SoS'}
content = [line]
continue
if line.startswith('#!'):
if cell_type == 'markdown':
content.append(line)
continue
else:
# get rid of empty content
if not any(x.strip() for x in content):
content = []
if content:
add_cell(cells, content, cell_type,
cell_count, metainfo)
cell_type = 'markdown'
cell_count += 1
content = [line]
continue
# other cases
content.append(line)
#
if content and any(x.strip() for x in content):
add_cell(cells, content, cell_type, cell_count, metainfo)
#
nb = new_notebook(cells=cells,
metadata={
'kernelspec': {
"display_name": "SoS",
"language": "sos",
"name": "sos"
},
"language_info": {
'codemirror_mode': 'sos',
"file_extension": ".sos",
"mimetype": "text/x-sos",
"name": "sos",
"pygments_lexer": "python",
'nbconvert_exporter': 'sos_notebook.converter.SoS_Exporter',
},
'sos': {
'kernels': [
['SoS', 'sos', '', '']
]
}
}
)
if not notebook_file:
nbformat.write(nb, sys.stdout, 4)
else:
with open(notebook_file, 'w') as notebook:
nbformat.write(nb, notebook, 4)
env.logger.info(f'Jupyter notebook saved to {notebook_file}')
# if err:
# raise RuntimeError(repr(err))
#
# notebook to HTML
#
def export_notebook(exporter_class, to_format, notebook_file, output_file, unknown_args=None, view=False):
import os
import subprocess
if not os.path.isfile(notebook_file):
raise RuntimeError(f'{notebook_file} does not exist')
cfg_file = os.path.join(os.path.expanduser('~'), '.sos', 'nbconfig.py')
if not os.path.isfile(cfg_file):
with open(cfg_file, 'w') as cfg:
cfg.write(f'''
import os
import sos
import sos_notebook
c = get_config()
c.TemplateExporter.template_path.extend([
os.path.join(os.path.split(os.path.abspath(sos.__file__))[0], 'templates'),
os.path.join(os.path.split(os.path.abspath(sos_notebook.__file__))[0], 'templates')])
''')
if not output_file:
import tempfile
tmp = tempfile.NamedTemporaryFile(
delete=False, suffix='.' + to_format).name
tmp_stderr = tempfile.NamedTemporaryFile(
delete=False, suffix='.' + to_format).name
with open(tmp_stderr, 'w') as err:
ret = subprocess.call(['jupyter', 'nbconvert', notebook_file, '--to', to_format,
'--output', tmp, '--config', cfg_file] + ([] if unknown_args is None else unknown_args), stderr=err)
with open(tmp_stderr) as err:
err_msg = err.read()
if ret != 0:
env.logger.error(err_msg)
env.logger.error(
f'Failed to convert {notebook_file} to {to_format} format')
else:
# identify output files
dest_file = err_msg.rsplit()[-1]
if not os.path.isfile(dest_file):
env.logger.error(err_msg)
env.logger.error('Failed to get converted file.')
elif view:
import webbrowser
url = f'file://{os.path.abspath(dest_file)}'
env.logger.info(f'Viewing {url} in a browser')
webbrowser.open(url, new=2)
# allow browser some time to process the file before this process removes it
time.sleep(2)
else:
with open(dest_file, 'rb') as tfile:
sys.stdout.buffer.write(tfile.read())
try:
os.remove(tmp)
except Exception:
pass
else:
ret = subprocess.call(['jupyter', 'nbconvert', os.path.abspath(notebook_file), '--to', to_format,
'--output', os.path.abspath(output_file), '--config', cfg_file] + ([] if unknown_args is None else unknown_args))
if ret != 0:
env.logger.error(
f'Failed to convert {notebook_file} to {to_format} format')
else:
env.logger.info(f'Output saved to {output_file}')
def get_notebook_to_html_parser():
parser = argparse.ArgumentParser('sos convert FILE.ipynb FILE.html (or --to html)',
description='''Export Jupyter notebook with a SoS kernel to a
.html file. Additional command line arguments are passed directly to
command "jupyter nbconvert --to html" so please refer to nbconvert manual for
available options.''')
parser.add_argument('--template',
help='''Template to export Jupyter notebook with sos kernel. SoS provides a number
of templates, with sos-report displaying markdown cells and only the output of cells with
a prominent tag, and a control panel to control the display of the rest of the content
''')
parser.add_argument('-e', '--execute', action='store_true',
help='''Execute the notebook in batch mode (as if running "Cell -> Run All"
from Jupyter notebook interface before converting to HTML''')
parser.add_argument('-v', '--view', action='store_true',
help='''Open the output file in a browser. In case no HTML file is specified,
this option will display the HTML file in a browser, instead of writing its
content to standard output.''')
return parser
def notebook_to_html(notebook_file, output_file, sargs=None, unknown_args=None):
from nbconvert.exporters.html import HTMLExporter
import os
if unknown_args is None:
unknown_args = []
if sargs and sargs.execute:
# the step can take long time to complete
ep = SoS_ExecutePreprocessor(notebook_file, timeout=60000)
try:
nb = nbformat.read(notebook_file, nbformat.NO_CONVERT)
ep.preprocess(nb, {'metadata': {'path': '.'}})
import tempfile
tmp_file = os.path.join(env.temp_dir, os.path.basename(notebook_file))
with open(tmp_file, 'w') as tmp_nb:
nbformat.write(nb, tmp_nb, 4)
notebook_file = tmp_file
except CellExecutionError as e:
env.logger.error(f'Failed to execute notebook: {e}')
if sargs.template:
unknown_args = ['--template', os.path.abspath(sargs.template) if os.path.isfile(
sargs.template) else sargs.template] + unknown_args
export_notebook(HTMLExporter, 'html', notebook_file,
output_file, unknown_args, view=sargs.view)
def get_notebook_to_pdf_parser():
parser = argparse.ArgumentParser('sos convert FILE.ipynb FILE.pdf (or --to pdf)',
description='''Export Jupyter notebook with a SoS kernel to a
.pdf file. Additional command line arguments are passed directly to
command "jupyter nbconvert --to pdf" so please refer to nbconvert manual for
available options.''')
parser.add_argument('--template',
help='''Template to export Jupyter notebook with sos kernel. SoS provides a number
of templates, with sos-report displaying markdown cells and only the output of cells with
a prominent tag, and a control panel to control the display of the rest of the content
''')
return parser
def notebook_to_pdf(notebook_file, output_file, sargs=None, unknown_args=None):
from nbconvert.exporters.pdf import PDFExporter
import os
if unknown_args is None:
unknown_args = []
if sargs.template:
unknown_args = ['--template', os.path.abspath(sargs.template) if os.path.isfile(
sargs.template) else sargs.template] + unknown_args
# jupyter convert will add extension to output file...
if output_file is not None and output_file.endswith('.pdf'):
output_file = output_file[:-4]
export_notebook(PDFExporter, 'pdf', notebook_file,
output_file, unknown_args)
def get_notebook_to_md_parser():
parser = argparse.ArgumentParser('sos convert FILE.ipynb FILE.md (or --to md)',
description='''Export Jupyter notebook with a SoS kernel to a
markdown file. Additional command line arguments are passed directly to
command "jupyter nbconvert --to markdown" so please refer to nbconvert manual for
available options.''')
return parser
def notebook_to_md(notebook_file, output_file, sargs=None, unknown_args=None):
from nbconvert.exporters.markdown import MarkdownExporter
export_notebook(MarkdownExporter, 'markdown',
notebook_file, output_file, unknown_args)
def get_notebook_to_notebook_parser():
parser = argparse.ArgumentParser('sos convert FILE.ipynb FILE.ipynb (or --to ipynb)',
description='''Export a Jupyter notebook with a non-SoS kernel to a
SoS notebook with SoS kernel. A SoS notebook will simply be copied to
the destination file.''')
parser.add_argument('--python3-to-sos', action='store_true',
help='''Convert python3 cells to SoS.''')
parser.add_argument('--inplace', action='store_true',
help='''Overwrite input notebook with the output.''')
return parser
def notebook_to_notebook(notebook_file, output_file, sargs=None, unknown_args=None):
notebook = nbformat.read(notebook_file, nbformat.NO_CONVERT)
# get the kernel of the notebook
# this is like 'R', there is another 'display_name'
lan_name = notebook['metadata']['kernelspec']['language']
# this is like 'ir'
kernel_name = notebook['metadata']['kernelspec']['name']
if kernel_name == 'sos':
# already a SoS notebook?
if sargs.inplace:
return
if output_file:
import shutil
shutil.copy(notebook_file, output_file)
else:
with open(notebook_file) as nb:
sys.stdout.write(nb.read())
return
# convert to?
elif kernel_name == 'python3' and sargs.python3_to_sos:
to_lan = 'SoS'
to_kernel = 'sos'
else:
to_lan = lan_name
to_kernel = kernel_name
# write all cells
#
cells = []
for cell in notebook.cells:
if cell.cell_type == 'code':
cell.metadata['kernel'] = to_lan
cells.append(cell)
#
# create header
nb = new_notebook(cells=cells,
metadata={
'kernelspec': {
"display_name": "SoS",
"language": "sos",
"name": "sos"
},
"language_info": {
"file_extension": ".sos",
"mimetype": "text/x-sos",
"name": "sos",
"pygments_lexer": "python",
'nbconvert_exporter': 'sos_notebook.converter.SoS_Exporter',
},
'sos': {
'kernels': [
['SoS', 'sos', '', '']] +
([[to_lan, to_kernel, '', '']]
if to_lan != 'SoS' else []),
'default_kernel': to_lan
}
}
)
if sargs.inplace:
with open(notebook_file, 'w') as new_nb:
nbformat.write(nb, new_nb, 4)
env.logger.info(f'Jupyter notebook saved to {notebook_file}')
elif not output_file:
nbformat.write(nb, sys.stdout, 4)
else:
with open(output_file, 'w') as new_nb:
nbformat.write(nb, new_nb, 4)
env.logger.info(f'Jupyter notebook saved to {output_file}')
def get_Rmarkdown_to_notebook_parser():
parser = argparse.ArgumentParser('sos convert FILE.Rmd FILE.ipynb (or --to ipynb)',
description='''Export an R Markdown file to a SoS notebook. It currently
only handles code blocks and Markdown, not inline expressions.''')
return parser
def Rmarkdown_to_notebook(rmarkdown_file, output_file, sargs=None, unknown_args=None):
#
with open(rmarkdown_file) as script:
content = script.read()
#
# identify the headers
header = re.compile(r'^(#+\s.*$)', re.M)
paragraphs = re.split(header, content)
#
cells = []
cell_count = 1
for idx, p in enumerate(paragraphs):
if idx % 2 == 1:
# this is a header, let us create a markdown cell
cells.append(
new_markdown_cell(
source=p.strip()))
else:
# this is unknown yet, let us find ```{} block
code = re.compile(r'^\s*(```{.*})$', re.M)
endcode = re.compile(r'^\s*```$', re.M)
for pidx, pc in enumerate(re.split(code, p)):
if pidx == 0:
# piece before first code block. it might contain
# inline expression
cells.append(
new_markdown_cell(
source=pc.strip())
)
elif pidx % 2 == 0:
# this is AFTER the {r} piece, let us assume all R code
# for now
# this is code, but it should end somewhere
pieces = re.split(endcode, pc)
# I believe that we should have
# pieces[0] <- code
# pieces[1] <- rest...
# but I could be wrong.
cells.append(
new_code_cell(
source=pieces[0],
execution_count=cell_count,
metadata={'kernel': 'R'}
)
)
cell_count += 1
#
for piece in pieces[1:]:
cells.append(
new_markdown_cell(
source=piece.strip())
)
#
# create header
nb = new_notebook(cells=cells,
metadata={
'kernelspec': {
"display_name": "SoS",
"language": "sos",
"name": "sos"
},
"language_info": {
"file_extension": ".sos",
"mimetype": "text/x-sos",
"name": "sos",
"pygments_lexer": "python",
'nbconvert_exporter': 'sos_notebook.converter.SoS_Exporter',
},
'sos': {
'kernels': [
['SoS', 'sos', '', ''],
['R', 'ir', '', '']],
'default_kernel': 'R'
}
}
)
if not output_file:
nbformat.write(nb, sys.stdout, 4)
else:
with open(output_file, 'w') as new_nb:
nbformat.write(nb, new_nb, 4)
env.logger.info(f'Jupyter notebook saved to {output_file}')
```
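The converters above can also be driven programmatically rather than through `sos convert`. A minimal sketch follows; the file names are hypothetical.
```python
# Hypothetical file names; both functions are defined in the module above.
from sos_notebook.converter import notebook_to_script, script_to_notebook

notebook_to_script('analysis.ipynb', 'analysis.sos')       # .ipynb -> .sos
script_to_notebook('analysis.sos', 'analysis_copy.ipynb')  # .sos -> .ipynb
```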
#### File: src/sos_notebook/inspector.py
```python
import pydoc
from sos.syntax import SOS_USAGES
from sos.utils import env
from .magics import SoS_Magics
class SoS_VariableInspector(object):
def __init__(self, kernel):
self.kernel = kernel
self.preview_magic = kernel.magics.get('preview')
def inspect(self, name, line, pos):
try:
obj_desc, preview = self.preview_magic.preview_var(name, style=None)
if preview is None:
return {}
else:
format_dict, md_dict = preview
if 'text/plain' in format_dict:
return format_dict
else:
return {'text/plain': f'{repr(env.sos_dict[name])} ({obj_desc})'}
except Exception:
return {}
class SoS_SyntaxInspector(object):
def __init__(self, kernel):
self.kernel = kernel
def inspect(self, name, line, pos):
if line.startswith('%') and name in SoS_Magics.names and pos <= len(name) + 1:
try:
magic = SoS_Magics(self.kernel).get(name)
parser = magic.get_parser()
return {'text/plain': parser.format_help() }
except Exception as e:
return {'text/plain': f'Magic %{name}: {e}'}
elif line.startswith(name + ':') and pos <= len(name):
if self.kernel.original_keys is None:
self.kernel._reset_dict()
# input: etc
if name in SOS_USAGES:
return {'text/plain': SOS_USAGES[name]}
elif name in env.sos_dict:
# action?
return {'text/plain': pydoc.render_doc(env.sos_dict[name], title='%s', renderer=pydoc.plaintext),
'text/html': pydoc.render_doc(env.sos_dict[name], title='%s', renderer=pydoc.html)
}
else:
return {}
else:
return {}
class SoS_Inspector(object):
def __init__(self, kernel):
self.inspectors = [
SoS_SyntaxInspector(kernel),
SoS_VariableInspector(kernel),
]
def inspect(self, name, line, pos):
for c in self.inspectors:
try:
data = c.inspect(name, line, pos)
if data:
return data
except Exception:
continue
# No match
return {}
```
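`SoS_Inspector` is a small chain of responsibility: registered inspectors are consulted in order and the first non-empty dict wins. A stripped-down illustration of that contract with stub inspectors (illustration only, not part of the module):
```python
# Stub inspectors illustrating the dispatch used by SoS_Inspector.inspect.
class EmptyInspector:
    def inspect(self, name, line, pos):
        return {}  # no answer: fall through to the next inspector

class PlainInspector:
    def inspect(self, name, line, pos):
        return {'text/plain': f'help for {name}'}

def inspect(name, line, pos, inspectors=(EmptyInspector(), PlainInspector())):
    for c in inspectors:
        data = c.inspect(name, line, pos)
        if data:  # first non-empty result is returned
            return data
    return {}

print(inspect('sosrun', '%sosrun', 1))  # -> {'text/plain': 'help for sosrun'}
```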
#### File: src/sos_notebook/kernel.py
```python
import contextlib
import logging
import os
import subprocess
import sys
import time
from collections import OrderedDict, defaultdict
from textwrap import dedent
import pandas as pd
import pkg_resources
from ipykernel.ipkernel import IPythonKernel
from IPython.core.display import HTML
from IPython.utils.tokenutil import line_at_cursor, token_at_cursor
from jupyter_client import manager
from sos._version import __sos_version__, __version__
from sos.eval import SoS_eval, SoS_exec, interpolate
from sos.syntax import SOS_SECTION_HEADER
from sos.utils import (format_duration, WorkflowDict, env, log_to_file,
short_repr)
from ._version import __version__ as __notebook_version__
from .completer import SoS_Completer
from .inspector import SoS_Inspector
from .step_executor import PendingTasks
from .workflow_executor import runfile, NotebookLoggingHandler
from .magics import SoS_Magics
class FlushableStringIO:
'''A write-only stream that forwards output to the kernel frontend as
stream messages; lines prefixed with "HINT: " are rendered as HTML hints.
'''
def __init__(self, kernel, name, *args, **kwargs):
self.kernel = kernel
self.name = name
def write(self, content):
if content.startswith('HINT: '):
content = content.splitlines()
hint_line = content[0][6:].strip()
content = '\n'.join(content[1:])
self.kernel.send_response(self.kernel.iopub_socket, 'display_data',
{
'metadata': {},
'data': {'text/html': HTML(
f'<div class="sos_hint">{hint_line}</div>').data}
})
if content:
if self.kernel._meta['capture_result'] is not None:
self.kernel._meta['capture_result'].append(
('stream', {'name': self.name, 'text': content}))
self.kernel.send_response(self.kernel.iopub_socket, 'stream',
{'name': self.name, 'text': content})
def flush(self):
pass
__all__ = ['SoS_Kernel']
class subkernel(object):
# a class to store information on a subkernel
def __init__(self, name=None, kernel=None, language='', color='', options={}):
self.name = name
self.kernel = kernel
self.language = language
self.color = color
self.options = options
def __repr__(self):
return f'subkernel {self.name} with kernel {self.kernel} for language {self.language} with color {self.color}'
# translate a message to a transient_display_data message
def make_transient_msg(msg_type, content, title, append=False, page='Info'):
if msg_type == 'display_data':
return {
'title': title,
'data': content.get('data', {}),
'metadata': {'append': append, 'page': page}
}
elif msg_type == 'stream':
if content['name'] == 'stdout':
return {
'title': title,
'data': {
'text/plain': content['text'],
'application/vnd.jupyter.stdout': content['text']
},
'metadata': {'append': append, 'page': page}
}
else:
return {
'title': title,
'data': {
'text/plain': content['text'],
'application/vnd.jupyter.stderr': content['text']
},
'metadata': {'append': append, 'page': page}
}
else:
raise ValueError(
f"failed to translate message {msg_type} to transient_display_data message")
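# For example (illustrative values), a captured stdout stream message
#   make_transient_msg('stream', {'name': 'stdout', 'text': 'done'}, title='%run output')
# returns
#   {'title': '%run output',
#    'data': {'text/plain': 'done', 'application/vnd.jupyter.stdout': 'done'},
#    'metadata': {'append': False, 'page': 'Info'}}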
class Subkernels(object):
# a collection of subkernels
def __init__(self, kernel):
self.sos_kernel = kernel
self.language_info = kernel.supported_languages
from jupyter_client.kernelspec import KernelSpecManager
km = KernelSpecManager()
specs = km.find_kernel_specs()
# get supported languages
self._kernel_list = []
lan_map = {}
for x in self.language_info.keys():
for lname, knames in kernel.supported_languages[x].supported_kernels.items():
for kname in knames:
if x != kname:
lan_map[kname] = (lname, self.get_background_color(self.language_info[x], lname),
getattr(self.language_info[x], 'options', {}))
# kernel_list has the following items
#
# 1. displayed name
# 2. kernel name
# 3. language name
# 4. color
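# For example, the 'ir' kernelspec is typically registered as
#   subkernel(name='R', kernel='ir', language='R',
#             color=<background color from the R language plugin>, options=<plugin options>)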
for spec in specs.keys():
if spec == 'sos':
# the SoS kernel will be default theme color.
self._kernel_list.append(
subkernel(name='SoS', kernel='sos', options={
'variable_pattern': r'^\s*[_A-Za-z0-9\.]+\s*$',
'assignment_pattern': r'^\s*([_A-Za-z0-9\.]+)\s*=.*$'}))
elif spec in lan_map:
# e.g. ir ==> R
self._kernel_list.append(
subkernel(name=lan_map[spec][0], kernel=spec, language=lan_map[spec][0],
color=lan_map[spec][1], options=lan_map[spec][2]))
else:
# undefined language also use default theme color
self._kernel_list.append(subkernel(name=spec, kernel=spec))
def kernel_list(self):
return self._kernel_list
# now, no kernel is found, name has to be a new name and we need some definition
# if kernel is defined
def add_or_replace(self, kdef):
for idx, x in enumerate(self._kernel_list):
if x.name == kdef.name:
self._kernel_list[idx] = kdef
return self._kernel_list[idx]
else:
self._kernel_list.append(kdef)
return self._kernel_list[-1]
def get_background_color(self, plugin, lan):
# if a single color is defined, it is used for all supported
# languages
if isinstance(plugin.background_color, str):
# return the same background color for all inquiry
return plugin.background_color
else:
# return color for specified, or any color if unknown inquiry is made
return plugin.background_color.get(lan, next(iter(plugin.background_color.values())))
def find(self, name, kernel=None, language=None, color=None, notify_frontend=True):
# find from subkernel name
def update_existing(idx):
x = self._kernel_list[idx]
if (kernel is not None and kernel != x.kernel) or (language not in (None, '', 'None') and language != x.language):
raise ValueError(
f'Cannot change kernel or language of predefined subkernel {name} {x}')
if color is not None:
if color == 'default':
if self._kernel_list[idx].language:
self._kernel_list[idx].color = self.get_background_color(
self.language_info[self._kernel_list[idx].language], self._kernel_list[idx].language)
else:
self._kernel_list[idx].color = ''
else:
self._kernel_list[idx].color = color
if notify_frontend:
self.notify_frontend()
# if the language module cannot be loaded for some reason
if name in self.sos_kernel._failed_languages:
raise self.sos_kernel._failed_languages[name]
# find from language name (subkernel name, which is usually language name)
for idx, x in enumerate(self._kernel_list):
if x.name == name:
if x.name == 'SoS' or x.language or language is None:
update_existing(idx)
return x
else:
if not kernel:
kernel = name
break
# find from kernel name
for idx, x in enumerate(self._kernel_list):
if x.kernel == name:
# if a language exists or no new language is defined.
if x.language or language is None:
update_existing(idx)
return x
else:
# otherwise, try to use the new language
kernel = name
break
if kernel is not None:
# in this case kernel should have been defined in kernel list
if kernel not in [x.kernel for x in self._kernel_list]:
raise ValueError(
f'Unrecognized Jupyter kernel name {kernel}. Please make sure it is properly installed and appears in the output of the command "jupyter kernelspec list"')
# now this a new instance for an existing kernel
kdef = [x for x in self._kernel_list if x.kernel == kernel][0]
if not language:
if color == 'default':
if kdef.language:
color = self.get_background_color(
self.language_info[kdef.language], kdef.language)
else:
color = kdef.color
new_def = self.add_or_replace(subkernel(name, kdef.kernel, kdef.language, kdef.color if color is None else color,
getattr(self.language_info[kdef.language], 'options', {}) if kdef.language else {}))
if notify_frontend:
self.notify_frontend()
return new_def
else:
# if language is defined,
if ':' in language:
# if this is a new module, let us create an entry point and load
from pkg_resources import EntryPoint
mn, attr = language.split(':', 1)
ep = EntryPoint(name=kernel, module_name=mn,
attrs=tuple(attr.split('.')))
try:
plugin = ep.resolve()
self.language_info[name] = plugin
# for convenience, we create two entries for, e.g. R and ir
# but only if there is no existing definition
for supported_lan, supported_kernels in plugin.supported_kernels.items():
for supported_kernel in supported_kernels:
if name != supported_kernel and supported_kernel not in self.language_info:
self.language_info[supported_kernel] = plugin
if supported_lan not in self.language_info:
self.language_info[supported_lan] = plugin
except Exception as e:
raise RuntimeError(
f'Failed to load language {language}: {e}')
#
if color == 'default':
color = self.get_background_color(plugin, kernel)
new_def = self.add_or_replace(subkernel(name, kdef.kernel, kernel, kdef.color if color is None else color,
getattr(plugin, 'options', {})))
else:
# it should be defined ...
if language not in self.language_info:
raise RuntimeError(
f'Unrecognized language definition {language}, which should be a known language name or a class in the format of package.module:class')
#
self.language_info[name] = self.language_info[language]
if color == 'default':
color = self.get_background_color(
self.language_info[name], language)
new_def = self.add_or_replace(subkernel(name, kdef.kernel, language, kdef.color if color is None else color,
getattr(self.language_info[name], 'options', {})))
if notify_frontend:
self.notify_frontend()
return new_def
elif language is not None:
# kernel is not defined and we only have language
if ':' in language:
# if this is a new module, let us create an entry point and load
from pkg_resources import EntryPoint
mn, attr = language.split(':', 1)
ep = EntryPoint(name='__unknown__', module_name=mn,
attrs=tuple(attr.split('.')))
try:
plugin = ep.resolve()
self.language_info[name] = plugin
except Exception as e:
raise RuntimeError(
f'Failed to load language {language}: {e}')
if name in plugin.supported_kernels:
# if name is defined in the module, only search kernels for this language
avail_kernels = [x for x in plugin.supported_kernels[name] if
x in [y.kernel for y in self._kernel_list]]
else:
# otherwise we search all supported kernels
avail_kernels = [x for x in sum(plugin.supported_kernels.values(), []) if
x in [y.kernel for y in self._kernel_list]]
if not avail_kernels:
raise ValueError(
'Failed to find any of the kernels {} supported by language {}. Please make sure it is properly installed and appears in the output of the command "jupyter kernelspec list"'.format(
', '.join(sum(plugin.supported_kernels.values(), [])), language))
# use the first available kernel
# find the language that has the kernel
lan_name = list({x: y for x, y in plugin.supported_kernels.items(
) if avail_kernels[0] in y}.keys())[0]
if color == 'default':
color = self.get_background_color(plugin, lan_name)
new_def = self.add_or_replace(subkernel(name, avail_kernels[0], lan_name, self.get_background_color(plugin, lan_name) if color is None else color,
getattr(plugin, 'options', {})))
else:
# if a language name is specified (not a path to a module), it should be defined in setup.py
if language not in self.language_info:
raise RuntimeError(
f'Unrecognized language definition {language}')
#
plugin = self.language_info[language]
if language in plugin.supported_kernels:
avail_kernels = [x for x in plugin.supported_kernels[language] if
x in [y.kernel for y in self._kernel_list]]
else:
avail_kernels = [x for x in sum(plugin.supported_kernels.values(), []) if
x in [y.kernel for y in self._kernel_list]]
if not avail_kernels:
raise ValueError(
'Failed to find any of the kernels {} supported by language {}. Please make sure it is properly installed and appears in the output of the command "jupyter kernelspec list"'.format(
', '.join(
sum(self.language_info[language].supported_kernels.values(), [])),
language))
new_def = self.add_or_replace(subkernel(
name, avail_kernels[0], language,
self.get_background_color(
self.language_info[language], language) if color is None or color == 'default' else color,
getattr(self.language_info[language], 'options', {})))
self.notify_frontend()
return new_def
else:
# let us check if there is something wrong with the pre-defined language
for entrypoint in pkg_resources.iter_entry_points(group='sos_languages'):
if entrypoint.name == name:
# there must be something wrong, let us trigger the exception here
entrypoint.load()
# if nothing is triggered, the kernel is not defined; return a general message
raise ValueError(
f'No subkernel named {name} is found. Please make sure that you have the kernel installed (listed in the output of "jupyter kernelspec list" and usable in jupyter by itself), install appropriate language module (e.g. "pip install sos-r"), restart jupyter notebook and try again.')
def update(self, notebook_kernel_list):
for kinfo in notebook_kernel_list:
try:
# if we can find the kernel, fine...
self.find(kinfo[0], kinfo[1], kinfo[2],
kinfo[3], notify_frontend=False)
except Exception as e:
# otherwise do not worry about it.
env.logger.warning(
f'Failed to locate subkernel {kinfo[0]} with kernel "{kinfo[1]}" and language "{kinfo[2]}": {e}')
def notify_frontend(self):
self._kernel_list.sort(key=lambda x: x.name)
self.sos_kernel.send_frontend_msg('kernel-list',
[[x.name, x.kernel, x.language, x.color, x.options] for x in self._kernel_list])
class SoS_Kernel(IPythonKernel):
implementation = 'SOS'
implementation_version = __version__
language = 'sos'
language_version = __sos_version__
language_info = {
'mimetype': 'text/x-sos',
'name': 'sos',
'file_extension': '.sos',
'pygments_lexer': 'sos',
'codemirror_mode': 'sos',
'nbconvert_exporter': 'sos_notebook.converter.SoS_Exporter',
}
banner = "SoS kernel - script of scripts"
def get_supported_languages(self):
if self._supported_languages is not None:
return self._supported_languages
group = 'sos_languages'
self._supported_languages = {}
for entrypoint in pkg_resources.iter_entry_points(group=group):
# Grab the function that is the actual plugin.
name = entrypoint.name
try:
plugin = entrypoint.load()
self._supported_languages[name] = plugin
except Exception as e:
self._failed_languages[name] = e
return self._supported_languages
supported_languages = property(lambda self: self.get_supported_languages())
def get_kernel_list(self):
if not hasattr(self, '_subkernels'):
self._subkernels = Subkernels(self)
# sort kernel list by name to avoid unnecessary change of .ipynb files
return self._subkernels
subkernels = property(lambda self: self.get_kernel_list())
def get_completer(self):
if self._completer is None:
self._completer = SoS_Completer(self)
return self._completer
completer = property(lambda self: self.get_completer())
def get_inspector(self):
if self._inspector is None:
self._inspector = SoS_Inspector(self)
return self._inspector
inspector = property(lambda self: self.get_inspector())
def __init__(self, **kwargs):
super(SoS_Kernel, self).__init__(**kwargs)
self.options = ''
self.kernel = 'SoS'
# a dictionary of started kernels, with the format of
#
# 'R': ['ir', 'sos.R.sos_R', '#FFEEAABB']
#
# Note that:
#
# 'R' is the displayed name of the kernel.
# 'ir' is the kernel name.
# 'sos.R.sos_R' is the language module.
# '#FFEEAABB' is the background color
#
self.kernels = {}
# self.shell = InteractiveShell.instance()
self.format_obj = self.shell.display_formatter.format
self.original_keys = None
self._meta = {'use_panel': True}
self._supported_languages = None
self._completer = None
self._inspector = None
self._real_execution_count = 1
self._execution_count = 1
self._debug_mode = False
self.frontend_comm = None
self.comm_manager.register_target('sos_comm', self.sos_comm)
self.my_tasks = {}
self.magics = SoS_Magics(self)
self.last_executed_code = ''
self._kernel_return_vars = []
self._failed_languages = {}
env.__task_notifier__ = self.notify_task_status
# enable matplotlib by default #77
self.shell.enable_gui = lambda gui: None
# sos does not yet support the macOS backend to start a new window
# so a default inline mode is used.
self.shell.enable_matplotlib('inline')
#
self.editor_kernel = 'sos'
# remove all other handlers
env.logger.handlers = []
env.logger.addHandler(
NotebookLoggingHandler(logging.DEBUG, kernel=self))
cell_id = property(lambda self: self._meta['cell_id'])
_workflow_mode = property(lambda self: self._meta['workflow_mode'])
_resume_execution = property(lambda self: self._meta['resume_execution'])
def sos_comm(self, comm, msg):
# record frontend_comm to send messages
self.frontend_comm = comm
@comm.on_msg
def handle_frontend_msg(msg):
content = msg['content']['data']
# log_to_file(msg)
for k, v in content.items():
if k == 'list-kernel':
if v:
self.subkernels.update(v)
self.subkernels.notify_frontend()
elif k == 'set-editor-kernel':
self.editor_kernel = v
elif k == 'kill-task':
# kill specified task
from sos.hosts import Host
Host(v[1])._task_engine.kill_tasks([v[0]])
self.notify_task_status(
['change-status', v[1], v[0], 'aborted', (None, None, None)])
elif k == 'resume-task':
# resume specified task
from sos.hosts import Host
Host(v[1])._task_engine.resume_task(v[0])
self.notify_task_status(
['change-status', v[1], v[0], 'pending', (None, None, None)])
elif k == 'task-info':
self._meta['use_panel'] = True
self.update_taskinfo(v[0], v[1])
elif k == 'update-task-status':
if not isinstance(v, list):
continue
# split by host ...
host_status = defaultdict(list)
for name in v:
if not name.startswith('status_'):
continue
try:
tqu, tid = name[7:].rsplit('_', 1)
except Exception:
# incorrect ID...
continue
host_status[tqu].append(tid)
# log_to_file(host_status)
#
from sos.hosts import Host
for tqu, tids in host_status.items():
try:
h = Host(tqu)
except Exception:
continue
for tid, tst, tdt in h._task_engine.monitor_tasks(tids):
self.notify_task_status(
['change-status', tqu, tid, tst, tdt])
self.send_frontend_msg('update-duration', {})
elif k == 'paste-table':
try:
from tabulate import tabulate
df = pd.read_clipboard()
tbl = tabulate(df, headers='keys', tablefmt='pipe')
self.send_frontend_msg('paste-table', tbl)
if self._debug_mode:
log_to_file(tbl)
except Exception as e:
self.send_frontend_msg(
'alert', f'Failed to paste clipboard as table: {e}')
elif k == 'notebook-version':
# send the version of notebook, right now we will not do anything to it, but
# we will send the version of sos-notebook there
self.send_frontend_msg(
'notebook-version', __notebook_version__)
else:
# this somehow does not work
self.warn(f'Unknown message {k}: {v}')
status_class = {
'pending': 'fa-square-o',
'submitted': 'fa-spinner',
'running': 'fa-spinner fa-pulse fa-spin',
'completed': 'fa-check-square-o',
'failed': 'fa-times-circle-o',
'aborted': 'fa-frown-o',
'missing': 'fa-question',
'unknown': 'fa-question',
}
def update_taskinfo(self, task_id, task_queue):
# requesting information on task
from sos.hosts import Host
host = Host(task_queue)
result = host._task_engine.query_tasks(
[task_id], verbosity=2, html=True)
# log_to_file(result)
self.send_frontend_msg('display_data', {
'metadata': {},
'data': {'text/plain': result,
'text/html': HTML(result).data
}}, title=f'%taskinfo {task_id} -q {task_queue}', page='Tasks')
# now, there is a possibility that the status of the task is different from what
# task engine knows (e.g. a task is rerun outside of jupyter). In this case, since we
# already get the status, we should update the task engine...
#
# <tr><th align="right" width="30%">Status</th><td align="left"><div class="one_liner">completed</div></td></tr>
status = result.split(
'>Status<', 1)[-1].split('</div', 1)[0].split('>')[-1]
host._task_engine.update_task_status(task_id, status)
def notify_task_status(self, task_status):
action_class = {
'pending': 'fa-stop',
'submitted': 'fa-stop',
'running': 'fa-stop',
'completed': 'fa-play',
'failed': 'fa-play',
'aborted': 'fa-play',
'missing': 'fa-question',
'unknown': 'fa-question',
}
action_func = {
'pending': 'kill_task',
'submitted': 'kill_task',
'running': 'kill_task',
'completed': 'resume_task',
'failed': 'resume_task',
'aborted': 'resume_task',
'missing': 'function(){}',
'unknown': 'function(){}',
}
if task_status[0] == 'new-status':
tqu, tid, tst, tdt = task_status[1:]
# tdt contains creation time, start running time, and duration
if tdt[2]:
timer = f'Ran for {format_duration(tdt[2])}</time>'
elif tdt[1]:
# start running
timer = f'<time id="duration_{tqu}_{tid}" class="{tst}" datetime="{tdt[1]*1000}">Ran for {format_duration(time.time() - tdt[1])}</time>'
else:
timer = f'<time id="duration_{tqu}_{tid}" class="{tst}" datetime="{tdt[0]*1000}">Pending for {format_duration(time.time() - tdt[0])}</time>'
self.send_response(self.iopub_socket, 'display_data',
{
'metadata': {},
'data': {'text/html':
HTML(f'''<table id="table_{tqu}_{tid}" class="task_table"><tr style="border: 0px">
<td style="border: 0px">
<i id="status_{tqu}_{tid}"
class="fa fa-2x fa-fw {self.status_class[tst]}"
onmouseover="'{self.status_class[tst]}'.split(' ').map(x => document.getElementById('status_{tqu}_{tid}').classList.remove(x));'{action_class[tst]} task_hover'.split(' ').map(x => document.getElementById('status_{tqu}_{tid}').classList.add(x));"
onmouseleave="'{action_class[tst]} task_hover'.split(' ').map(x => document.getElementById('status_{tqu}_{tid}').classList.remove(x));'{self.status_class[tst]}'.split(' ').map(x => document.getElementById('status_{tqu}_{tid}').classList.add(x));"
onclick="{action_func[tst]}('{tid}', '{tqu}')"
></i> </td>
<td style="border:0px"><a href='#' onclick="task_info('{tid}', '{tqu}')"><pre>{tid}</pre></a></td>
<td style="border:0px"> </td>
<td style="border:0px;text-align=right;">
<pre><span id="tagline_{tqu}_{tid}">{timer}</span></pre></td>
</tr>
</table>''').data}})
# keep track of my tasks to avoid updating the status of
# tasks that do not belong to this notebook
self.my_tasks[(tqu, tid)] = time.time()
elif task_status[0] == 'remove-task':
tqu, tid = task_status[1:]
if (tqu, tid) in self.my_tasks:
self.send_frontend_msg('remove-task', [tqu, tid])
elif task_status[0] == 'change-status':
tqu, tid, tst, tdt = task_status[1:]
if tst not in ('pending', 'submitted', 'running', 'completed',
'failed', 'aborted'):
tst = 'unknown'
self.send_frontend_msg('task-status',
[tqu, tid, tst, tdt, self.status_class[tst], action_class[tst], action_func[tst]])
self.my_tasks[(tqu, tid)] = time.time()
elif task_status[0] == 'pulse-status':
tqu, tid, tst, tdt = task_status[1:]
if tst not in ('pending', 'submitted', 'running', 'completed',
'failed', 'aborted'):
tst = 'unknown'
if (tqu, tid) in self.my_tasks:
if time.time() - self.my_tasks[(tqu, tid)] < 20:
# only forward pulse updates within the first 20 seconds of a new or
# updated message, to confirm that it has been successfully delivered;
# otherwise ignore such messages
self.send_frontend_msg('task-status',
[tqu, tid, tst, tdt, self.status_class[tst], action_class[tst], action_func[tst]])
else:
# perhaps the pulse one does not have an initial value yet
self.send_frontend_msg('task-status',
[tqu, tid, tst, tdt, self.status_class[tst], action_class[tst], action_func[tst]])
self.my_tasks[(tqu, tid)] = time.time()
else:
raise RuntimeError(
f'Unrecognized status change message {task_status}')
def send_frontend_msg(self, msg_type, msg=None, title='', append=False, page='Info'):
# if comm is never created by frontend, the kernel is in test mode without frontend
if msg_type in ('display_data', 'stream'):
if self._meta['use_panel'] is False:
if msg_type in ('display_data', 'stream'):
self.send_response(self.iopub_socket, msg_type,
{} if msg is None else msg)
else:
self.frontend_comm.send(
make_transient_msg(
msg_type, msg, append=append, title=title, page=page),
{'msg_type': 'transient_display_data'})
elif self.frontend_comm:
self.frontend_comm.send({} if msg is None else msg, {
'msg_type': msg_type})
elif self._debug_mode:
# we should not always do this because the kernel could be triggered by
# tests, which will not have a frontend sos comm
self.warn(
'Frontend communicator is broken. Please restart jupyter server')
def _reset_dict(self):
env.sos_dict = WorkflowDict()
SoS_exec('import os, sys, glob', None)
SoS_exec('from sos.runtime import *', None)
SoS_exec("run_mode = 'interactive'", None)
self.original_keys = set(env.sos_dict._dict.keys()) | {'SOS_VERSION', 'CONFIG',
'step_name', '__builtins__', 'input', 'output',
'depends'}
@contextlib.contextmanager
def redirect_sos_io(self):
save_stdout = sys.stdout
save_stderr = sys.stderr
sys.stdout = FlushableStringIO(self, 'stdout')
sys.stderr = FlushableStringIO(self, 'stderr')
yield
sys.stdout = save_stdout
sys.stderr = save_stderr
def get_vars_from(self, items, from_kernel=None, explicit=False):
if from_kernel is None or from_kernel.lower() == 'sos':
# automatically get all variables with names starting with 'sos'
default_items = [x for x in env.sos_dict.keys() if x.startswith(
'sos') and x not in self.original_keys]
items = default_items if not items else items + default_items
for item in items:
if item not in env.sos_dict:
self.warn(f'Variable {item} does not exist')
return
if not items:
return
if self.kernel in self.supported_languages:
lan = self.supported_languages[self.kernel]
kinfo = self.subkernels.find(self.kernel)
try:
lan(self, kinfo.kernel).get_vars(items)
except Exception as e:
self.warn(f'Failed to get variable: {e}\n')
return
elif self.kernel == 'SoS':
self.warn(
'Magic %get without option --kernel can only be executed by subkernels')
return
else:
if explicit:
self.warn(
f'Magic %get failed because the language module for {self.kernel} is not properly installed. Please install it according to language specific instructions on the Running SoS section of the SoS homepage and restart Jupyter server.')
return
elif self.kernel.lower() == 'sos':
# if another kernel is specified and the current kernel is sos
# we get from subkernel
try:
self.switch_kernel(from_kernel)
self.put_vars_to(items)
except Exception as e:
self.warn(
f'Failed to get {", ".join(items)} from {from_kernel}: {e}')
finally:
self.switch_kernel('SoS')
else:
# if another kernel is specified, we should try to let that kernel pass
# the variables to this one directly
try:
my_kernel = self.kernel
self.switch_kernel(from_kernel)
# put stuff to sos or my_kernel directly
self.put_vars_to(
items, to_kernel=my_kernel, explicit=explicit)
except Exception as e:
self.warn(
f'Failed to get {", ".join(items)} from {from_kernel}: {e}')
finally:
# then switch back
self.switch_kernel(my_kernel)
def put_vars_to(self, items, to_kernel=None, explicit=False):
if self.kernel.lower() == 'sos':
if to_kernel is None:
self.warn(
'Magic %put without option --kernel can only be executed by subkernels')
return
# if another kernel is specified and the current kernel is sos
try:
# switch to kernel and bring in items
self.switch_kernel(to_kernel, in_vars=items)
except Exception as e:
self.warn(
f'Failed to put {", ".join(items)} to {to_kernel}: {e}')
finally:
# switch back
self.switch_kernel('SoS')
else:
# put to sos kernel or another kernel
#
# items can be None if unspecified
if not items:
# we do not simply return because we need to return default variables (with names starting with 'sos')
items = []
if self.kernel not in self.supported_languages:
if explicit:
self.warn(
f'Subkernel {self.kernel} does not support magic %put.')
return
#
lan = self.supported_languages[self.kernel]
kinfo = self.subkernels.find(self.kernel)
# pass language name to to_kernel
try:
if to_kernel:
objects = lan(self, kinfo.kernel).put_vars(
items, to_kernel=self.subkernels.find(to_kernel).language)
else:
objects = lan(self, kinfo.kernel).put_vars(
items, to_kernel='SoS')
except Exception as e:
                # if something goes wrong in the subkernel it does not matter
if self._debug_mode:
self.warn(
f'Failed to call put_var({items}) from {kinfo.kernel}')
objects = {}
if isinstance(objects, dict):
# returns a SOS dictionary
try:
env.sos_dict.update(objects)
except Exception as e:
self.warn(
f'Failed to put {", ".join(items)} to {to_kernel}: {e}')
return
if to_kernel is None:
return
# if another kernel is specified and the current kernel is not sos
# we need to first put to sos then to another kernel
try:
my_kernel = self.kernel
# switch to the destination kernel and bring in vars
self.switch_kernel(to_kernel, in_vars=items)
except Exception as e:
self.warn(
f'Failed to put {", ".join(items)} to {to_kernel}: {e}')
finally:
# switch back to the original kernel
self.switch_kernel(my_kernel)
elif isinstance(objects, str):
                # a statement that will be executed in the destination kernel
if to_kernel is None or to_kernel == 'SoS':
# evaluate in SoS, this should not happen or rarely happen
# because the subkernel should return a dictionary for SoS kernel
try:
exec(objects, env.sos_dict._dict)
except Exception as e:
self.warn(
f'Failed to put variables {items} to SoS kernel: {e}')
return
try:
my_kernel = self.kernel
# switch to the destination kernel
self.switch_kernel(to_kernel)
# execute the statement to pass variables directly to destination kernel
self.run_cell(objects, True, False)
except Exception as e:
self.warn(
f'Failed to put {", ".join(items)} to {to_kernel}: {e}')
finally:
# switch back to the original kernel
self.switch_kernel(my_kernel)
else:
self.warn(
                    f'Unrecognized return value of type {objects.__class__.__name__} for action %put')
return
def do_is_complete(self, code):
'''check if new line is in order'''
code = code.strip()
if not code:
return {'status': 'complete', 'indent': ''}
if any(code.startswith(x) for x in ['%dict', '%paste', '%edit', '%cd', '!']):
return {'status': 'complete', 'indent': ''}
if code.endswith(':') or code.endswith(','):
return {'status': 'incomplete', 'indent': ' '}
lines = code.split('\n')
if lines[-1].startswith(' ') or lines[-1].startswith('\t'):
            # the last line is indented, so continue with the same indentation
empty = [idx for idx, x in enumerate(
lines[-1]) if x not in (' ', '\t')][0]
return {'status': 'incomplete', 'indent': lines[-1][:empty]}
#
if SOS_SECTION_HEADER.match(lines[-1]):
return {'status': 'incomplete', 'indent': ''}
#
return {'status': 'incomplete', 'indent': ''}
def do_inspect(self, code, cursor_pos, detail_level=0):
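        # route inspection requests to the SoS inspector, or to the subkernel
        # that owns the cell being edited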
if self.editor_kernel.lower() == 'sos':
line, offset = line_at_cursor(code, cursor_pos)
name = token_at_cursor(code, cursor_pos)
data = self.inspector.inspect(name, line, cursor_pos - offset)
return {
'status': 'ok',
'metadata': {},
'found': True if data else False,
'data': data
}
else:
cell_kernel = self.subkernels.find(self.editor_kernel)
try:
_, KC = self.kernels[cell_kernel.name]
except Exception as e:
if self._debug_mode:
log_to_file(f'Failed to get subkernels {cell_kernel.name}')
KC = self.KC
try:
KC.inspect(code, cursor_pos)
while KC.shell_channel.msg_ready():
msg = KC.shell_channel.get_msg()
if msg['header']['msg_type'] == 'inspect_reply':
return msg['content']
else:
                        # other messages; we do not know what is going on, but
                        # we should not wait forever and deadlock here
if self._debug_mode:
log_to_file(
f"complete_reply not obtained: {msg['header']['msg_type']} {msg['content']} returned instead")
break
except Exception as e:
if self._debug_mode:
log_to_file(f'Completion fail with exception: {e}')
def do_complete(self, code, cursor_pos):
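        # route completion requests to the SoS completer, or to the subkernel
        # that owns the cell being edited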
if self.editor_kernel.lower() == 'sos':
text, matches = self.completer.complete_text(code, cursor_pos)
return {'matches': matches,
'cursor_end': cursor_pos,
'cursor_start': cursor_pos - len(text),
'metadata': {},
'status': 'ok'}
else:
cell_kernel = self.subkernels.find(self.editor_kernel)
try:
_, KC = self.kernels[cell_kernel.name]
except Exception as e:
if self._debug_mode:
log_to_file(f'Failed to get subkernels {cell_kernel.name}')
KC = self.KC
try:
KC.complete(code, cursor_pos)
while KC.shell_channel.msg_ready():
msg = KC.shell_channel.get_msg()
if msg['header']['msg_type'] == 'complete_reply':
return msg['content']
else:
                        # other messages; we do not know what is going on, but
                        # we should not wait forever and deadlock here
if self._debug_mode:
log_to_file(
f"complete_reply not obtained: {msg['header']['msg_type']} {msg['content']} returned instead")
break
except Exception as e:
if self._debug_mode:
log_to_file(f'Completion fail with exception: {e}')
def warn(self, message):
message = str(message).rstrip() + '\n'
if message.strip():
self.send_response(self.iopub_socket, 'stream',
{'name': 'stderr', 'text': message})
def run_cell(self, code, silent, store_history, on_error=None):
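        # execute code in the current subkernel and relay its stdin and iopub
        # messages back to the frontend until the subkernel becomes idle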
#
if not self.KM.is_alive():
self.send_response(self.iopub_socket, 'stream',
dict(name='stdout', text='Restarting kernel "{}"\n'.format(self.kernel)))
self.KM.restart_kernel(now=False)
self.KC = self.KM.client()
# flush stale replies, which could have been ignored, due to missed heartbeats
while self.KC.shell_channel.msg_ready():
self.KC.shell_channel.get_msg()
# executing code in another kernel
self.KC.execute(code, silent=silent, store_history=store_history)
# first thing is wait for any side effects (output, stdin, etc.)
_execution_state = "busy"
while _execution_state != 'idle':
# display intermediate print statements, etc.
while self.KC.stdin_channel.msg_ready():
sub_msg = self.KC.stdin_channel.get_msg()
if self._debug_mode:
log_to_file(f"MSG TYPE {sub_msg['header']['msg_type']}")
log_to_file(f'CONTENT {sub_msg}')
if sub_msg['header']['msg_type'] != 'input_request':
self.send_response(
self.stdin_socket, sub_msg['header']['msg_type'], sub_msg["content"])
else:
content = sub_msg["content"]
if content['password']:
res = self.getpass(prompt=content['prompt'])
else:
res = self.raw_input(prompt=content['prompt'])
self.KC.input(res)
while self.KC.iopub_channel.msg_ready():
sub_msg = self.KC.iopub_channel.get_msg()
msg_type = sub_msg['header']['msg_type']
if self._debug_mode:
log_to_file(f'MSG TYPE {msg_type}')
log_to_file(f'CONTENT {sub_msg["content"]}')
if msg_type == 'status':
_execution_state = sub_msg["content"]["execution_state"]
else:
if msg_type in ('execute_input', 'execute_result'):
# override execution count with the master count,
# not sure if it is needed
sub_msg['content']['execution_count'] = self._execution_count
#
if msg_type in ['display_data', 'stream', 'execute_result', 'update_display_data']:
if self._meta['capture_result'] is not None:
self._meta['capture_result'].append((msg_type, sub_msg['content']))
if silent:
continue
self.send_response(
self.iopub_socket, msg_type, sub_msg['content'])
#
# now get the real result
reply = self.KC.get_shell_msg(timeout=10)
reply['content']['execution_count'] = self._execution_count
return reply['content']
def switch_kernel(self, kernel, in_vars=None, ret_vars=None, kernel_name=None, language=None, color=None):
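        # switch the active kernel to "kernel", starting a new subkernel if
        # necessary and using in_vars/ret_vars to exchange variables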
        # if no kernel is specified, list active and available subkernels
if not kernel:
kinfo = self.subkernels.find(self.kernel)
self.send_response(self.iopub_socket, 'stream',
dict(name='stdout', text='''\
Active subkernels: {}
Available subkernels:\n{}'''.format(', '.join(self.kernels.keys()),
'\n'.join([' {} ({})'.format(x.name, x.kernel) for x in self.subkernels.kernel_list()]))))
return
kinfo = self.subkernels.find(kernel, kernel_name, language, color)
if kinfo.name == self.kernel:
# the same kernel, do nothing?
            # but the scenario can be
#
# kernel in SoS
# cell R
# %use R -i n
#
# SoS get:
#
# %softwidth --default-kernel R --cell-kernel R
# %use R -i n
#
# Now, SoS -> R without variable passing
# R -> R should honor -i n
            # or, when we randomly jump cells, we should more aggressively return
# automatically shared variables to sos (done by the following) (#375)
if kinfo.name != 'SoS':
self.switch_kernel('SoS')
self.switch_kernel(kinfo.name, in_vars, ret_vars)
elif kinfo.name == 'SoS':
self.put_vars_to(self._kernel_return_vars)
self._kernel_return_vars = []
self.kernel = 'SoS'
elif self.kernel != 'SoS':
            # neither the current kernel nor the target is SoS, so switch through SoS first
self.switch_kernel('SoS', in_vars, ret_vars)
self.switch_kernel(kinfo.name, in_vars, ret_vars)
else:
if self._debug_mode:
self.warn(f'Switch from {self.kernel} to {kinfo.name}')
# case when self.kernel == 'sos', kernel != 'sos'
# to a subkernel
new_kernel = False
if kinfo.name not in self.kernels:
# start a new kernel
try:
self.kernels[kinfo.name] = manager.start_new_kernel(
startup_timeout=60, kernel_name=kinfo.kernel, cwd=os.getcwd())
new_kernel = True
except Exception as e:
                # try to get the error message
import tempfile
with tempfile.TemporaryFile() as ferr:
try:
# this should fail
manager.start_new_kernel(
startup_timeout=60, kernel_name=kinfo.kernel, cwd=os.getcwd(),
stdout=subprocess.DEVNULL, stderr=ferr)
except:
ferr.seek(0)
self.warn(
f'Failed to start kernel "{kernel}". {e}\nError Message:\n{ferr.read().decode()}')
return
self.KM, self.KC = self.kernels[kinfo.name]
self._kernel_return_vars = [] if ret_vars is None else ret_vars
self.kernel = kinfo.name
if new_kernel and self.kernel in self.supported_languages:
init_stmts = self.supported_languages[self.kernel](
self, kinfo.kernel).init_statements
if init_stmts:
self.run_cell(init_stmts, True, False)
# passing
self.get_vars_from(in_vars)
def shutdown_kernel(self, kernel, restart=False):
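        # shut down (and optionally restart) a named subkernel; the SoS kernel
        # itself cannot be restarted from within SoS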
kernel = self.subkernels.find(kernel).name
if kernel == 'SoS':
# cannot restart myself ...
self.warn('Cannot restart SoS kernel from within SoS.')
elif kernel:
if kernel not in self.kernels:
self.send_response(self.iopub_socket, 'stream',
dict(name='stdout', text=f'{kernel} is not running'))
elif restart:
orig_kernel = self.kernel
try:
# shutdown
self.shutdown_kernel(kernel)
# switch back to kernel (start a new one)
self.switch_kernel(kernel)
finally:
# finally switch to starting kernel
self.switch_kernel(orig_kernel)
else:
# shutdown
if self.kernel == kernel:
self.switch_kernel('SoS')
try:
self.kernels[kernel][0].shutdown_kernel(restart=False)
except Exception as e:
self.warn(f'Failed to shutdown kernel {kernel}: {e}\n')
finally:
self.kernels.pop(kernel)
else:
self.send_response(self.iopub_socket, 'stream',
dict(name='stdout', text='Specify one of the kernels to shutdown: SoS{}\n'
.format(''.join(f', {x}' for x in self.kernels))))
def get_response(self, statement, msg_types, name=None):
# get response of statement of specific msg types.
responses = []
self.KC.execute(statement, silent=False, store_history=False)
# first thing is wait for any side effects (output, stdin, etc.)
_execution_state = "busy"
while _execution_state != 'idle':
# display intermediate print statements, etc.
while self.KC.iopub_channel.msg_ready():
sub_msg = self.KC.iopub_channel.get_msg()
msg_type = sub_msg['header']['msg_type']
if self._debug_mode:
log_to_file(f'Received {msg_type} {sub_msg["content"]}')
if msg_type == 'status':
_execution_state = sub_msg["content"]["execution_state"]
else:
if msg_type in msg_types and (name is None or sub_msg['content'].get('name', None) in name):
if self._debug_mode:
log_to_file(
f'Capture response: {msg_type}: {sub_msg["content"]}')
responses.append([msg_type, sub_msg['content']])
else:
if self._debug_mode:
log_to_file(
f'Non-response: {msg_type}: {sub_msg["content"]}')
self.send_response(
self.iopub_socket, msg_type, sub_msg['content'])
if not responses and self._debug_mode:
self.warn(
f'Failed to get a response from message type {msg_types} for the execution of {statement}')
return responses
def run_sos_code(self, code, silent):
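        # execute SoS workflow code with redirected IO and, unless silenced,
        # preview the step input and output files afterwards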
code = dedent(code)
with self.redirect_sos_io():
try:
# record input and output
fopt = ''
res = runfile(
code=code, raw_args=self.options + fopt, kernel=self)
self.send_result(res, silent)
except PendingTasks as e:
# send cell index and task IDs to frontend
self.send_frontend_msg(
'tasks-pending', [self._meta['cell_id'], e.tasks])
return
except Exception as e:
sys.stderr.flush()
sys.stdout.flush()
# self.send_response(self.iopub_socket, 'display_data',
# {
# 'metadata': {},
# 'data': { 'text/html': HTML('<hr color="black" width="60%">').data}
# })
raise
except KeyboardInterrupt:
self.warn('Keyboard Interrupt\n')
return {'status': 'abort', 'execution_count': self._execution_count}
finally:
sys.stderr.flush()
sys.stdout.flush()
#
if not silent and (not hasattr(self, 'preview_output') or self.preview_output):
# Send standard output
# if os.path.isfile('.sos/report.md'):
# with open('.sos/report.md') as sr:
# sos_report = sr.read()
# with open(self.report_file, 'a') as summary_report:
# summary_report.write(sos_report + '\n\n')
# if sos_report.strip():
# self.send_response(self.iopub_socket, 'display_data',
# {
# 'metadata': {},
# 'data': {'text/markdown': sos_report}
# })
#
if 'step_input' in env.sos_dict:
input_files = env.sos_dict['step_input']
if input_files is None:
input_files = []
else:
input_files = [
x for x in input_files if isinstance(x, str)]
else:
input_files = []
if 'step_output' in env.sos_dict:
output_files = env.sos_dict['step_output']
if output_files is None:
output_files = []
else:
output_files = [
x for x in output_files if isinstance(x, str)]
else:
output_files = []
# use a table to list input and/or output file if exist
if output_files:
title = f'%preview {" ".join(output_files)}'
if not self._meta['use_panel']:
self.send_response(self.iopub_socket, 'display_data',
{
'metadata': {},
'data': {'text/html': HTML(f'<div class="sos_hint">{title}</div>').data}
})
if hasattr(self, 'in_sandbox') and self.in_sandbox:
# if in sand box, do not link output to their files because these
# files will be removed soon.
self.send_frontend_msg('display_data',
{
'metadata': {},
'data': {'text/html':
HTML(
'''<div class="sos_hint"> input: {}<br>output: {}\n</div>'''.format(
', '.join(
x for x in input_files),
', '.join(x for x in output_files))).data
}
}, title=title, page='Preview')
else:
self.send_frontend_msg('display_data',
{
'metadata': {},
'data': {'text/html':
HTML(
'''<div class="sos_hint"> input: {}<br>output: {}\n</div>'''.format(
', '.join(
f'<a target="_blank" href="{x}">{x}</a>' for x
in input_files),
', '.join(
f'<a target="_blank" href="{x}">{x}</a>' for x
in output_files))).data
}
}, title=title, page='Preview')
for filename in output_files:
self.preview_file(filename, style=None, title=title)
def render_result(self, res):
if not self._meta['render_result']:
return res
if not isinstance(res, str):
self.warn(
f'Cannot render result {short_repr(res)} in type {res.__class__.__name__} as {self._meta["render_result"]}.')
else:
# import the object from IPython.display
mod = __import__('IPython.display')
if not hasattr(mod.display, self._meta['render_result']):
self.warn(
f'Unrecognized render format {self._meta["render_result"]}')
else:
func = getattr(mod.display, self._meta['render_result'])
res = func(res)
return res
def send_result(self, res, silent=False):
# this is Ok, send result back
if not silent and res is not None:
format_dict, md_dict = self.format_obj(self.render_result(res))
self.send_response(self.iopub_socket, 'execute_result',
{'execution_count': self._execution_count, 'data': format_dict,
'metadata': md_dict})
def init_metadata(self, metadata):
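        # extract SoS-specific metadata (cell id, kernels, notebook path, etc.)
        # sent by the frontend and store it in self._meta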
super(SoS_Kernel, self).init_metadata(metadata)
if 'sos' in metadata['content']:
meta = metadata['content']['sos']
else:
# if there is no sos metadata, the execution should be started from a test suite
# just ignore
self._meta = {
'workflow': '',
'workflow_mode': False,
'render_result': False,
'capture_result': None,
'cell_id': 0,
'notebook_name': '',
'notebook_path': '',
'use_panel': False,
'default_kernel': self.kernel,
'cell_kernel': self.kernel,
'resume_execution': False,
'toc': '',
'batch_mode': False
}
return self._meta
if self._debug_mode:
self.warn(f"Meta info: {meta}")
self._meta = {
'workflow': meta['workflow'] if 'workflow' in meta else '',
'workflow_mode': False,
'render_result': False,
'capture_result': None,
'cell_id': meta['cell_id'] if 'cell_id' in meta else "",
'notebook_path': meta['path'] if 'path' in meta else 'Untitled.ipynb',
'use_panel': True if 'use_panel' in meta and meta['use_panel'] is True else False,
'default_kernel': meta['default_kernel'] if 'default_kernel' in meta else 'SoS',
'cell_kernel': meta['cell_kernel'] if 'cell_kernel' in meta else (meta['default_kernel'] if 'default_kernel' in meta else 'SoS'),
'resume_execution': True if 'rerun' in meta and meta['rerun'] else False,
'toc': meta.get('toc', ''),
'batch_mode': meta.get('batch_mode', False)
}
# remove path and extension
self._meta['notebook_name'] = os.path.basename(
self._meta['notebook_path']).rsplit('.', 1)[0]
if 'list_kernel' in meta and meta['list_kernel']:
# https://github.com/jupyter/help/issues/153#issuecomment-289026056
#
# when the frontend is refreshed, cached comm would be lost and
# communication would be discontinued. However, a kernel-list
# request would be sent by the new-connection so we reset the
# frontend_comm to re-connect to the frontend.
self.comm_manager.register_target('sos_comm', self.sos_comm)
return self._meta
def do_execute(self, code, silent, store_history=True, user_expressions=None,
allow_stdin=True):
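        # entry point of cell execution: switch to the default and cell kernels,
        # run the cell, then evaluate user expressions and update the shell namespace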
if self._debug_mode:
self.warn(code)
self._forward_input(allow_stdin)
# switch to global default kernel
try:
if self.subkernels.find(self._meta['default_kernel']).name != self.subkernels.find(self.kernel).name:
self.switch_kernel(self._meta['default_kernel'])
# evaluate user expression
except Exception as e:
self.warn(
f'Failed to switch to language {self._meta["default_kernel"]}: {e}\n')
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self._execution_count,
}
# switch to cell kernel
try:
if self.subkernels.find(self._meta['cell_kernel']).name != self.subkernels.find(self.kernel).name:
self.switch_kernel(self._meta['cell_kernel'])
except Exception as e:
self.warn(
f'Failed to switch to language {self._meta["cell_kernel"]}: {e}\n')
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self._execution_count,
}
# execute with cell kernel
try:
ret = self._do_execute(code=code, silent=silent, store_history=store_history,
user_expressions=user_expressions, allow_stdin=allow_stdin)
except Exception as e:
self.warn(e)
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self._execution_count,
}
finally:
self._meta['resume_execution'] = False
if ret is None:
ret = {'status': 'ok',
'payload': [], 'user_expressions': {},
'execution_count': self._execution_count}
out = {}
for key, expr in (user_expressions or {}).items():
try:
# value = self.shell._format_user_obj(SoS_eval(expr))
value = SoS_eval(expr)
value = self.shell._format_user_obj(value)
except Exception as e:
self.warn(f'Failed to evaluate user expression {expr}: {e}')
value = self.shell._user_obj_error()
out[key] = value
ret['user_expressions'] = out
#
if not silent and store_history:
self._real_execution_count += 1
self._execution_count = self._real_execution_count
# make sure post_executed is triggered after the completion of all cell content
self.shell.user_ns.update(env.sos_dict._dict)
# trigger post processing of object and display matplotlib figures
self.shell.events.trigger('post_execute')
# tell the frontend the kernel for the "next" cell
return ret
def _do_execute(self, code, silent, store_history=True, user_expressions=None,
allow_stdin=True):
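        # dispatch a cell: handle magics first, forward code to the active
        # subkernel if one is in use, otherwise run it as SoS code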
# handles windows/unix newline
code = '\n'.join(code.splitlines()) + '\n'
if self.original_keys is None:
self._reset_dict()
if code == 'import os\n_pid = os.getpid()':
            # this is a special probing command from vim-ipython. Let us handle it specially
            # so that vim-ipython can get the pid.
return
for magic in self.magics.values():
if magic.match(code):
return magic.apply(code, silent, store_history, user_expressions, allow_stdin)
if self.kernel != 'SoS':
# handle string interpolation before sending to the underlying kernel
if code:
self.last_executed_code = code
if self._meta['cell_id']:
self.send_frontend_msg(
'cell-kernel', [self._meta['cell_id'], self.kernel])
self._meta['cell_id'] = ""
if code is None:
return
try:
# We remove leading new line in case that users have a SoS
# magic and a cell magic, separated by newline.
# issue #58 and #33
return self.run_cell(code.lstrip(), silent, store_history)
except KeyboardInterrupt:
self.warn('Keyboard Interrupt\n')
self.KM.interrupt_kernel()
return {'status': 'abort', 'execution_count': self._execution_count}
else:
if code:
self.last_executed_code = code
# if the cell starts with comment, and newline, remove it
lines = code.splitlines()
empties = [x.startswith('#') or not x.strip() for x in lines]
self.send_frontend_msg(
'cell-kernel', [self._meta['cell_id'], 'SoS'])
if all(empties):
return {'status': 'ok', 'payload': [], 'user_expressions': {}, 'execution_count': self._execution_count}
else:
idx = empties.index(False)
if idx != 0:
# not start from empty, but might have magic etc
return self._do_execute('\n'.join(lines[idx:]) + '\n', silent, store_history, user_expressions, allow_stdin)
# if there is no more empty, magic etc, enter workflow mode
# run sos
try:
self.run_sos_code(code, silent)
if self._meta['cell_id']:
self._meta['cell_id'] = ""
return {'status': 'ok', 'payload': [], 'user_expressions': {}, 'execution_count': self._execution_count}
except Exception as e:
self.warn(str(e))
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self._execution_count,
}
finally:
# even if something goes wrong, we clear output so that the "preview"
# will not be viewed by a later step.
env.sos_dict.pop('input', None)
env.sos_dict.pop('output', None)
def do_shutdown(self, restart):
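        # shut down all subkernels started by this SoS kernel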
#
for name, (km, _) in self.kernels.items():
try:
km.shutdown_kernel(restart=restart)
except Exception as e:
self.warn(f'Failed to shutdown kernel {name}: {e}')
def __del__(self):
        # upon release of the sos kernel, kill all subkernels. One might expect this to
        # be done by the Jupyter cleanup code or the OS (because subkernels are subprocesses),
        # but it is not.
self.do_shutdown(False)
if __name__ == '__main__':
from ipykernel.kernelapp import IPKernelApp
IPKernelApp.launch_instance(kernel_class=SoS_Kernel)
```
#### File: src/sos_notebook/magics.py
```python
import argparse
import fnmatch
import os
import pydoc
import re
import shlex
import subprocess
import sys
from collections import OrderedDict
from collections.abc import Sized
from io import StringIO
from types import ModuleType
import pandas as pd
from IPython.core.display import HTML
from IPython.core.error import UsageError
from IPython.lib.clipboard import (ClipboardEmpty, osx_clipboard_get,
tkinter_clipboard_get)
from jupyter_client import find_connection_file
from sos.eval import SoS_eval, interpolate
from sos.syntax import SOS_SECTION_HEADER
from sos.utils import env, pretty_size, short_repr, pexpect_run
from sos._version import __version__
class SoS_Magic(object):
name = 'BaseMagic'
def __init__(self, kernel):
self.sos_kernel = kernel
        self.pattern = re.compile(fr'%{self.name}(\s|$)')
def _interpolate_text(self, text, quiet=False):
# interpolate command
try:
new_text = interpolate(text, local_dict=env.sos_dict._dict)
if new_text != text and not quiet:
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'display_data',
{
'metadata': {},
'data': {
'text/html': HTML(
f'<div class="sos_hint">> {new_text.strip() + "<br>"}</div>').data}
})
return new_text
except Exception as e:
self.sos_kernel.warn(
f'Failed to interpolate {short_repr(text)}: {e}\n')
return None
def get_magic_and_code(self, code, warn_remaining=False):
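        # split a cell into the (interpolated) magic command line and the remaining code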
if code.startswith('%') or code.startswith('!'):
lines = re.split(r'(?<!\\)\n', code, 1)
            # remove lines joined by \
lines[0] = lines[0].replace('\\\n', '')
else:
lines = code.split('\n', 1)
pieces = self._interpolate_text(
lines[0], quiet=False).strip().split(None, 1)
if len(pieces) == 2:
command_line = pieces[1]
else:
command_line = ''
remaining_code = lines[1] if len(lines) > 1 else ''
if warn_remaining and remaining_code.strip():
self.sos_kernel.warn('Statement {} ignored'.format(
short_repr(remaining_code)))
return command_line, remaining_code
def match(self, code):
return self.pattern.match(code)
def run_shell_command(self, cmd):
# interpolate command
if not cmd:
return
try:
with self.sos_kernel.redirect_sos_io():
pexpect_run(cmd, shell=True,
win_width=40 if self.sos_kernel._meta['cell_id'] == "" else 80)
except Exception as e:
self.sos_kernel.warn(e)
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
raise RuntimeError(f'Unimplemented magic {self.name}')
def _parse_error(self, msg):
self.sos_kernel.warn(msg)
class Command_Magic(SoS_Magic):
name = '!'
def match(self, code):
return code.startswith('!')
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
self.run_shell_command(code.split(' ')[0][1:] + ' ' + options)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Capture_Magic(SoS_Magic):
name = 'capture'
def __init__(self, kernel):
super(Capture_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%capture',
description='''Capture output (stdout) or output file from a subkernel
as variable in SoS''')
parser.add_argument('msg_type', nargs='?', default='stdout', choices=['stdout', 'stderr', 'text', 'markdown',
'html', 'raw'],
help='''Message type to capture, default to standard output. In terms of Jupyter message
types, "stdout" refers to "stream" message with "stdout" type, "stderr" refers to "stream"
message with "stderr" type, "text", "markdown" and "html" refers to "display_data" message
with "text/plain", "text/markdown" and "text/html" type respectively. If "raw" is specified,
all returned messages will be returned in a list format.''')
parser.add_argument('--as', dest='as_type', default='text', nargs='?', choices=('text', 'json', 'csv', 'tsv'),
                            help='''How to interpret the captured text. This is only applicable to the stdout, stderr, and
                            text message types, where the text from cell output will be collected. If this
option is given, SoS will try to parse the text as json, csv (comma separated text),
tsv (tab separated text), and store text (from text), Pandas DataFrame
(from csv or tsv), dict or other types (from json) to the variable.''')
grp = parser.add_mutually_exclusive_group(required=False)
grp.add_argument('-t', '--to', dest='__to__', metavar='VAR',
                         help='''Name of variable to which the captured content will be saved. If no variable is
specified, the return value will be saved to variable "__captured" and be displayed
at the side panel. ''')
grp.add_argument('-a', '--append', dest='__append__', metavar='VAR',
help='''Name of variable to which the captured content will be appended.
This option is equivalent to --to if VAR does not exist. If VAR exists
and is of the same type of new content (str or dict or DataFrame), the
new content will be appended to VAR if VAR is of str (str concatenation),
dict (dict update), or DataFrame (DataFrame.append) types. If VAR is of
list type, the new content will be appended to the end of the list.''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
try:
args = parser.parse_args(shlex.split(options))
except SystemExit:
return
try:
self.sos_kernel._meta['capture_result'] = []
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
finally:
# parse capture_result
content = ''
if args.msg_type == 'stdout':
for msg in self.sos_kernel._meta['capture_result']:
if msg[0] == 'stream' and msg[1]['name'] == 'stdout':
content += msg[1]['text']
elif args.msg_type == 'stderr':
for msg in self.sos_kernel._meta['capture_result']:
if msg[0] == 'stream' and msg[1]['name'] == 'stderr':
content += msg[1]['text']
elif args.msg_type == 'text':
for msg in self.sos_kernel._meta['capture_result']:
if msg[0] == 'display_data' and 'data' in msg[1] and 'text/plain' in msg[1]['data']:
content += msg[1]['data']['text/plain']
elif args.msg_type == 'markdown':
for msg in self.sos_kernel._meta['capture_result']:
if msg[0] == 'display_data' and 'data' in msg[1] and 'text/markdown' in msg[1]['data']:
content += msg[1]['data']['text/markdown']
elif args.msg_type == 'html':
for msg in self.sos_kernel._meta['capture_result']:
if msg[0] == 'display_data' and 'data' in msg[1] and 'text/html' in msg[1]['data']:
content += msg[1]['data']['text/html']
else:
args.as_type = 'raw'
content = self.sos_kernel._meta['capture_result']
if self.sos_kernel._debug_mode:
self.sos_kernel.warn(
f'Captured {self.sos_kernel._meta["capture_result"][:40]}')
if not args.as_type or args.as_type == 'text':
if not isinstance(content, str):
self.sos_kernel.warn(
'Option --as is only available for message types stdout, stderr, and text.')
elif args.as_type == 'json':
import json
try:
if isinstance(content, str):
content = json.loads(content)
else:
self.sos_kernel.warn(
'Option --as is only available for message types stdout, stderr, and text.')
except Exception as e:
self.sos_kernel.warn(
f'Failed to capture output in JSON format, text returned: {e}')
elif args.as_type == 'csv':
try:
if isinstance(content, str):
with StringIO(content) as ifile:
content = pd.read_csv(ifile)
else:
self.sos_kernel.warn(
'Option --as is only available for message types stdout, stderr, and text.')
except Exception as e:
self.sos_kernel.warn(
f'Failed to capture output in {args.as_type} format, text returned: {e}')
elif args.as_type == 'tsv':
try:
if isinstance(content, str):
with StringIO(content) as ifile:
content = pd.read_csv(ifile, sep='\t')
else:
self.sos_kernel.warn(
'Option --as is only available for message types stdout, stderr, and text.')
except Exception as e:
self.sos_kernel.warn(
f'Failed to capture output in {args.as_type} format, text returned: {e}')
#
if args.__to__ and not args.__to__.isidentifier():
self.sos_kernel.warn(f'Invalid variable name {args.__to__}')
self.sos_kernel._meta['capture_result'] = None
return
if args.__append__ and not args.__append__.isidentifier():
self.sos_kernel.warn(f'Invalid variable name {args.__append__}')
self.sos_kernel._meta['capture_result'] = None
return
if args.__to__:
env.sos_dict.set(args.__to__, content)
elif args.__append__:
if args.__append__ not in env.sos_dict:
env.sos_dict.set(args.__append__, content)
elif isinstance(env.sos_dict[args.__append__], str):
if isinstance(content, str):
env.sos_dict[args.__append__] += content
else:
self.sos_kernel.warn(
f'Cannot append new content of type {type(content).__name__} to {args.__append__} of type {type(env.sos_dict[args.__append__]).__name__}')
elif isinstance(env.sos_dict[args.__append__], dict):
if isinstance(content, dict):
env.sos_dict[args.__append__].update(content)
else:
self.sos_kernel.warn(
f'Cannot append new content of type {type(content).__name__} to {args.__append__} of type {type(env.sos_dict[args.__append__]).__name__}')
elif isinstance(env.sos_dict[args.__append__], pd.DataFrame):
if isinstance(content, pd.DataFrame):
env.sos_dict.set(
args.__append__, env.sos_dict[args.__append__].append(content))
else:
self.sos_kernel.warn(
f'Cannot append new content of type {type(content).__name__} to {args.__append__} of type {type(env.sos_dict[args.__append__]).__name__}')
elif isinstance(env.sos_dict[args.__append__], list):
env.sos_dict[args.__append__].append(content)
else:
self.sos_kernel.warn(
f'Cannot append new content of type {type(content).__name__} to {args.__append__} of type {type(env.sos_dict[args.__append__]).__name__}')
else:
env.sos_dict.set('__captured', content)
import pprint
self.sos_kernel.send_frontend_msg('display_data',
{'metadata': {},
'data': {'text/plain': pprint.pformat(content)}
}, title="__captured", append=False, page='Preview')
self.sos_kernel._meta['capture_result'] = None
class Cd_Magic(SoS_Magic):
name = 'cd'
def __init__(self, kernel):
super(Cd_Magic, self).__init__(kernel)
def handle_magic_cd(self, option):
if not option:
return
to_dir = option.strip()
try:
os.chdir(os.path.expanduser(to_dir))
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'stream',
{'name': 'stdout', 'text': os.getcwd()})
except Exception as e:
self.sos_kernel.warn(
f'Failed to change dir to {os.path.expanduser(to_dir)}: {e}')
return
#
cur_kernel = self.sos_kernel.kernel
try:
for kernel in self.sos_kernel.kernels.keys():
if kernel not in self.sos_kernel.supported_languages:
self.sos_kernel.warn(
f'Current directory of kernel {kernel} is not changed: unsupported language')
continue
lan = self.sos_kernel.supported_languages[kernel]
if hasattr(lan, 'cd_command'):
try:
self.sos_kernel.switch_kernel(kernel)
cmd = interpolate(lan.cd_command, {'dir': to_dir})
self.sos_kernel.run_cell(
cmd, True, False, on_error=f'Failed to execute {cmd} in {kernel}')
except Exception as e:
self.sos_kernel.warn(
f'Current directory of kernel {kernel} is not changed: {e}')
else:
self.sos_kernel.warn(
f'Current directory of kernel {kernel} is not changed: cd_command not defined')
finally:
self.sos_kernel.switch_kernel(cur_kernel)
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
self.handle_magic_cd(options)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Clear_Magic(SoS_Magic):
name = 'clear'
def __init__(self, kernel):
super(Clear_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%clear',
description='''Clear the output of the current cell, or the current
active cell if executed in the sidepanel.''')
parser.add_argument('-a', '--all', action='store_true',
help='''Clear all output or selected status or class of the current notebook.''')
grp = parser.add_mutually_exclusive_group()
grp.add_argument('-s', '--status', nargs='+',
                         help='''Clear tasks that match specified status (e.g. completed).''')
grp.add_argument('-c', '--class', nargs='+', dest='elem_class',
help='''Clear all HTML elements with specified classes (e.g. sos_hint)''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
try:
args = parser.parse_args(options.split())
except SystemExit:
return
# self.sos_kernel._meta['cell_id'] could be reset by _do_execute
cell_id = self.sos_kernel._meta['cell_id']
try:
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
finally:
if self.sos_kernel._meta.get('batch_mode', False):
return
if args.status:
status_style = [self.status_class[x] for x in args.status]
else:
status_style = None
self.sos_kernel.send_frontend_msg(
'clear-output', [cell_id, args.all, status_style, args.elem_class])
class ConnectInfo_Magic(SoS_Magic):
name = 'connectinfo'
def __init__(self, kernel):
super(ConnectInfo_Magic, self).__init__(kernel)
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
cfile = find_connection_file()
with open(cfile) as conn:
conn_info = conn.read()
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'stream',
{'name': 'stdout', 'text': 'Connection file: {}\n{}'.format(cfile, conn_info)})
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Debug_Magic(SoS_Magic):
name = 'debug'
def __init__(self, kernel):
super(Debug_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%debug',
description='''Turn on or off debug information''')
parser.add_argument('status', choices=['on', 'off'],
help='''Turn on or off debugging''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
try:
args = parser.parse_args(options.split())
except SystemExit:
return
self.sos_kernel._debug_mode = args.status == 'on'
if self.sos_kernel._debug_mode:
self.sos_kernel.warn(remaining_code)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Dict_Magic(SoS_Magic):
name = 'dict'
def __init__(self, kernel):
super(Dict_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%dict',
description='Inspect or reset SoS dictionary')
parser.add_argument('vars', nargs='*')
parser.add_argument('-k', '--keys', action='store_true',
help='Return only keys')
parser.add_argument('-r', '--reset', action='store_true',
                            help='Reset SoS dictionary (clear all user variables)')
parser.add_argument('-a', '--all', action='store_true',
                            help='Return all variables, including system functions and variables')
parser.add_argument('-d', '--del', nargs='+', metavar='VAR', dest='__del__',
help='Remove specified variables from SoS dictionary')
parser.error = self._parse_error
return parser
def handle_magic_dict(self, line):
'Magic that displays content of the dictionary'
        # do not return __builtins__ because it is too long...
parser = self.get_parser()
try:
args = parser.parse_args(shlex.split(line))
except SystemExit:
return
for x in args.vars:
            if x not in env.sos_dict:
self.sos_kernel.warn(
'Unrecognized sosdict option or variable name {}'.format(x))
return
if args.reset:
self.sos_kernel._reset_dict()
return
if args.__del__:
for x in args.__del__:
if x in env.sos_dict:
env.sos_dict.pop(x)
return
if args.keys:
if args.all:
self.sos_kernel.send_result(env.sos_dict._dict.keys())
elif args.vars:
self.sos_kernel.send_result(set(args.vars))
else:
self.sos_kernel.send_result({x for x in env.sos_dict._dict.keys(
) if not x.startswith('__')} - self.sos_kernel.original_keys)
else:
if args.all:
self.sos_kernel.send_result(env.sos_dict._dict)
elif args.vars:
self.sos_kernel.send_result(
{x: y for x, y in env.sos_dict._dict.items() if x in args.vars})
else:
self.sos_kernel.send_result({x: y for x, y in env.sos_dict._dict.items() if
x not in self.sos_kernel.original_keys and not x.startswith('__')})
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
# %dict should be the last magic
options, remaining_code = self.get_magic_and_code(code, False)
self.handle_magic_dict(options)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Expand_Magic(SoS_Magic):
name = 'expand'
def __init__(self, kernel):
super(Expand_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%expand',
description='''Expand the script in the current cell with default ({}) or
specified sigil.''')
        parser.add_argument('sigil', nargs='?', help='''Sigil to be used to interpolate the
                            text. It can be quoted, or be specified as two options.''')
parser.add_argument('right_sigil', nargs='?', help='''Right sigil if the sigil is
specified as two pieces.''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
lines = code.splitlines()
options = lines[0]
parser = self.get_parser()
try:
args = parser.parse_args(options.split()[1:])
except SystemExit:
return
if self.sos_kernel.kernel.lower() == 'sos':
self.sos_kernel.warn(
'Use of %expand magic in SoS cells is deprecated.')
if args.sigil in ('None', None):
sigil = None
if args.right_sigil is not None:
sigil = f'{args.sigil} {args.right_sigil}'
# now we need to expand the text, but separate the SoS magics first
lines = lines[1:]
start_line: int = 0
for idx, line in enumerate(lines):
if line.strip() and not any(line.startswith(f'%{x} ') for x in SoS_Magics.names) and not line.startswith('!'):
start_line = idx
break
text = '\n'.join(lines[start_line:])
if sigil is not None and sigil != '{ }':
from sos.parser import replace_sigil
text = replace_sigil(text, sigil)
try:
interpolated = interpolate(text, local_dict=env.sos_dict._dict)
remaining_code = '\n'.join(
lines[:start_line] + [interpolated]) + '\n'
            # self.sos_kernel.options will be set to influence the execution of remaining_code
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
except Exception as e:
self.sos_kernel.warn(e)
return
class Get_Magic(SoS_Magic):
name = 'get'
def __init__(self, kernel):
super(Get_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%get',
description='''Get specified variables from another kernel, which is
by default the SoS kernel.''')
parser.add_argument('--from', dest='__from__',
help='''Name of kernel from which the variables will be obtained.
Default to the SoS kernel.''')
parser.add_argument('vars', nargs='*',
help='''Names of SoS variables''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
try:
parser = self.get_parser()
try:
args = parser.parse_args(options.split())
except SystemExit:
return
except Exception as e:
self.sos_kernel.warn(f'Invalid option "{options}": {e}\n')
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self.sos_kernel._execution_count,
}
self.sos_kernel.get_vars_from(args.vars, args.__from__, explicit=True)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Matplotlib_Magic(SoS_Magic):
name = 'matplotlib'
def __init__(self, kernel):
super(Matplotlib_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%matplotlib',
                                         description='''Set the matplotlib backend''')
parser.add_argument('gui', choices=['agg', 'gtk', 'gtk3', 'inline', 'ipympl', 'nbagg',
'notebook', 'osx', 'pdf', 'ps', 'qt', 'qt4', 'qt5', 'svg', 'tk', 'widget', 'wx'],
nargs='?',
                            help='''Name of the matplotlib backend to use''')
parser.add_argument('-l', '--list', action='store_true',
help='''Show available matplotlib backends''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
try:
args = parser.parse_args(shlex.split(options))
except SystemExit:
return
if args.list:
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'stream',
{'name': 'stdout', 'text': 'Available matplotlib backends: {}'.format(
['agg', 'gtk', 'gtk3', 'inline', 'ipympl', 'nbagg', 'notebook',
'osx', 'pdf', 'ps', 'qt', 'qt4', 'qt5', 'svg', 'tk', 'widget', 'wx'])})
return
try:
_, backend = self.sos_kernel.shell.enable_matplotlib(args.gui)
if not args.gui or args.gui == 'auto':
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'stream',
{'name': 'stdout',
'text': f'Using matplotlib backend {backend}'})
except Exception as e:
self.sos_kernel.warn(
                'Failed to set matplotlib backend {}: {}'.format(options, e))
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Paste_Magic(SoS_Magic):
name = 'paste'
def __init__(self, kernel):
super(Paste_Magic, self).__init__(kernel)
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
if self.sos_kernel._meta.get('batch_mode', False):
return
options, remaining_code = self.get_magic_and_code(code, True)
try:
old_options = self.sos_kernel.options
self.sos_kernel.options = options + ' ' + self.sos_kernel.options
try:
if sys.platform == 'darwin':
try:
code = osx_clipboard_get()
except Exception:
code = tkinter_clipboard_get()
else:
code = tkinter_clipboard_get()
except ClipboardEmpty:
raise UsageError("The clipboard appears to be empty")
except Exception as e:
env.logger.warn(
f'Failed to get text from the clipboard: {e}')
return
#
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'stream',
{'name': 'stdout', 'text': code.strip() + '\n## -- End pasted text --\n'})
return self.sos_kernel._do_execute(code, silent, store_history, user_expressions, allow_stdin)
finally:
self.sos_kernel.options = old_options
class Preview_Magic(SoS_Magic):
name = 'preview'
def __init__(self, kernel):
super(Preview_Magic, self).__init__(kernel)
self.previewers = None
def preview_var(self, item, style=None):
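        # return a short textual description of a variable or expression together
        # with its formatted (display_data) representation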
if item in env.sos_dict:
obj = env.sos_dict[item]
else:
obj = SoS_eval(item)
# get the basic information of object
txt = type(obj).__name__
# we could potentially check the shape of data frame and matrix
# but then we will need to import the numpy and pandas libraries
if hasattr(obj, 'shape') and getattr(obj, 'shape') is not None:
txt += f' of shape {getattr(obj, "shape")}'
elif isinstance(obj, Sized):
            txt += f' of length {len(obj)}'
if isinstance(obj, ModuleType):
return txt, ({'text/plain': pydoc.render_doc(obj, title='SoS Documentation: %s')}, {})
elif hasattr(obj, 'to_html') and getattr(obj, 'to_html') is not None:
try:
from sos.visualize import Visualizer
result = Visualizer(self, style).preview(obj)
if isinstance(result, (list, tuple)) and len(result) == 2:
return txt, result
elif isinstance(result, dict):
return txt, (result, {})
elif result is None:
return txt, None
else:
raise ValueError(
f'Unrecognized return value from visualizer: {short_repr(result)}.')
except Exception as e:
self.sos_kernel.warn(f'Failed to preview variable: {e}')
return txt, self.sos_kernel.format_obj(obj)
else:
return txt, self.sos_kernel.format_obj(obj)
def preview_file(self, filename, style=None, title=''):
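        # preview a file in the Preview panel using the first matching previewer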
if not os.path.isfile(filename):
self.sos_kernel.warn('\n> ' + filename + ' does not exist')
return
self.sos_kernel.send_frontend_msg('display_data',
{'metadata': {},
'data': {
'text/plain': f'\n> {filename} ({pretty_size(os.path.getsize(filename))}):',
'text/html': HTML(
f'<div class="sos_hint">> {filename} ({pretty_size(os.path.getsize(filename))}):</div>').data,
}
}, title=title, append=True, page='Preview')
previewer_func = None
# lazy import of previewers
if self.previewers is None:
from sos.preview import get_previewers
self.previewers = get_previewers()
for x, y, _ in self.previewers:
if isinstance(x, str):
if fnmatch.fnmatch(os.path.basename(filename), x):
# we load entrypoint only before it is used. This is to avoid
# loading previewers that require additional external modules
# we can cache the loaded function but there does not seem to be
# a strong performance need for this.
previewer_func = y.load()
break
else:
# it should be a function
try:
if x(filename):
try:
previewer_func = y.load()
except Exception as e:
self.sos_kernel.send_frontend_msg('stream',
dict(name='stderr',
text=f'Failed to load previewer {y}: {e}'),
title=title, append=True, page='Preview')
continue
break
except Exception as e:
self.sos_kernel.send_frontend_msg('stream', {
'name': 'stderr',
'text': str(e)},
title=title, append=True, page='Preview')
continue
#
# if no previewer can be found
if previewer_func is None:
return
try:
result = previewer_func(filename, self.sos_kernel, style)
if not result:
return
if isinstance(result, str):
if result.startswith('HINT: '):
result = result.splitlines()
hint_line = result[0][6:].strip()
result = '\n'.join(result[1:])
self.sos_kernel.send_frontend_msg('display_data',
{
'metadata': {},
'data': {'text/html': HTML(
f'<div class="sos_hint">{hint_line}</div>').data}
}, title=title, append=True, page='Preview')
if result:
self.sos_kernel.send_frontend_msg('stream',
{'name': 'stdout',
'text': result},
title=title, append=True, page='Preview')
elif isinstance(result, dict):
self.sos_kernel.send_frontend_msg('display_data',
{'data': result, 'metadata': {}},
title=title, append=True, page='Preview')
            elif isinstance(result, (list, tuple)) and len(result) == 2:
self.sos_kernel.send_frontend_msg('display_data',
{'data': result[0],
'metadata': result[1]},
title=title, append=True, page='Preview')
else:
self.sos_kernel.send_frontend_msg('stream',
dict(
name='stderr', text=f'Unrecognized preview content: {result}'),
title=title, append=True, page='Preview')
except Exception as e:
if self.sos_kernel._debug_mode:
self.sos_kernel.send_frontend_msg('stream',
dict(
name='stderr', text=f'Failed to preview {filename}: {e}'),
title=title, append=True, page='Preview')
def get_parser(self):
parser = argparse.ArgumentParser(prog='%preview',
description='''Preview files, sos variables, or expressions in the
side panel, or notebook if side panel is not opened, unless
options --panel or --notebook is specified.''')
parser.add_argument('items', nargs='*',
help='''Filename, variable name, or expression. Wildcard characters
such as '*' and '?' are allowed for filenames.''')
parser.add_argument('-k', '--kernel',
help='''kernel in which variables will be previewed. By default
the variable will be previewed in the current kernel of the cell.''')
parser.add_argument('-w', '--workflow', action='store_true',
help='''Preview notebook workflow''')
parser.add_argument('-o', '--keep-output', action='store_true',
help='''Do not clear the output of the side panel.''')
# this option is currently hidden
parser.add_argument('-s', '--style', choices=['table', 'scatterplot', 'png'],
help='''Option for preview file or variable, which by default is "table"
for Pandas DataFrame. The %%preview magic also accepts arbitrary additional
keyword arguments, which would be interpreted by individual style. Passing
'-h' with '--style' would display the usage information of particular
style.''')
parser.add_argument('-r', '--host', dest='host', metavar='HOST',
help='''Preview files on specified remote host, which should
be one of the hosts defined in sos configuration files.''')
parser.add_argument('--off', action='store_true',
help='''Turn off file preview''')
loc = parser.add_mutually_exclusive_group()
loc.add_argument('-p', '--panel', action='store_true',
help='''Preview in side panel even if the panel is currently closed''')
loc.add_argument('-n', '--notebook', action='store_true',
help='''Preview in the main notebook.''')
parser.add_argument('-c', '--config', help='''A configuration file with host
definitions, in case the definitions are not defined in global or local
sos config.yml files.''')
parser.error = self._parse_error
return parser
def handle_magic_preview(self, items, kernel=None, style=None, title=''):
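        # preview each item as a file, directory, or glob pattern, and otherwise
        # as a variable or expression in the current or specified kernel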
handled = [False for x in items]
for idx, item in enumerate(items):
try:
# quoted
if (item.startswith('"') and item.endswith('"')) or \
(item.startswith("'") and item.endswith("'")):
try:
item = eval(item)
except Exception:
pass
item = os.path.expanduser(item)
if os.path.isfile(item):
self.preview_file(item, style, title=title)
handled[idx] = True
continue
if os.path.isdir(item):
handled[idx] = True
_, dirs, files = os.walk(item).__next__()
self.sos_kernel.send_frontend_msg('display_data',
{'metadata': {},
'data': {'text/plain': '>>> ' + item + ':\n',
'text/html': HTML(
f'<div class="sos_hint">> {item}: directory<br>{len(files)} file{"s" if len(files)>1 else ""}<br>{len(dirs)} subdirector{"y" if len(dirs)<=1 else "ies"}</div>').data
}
}, title=title, append=False, page='Preview')
continue
else:
import glob
files = glob.glob(item)
if files:
for pfile in files:
self.preview_file(pfile, style, title=title)
handled[idx] = True
continue
except Exception as e:
self.sos_kernel.warn(f'\n> Failed to preview file {item}: {e}')
continue
# non-sos kernel
use_sos = kernel in ('sos', 'SoS') or (
kernel is None and self.sos_kernel.kernel == 'SoS')
orig_kernel = self.sos_kernel.kernel
if kernel is not None and self.sos_kernel.kernel != self.sos_kernel.subkernels.find(kernel).name:
self.sos_kernel.switch_kernel(kernel)
if self.sos_kernel._meta['use_panel']:
self.sos_kernel.send_frontend_msg(
'preview-kernel', self.sos_kernel.kernel, page='Preview')
try:
for idx, item in enumerate(items):
try:
# quoted
if (item.startswith('"') and item.endswith('"')) or \
(item.startswith("'") and item.endswith("'")):
try:
item = eval(item)
except Exception:
pass
if use_sos:
obj_desc, preview = self.preview_var(item, style)
if preview is None:
continue
else:
format_dict, md_dict = preview
self.sos_kernel.send_frontend_msg('display_data',
{'metadata': {},
'data': {'text/plain': '>>> ' + item + ':\n',
'text/html': HTML(
f'<div class="sos_hint">> {item}: {obj_desc}</div>').data
}
}, title=title, append=True, page='Preview')
self.sos_kernel.send_frontend_msg('display_data',
{'execution_count': self.sos_kernel._execution_count, 'data': format_dict,
'metadata': md_dict}, title=title, append=True, page='Preview')
else:
# evaluate
responses = self.sos_kernel.get_response(
item, ['stream', 'display_data', 'execution_result', 'error'])
if not self.sos_kernel._debug_mode:
# if the variable or expression is invalid, do not do anything
responses = [
x for x in responses if x[0] != 'error']
if responses:
self.sos_kernel.send_frontend_msg('display_data',
{'metadata': {},
'data': {'text/plain': '>>> ' + item + ':\n',
'text/html': HTML(
f'<div class="sos_hint">> {item}:</div>').data
}
}, title=title, append=True, page='Preview')
for response in responses:
# self.sos_kernel.warn(f'{response[0]} {response[1]}' )
self.sos_kernel.send_frontend_msg(
response[0], response[1], title=title, append=True, page='Preview')
else:
raise ValueError(
                                f'Cannot preview expression {item}')
except Exception as e:
if not handled[idx]:
self.sos_kernel.send_frontend_msg('stream',
dict(name='stderr',
text='> Failed to preview file or expression {}{}'.format(
item, f': {e}' if self.sos_kernel._debug_mode else '')),
title=title, append=True, page='Preview')
finally:
self.sos_kernel.switch_kernel(orig_kernel)
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
options = shlex.split(options, posix=False)
help_option = []
if ('-s' in options or '--style' in options) and '-h' in options:
# defer -h to subparser
options.remove('-h')
help_option = ['-h']
try:
args, style_options = parser.parse_known_args(options)
except SystemExit:
return
#
style_options.extend(help_option)
style = {'style': args.style, 'options': style_options}
#
if args.off:
self.preview_output = False
else:
self.preview_output = True
#
if args.panel:
self.sos_kernel._meta['use_panel'] = True
elif args.notebook:
self.sos_kernel._meta['use_panel'] = False
# else, use default _use_panel
try:
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
finally:
# preview workflow
if args.workflow:
import random
ta_id = 'preview_wf_{}'.format(random.randint(1, 1000000))
content = {
'data': {
'text/plain': self.sos_kernel._meta['workflow'],
'text/html': HTML(
f'<textarea id="{ta_id}">{self.sos_kernel._meta["workflow"]}</textarea>').data
},
'metadata': {}
}
self.sos_kernel.send_frontend_msg('display_data', content,
title='%preview --workflow', page='Workflow')
self.sos_kernel.send_frontend_msg('highlight-workflow', ta_id)
if not args.off and args.items:
if args.host:
title = f'%preview {" ".join(args.items)} -r {args.host}'
else:
title = f'%preview {" ".join(args.items)}'
# reset preview panel
if not self.sos_kernel._meta['use_panel']:
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'display_data',
{
'metadata': {},
'data': {'text/html': HTML(f'<div class="sos_hint">{title}</div>').data}
})
else:
# clear the page
self.sos_kernel.send_frontend_msg(
'display_data', {}, page='Preview')
if args.host is None:
self.handle_magic_preview(
args.items, args.kernel, style,
title=title)
elif args.workflow:
self.sos_kernel.warn(
'Invalid option --kernel with -r (--host)')
elif args.kernel:
self.sos_kernel.warn(
'Invalid option --kernel with -r (--host)')
else:
if args.config:
from sos.utils import load_config_files
load_config_files(args.config)
try:
rargs = ['sos', 'preview', '--html'] + options
rargs = [x for x in rargs if x not in (
'-n', '--notebook', '-p', '--panel')]
if self.sos_kernel._debug_mode:
self.sos_kernel.warn(f'Running "{" ".join(rargs)}"')
for msg in eval(subprocess.check_output(rargs)):
self.sos_kernel.send_frontend_msg(
msg[0], msg[1], title=title, append=True, page='Preview')
except Exception as e:
self.sos_kernel.warn('Failed to preview {} on remote host {}{}'.format(
args.items, args.host, f': {e}' if self.sos_kernel._debug_mode else ''))
class Pull_Magic(SoS_Magic):
name = 'pull'
def __init__(self, kernel):
super(Pull_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser('pull',
description='''Pull files or directories from remote host to local host''')
parser.add_argument('items', nargs='+', help='''Files or directories to be
retrieved from remote host. The files should be relative to local file
system. The files to retrieve are determined by "path_map"
determined by "paths" definitions of local and remote hosts.''')
parser.add_argument('-f', '--from', dest='host',
                            help='''Remote host from which the files will be retrieved, which should
be one of the hosts defined in sos configuration files.''')
parser.add_argument('-c', '--config', help='''A configuration file with host
definitions, in case the definitions are not defined in global or local
sos config.yml files.''')
parser.add_argument('-v', '--verbosity', type=int, choices=range(5), default=2,
help='''Output error (0), warning (1), info (2), debug (3) and trace (4)
information to standard output (default to 2).''')
parser.error = self._parse_error
return parser
def handle_magic_pull(self, args):
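        # Load optional host definitions, connect to the remote host, and copy the
        # requested items back to the local file system.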
from sos.hosts import Host
if args.config:
from sos.utils import load_config_files
load_config_files(args.config)
env.sos_dict['CONFIG']
try:
host = Host(args.host)
#
received = host.receive_from_host(args.items)
#
msg = '{} item{} received from {}:<br>{}'.format(len(received),
' is' if len(
received) <= 1 else 's are', args.host,
'<br>'.join([f'{x} <= {received[x]}' for x in
sorted(received.keys())]))
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'display_data',
{
'metadata': {},
'data': {'text/html': HTML(f'<div class="sos_hint">{msg}</div>').data}
})
except Exception as e:
self.sos_kernel.warn(
f'Failed to retrieve {", ".join(args.items)}: {e}')
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
try:
parser = self.get_parser()
try:
args = parser.parse_args(options.split())
except SystemExit:
return
except Exception as e:
self.sos_kernel.warn(f'Invalid option "{options}": {e}\n')
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self.sos_kernel._execution_count,
}
self.handle_magic_pull(args)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Push_Magic(SoS_Magic):
name = 'push'
def __init__(self, kernel):
super(Push_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser('push',
description='''Push local files or directory to a remote host''')
parser.add_argument('items', nargs='+', help='''Files or directories to be sent
to remote host. The location of remote files are determined by "path_map"
determined by "paths" definitions of local and remote hosts.''')
parser.add_argument('-t', '--to', dest='host',
help='''Remote host to which the files will be sent. SoS will list all
configured queues if no such key is defined''')
parser.add_argument('-c', '--config', help='''A configuration file with host
definitions, in case the definitions are not defined in global or local
sos config.yml files.''')
parser.add_argument('-v', '--verbosity', type=int, choices=range(5), default=2,
help='''Output error (0), warning (1), info (2), debug (3) and trace (4)
information to standard output (default to 2).''')
parser.error = self._parse_error
return parser
def handle_magic_push(self, args):
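        # Load optional host definitions, connect to the remote host, and send the
        # requested items to it.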
from sos.hosts import Host
if args.config:
from sos.utils import load_config_files
load_config_files(args.config)
env.sos_dict['CONFIG']
try:
host = Host(args.host)
#
sent = host.send_to_host(args.items)
#
msg = '{} item{} sent to {}:<br>{}'.format(len(sent),
' is' if len(
sent) <= 1 else 's are', args.host,
'<br>'.join([f'{x} => {sent[x]}' for x in sorted(sent.keys())]))
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'display_data',
{
'metadata': {},
'data': {'text/html': HTML(f'<div class="sos_hint">{msg}</div>').data}
})
except Exception as e:
self.sos_kernel.warn(f'Failed to send {", ".join(args.items)}: {e}')
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
try:
parser = self.get_parser()
try:
args = parser.parse_args(options.split())
except SystemExit:
return
except Exception as e:
self.sos_kernel.warn(f'Invalid option "{options}": {e}\n')
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self.sos_kernel._execution_count,
}
self.handle_magic_push(args)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Put_Magic(SoS_Magic):
name = 'put'
def __init__(self, kernel):
super(Put_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%put',
description='''Put specified variables in the subkernel to another
kernel, which is by default the SoS kernel.''')
parser.add_argument('--to', dest='__to__',
                            help='''Name of the kernel to which the variables will be sent.
Default to the SoS kernel.''')
parser.add_argument('vars', nargs='*',
help='''Names of SoS variables''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
try:
parser = self.get_parser()
try:
args = parser.parse_args(options.split())
except SystemExit:
return
except Exception as e:
self.sos_kernel.warn(f'Invalid option "{options}": {e}\n')
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self.sos_kernel._execution_count,
}
self.sos_kernel.put_vars_to(args.vars, args.__to__, explicit=True)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Render_Magic(SoS_Magic):
name = 'render'
def __init__(self, kernel):
super(Render_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%render',
description='''Treat the output of a SoS cell as another format, default to markdown.''')
parser.add_argument('msg_type', default='stdout', choices=['stdout', 'text'], nargs='?',
help='''Message type to capture, default to standard output. In terms of Jupyter message
types, "stdout" refers to "stream" message with "stdout" type, and "text" refers to
"display_data" message with "text/plain" type.''')
parser.add_argument('--as', dest='as_type', default='Markdown', nargs='?',
help='''Format to render output of cell, default to Markdown, but can be any
format that is supported by the IPython.display module such as HTML, Math, JSON,
JavaScript and SVG.''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
try:
args = parser.parse_args(shlex.split(options))
except SystemExit:
return
try:
self.sos_kernel._meta['capture_result'] = []
self.sos_kernel._meta['render_result'] = args.as_type
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
finally:
content = ''
if args.msg_type == 'stdout':
for msg in self.sos_kernel._meta['capture_result']:
if msg[0] == 'stream' and msg[1]['name'] == 'stdout':
content += msg[1]['text']
elif args.msg_type == 'text':
for msg in self.sos_kernel._meta['capture_result']:
if msg[0] == 'display_data' and 'data' in msg[1] and 'text/plain' in msg[1]['data']:
content += msg[1]['data']['text/plain']
try:
if content:
format_dict, md_dict = self.sos_kernel.format_obj(
self.sos_kernel.render_result(content))
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'display_data',
{'metadata': md_dict,
'data': format_dict
})
finally:
self.sos_kernel._meta['capture_result'] = None
self.sos_kernel._meta['render_result'] = False
class Rerun_Magic(SoS_Magic):
name = 'rerun'
def __init__(self, kernel):
super(Rerun_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%rerun',
description='''Re-execute the last executed code, most likely with
different command line options''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, True)
old_options = self.sos_kernel.options
self.sos_kernel.options = options + ' ' + self.sos_kernel.options
try:
self.sos_kernel._meta['workflow_mode'] = True
old_dict = env.sos_dict
self.sos_kernel._reset_dict()
if not self.sos_kernel.last_executed_code:
self.sos_kernel.warn('No saved script')
self.sos_kernel.last_executed_code = ''
return self.sos_kernel._do_execute(self.sos_kernel.last_executed_code, silent, store_history, user_expressions, allow_stdin)
except Exception as e:
self.sos_kernel.warn(f'Failed to execute workflow: {e}')
raise
finally:
old_dict.quick_update(env.sos_dict._dict)
env.sos_dict = old_dict
self.sos_kernel._meta['workflow_mode'] = False
self.sos_kernel.options = old_options
class Revisions_Magic(SoS_Magic):
name = 'revisions'
def __init__(self, kernel):
super(Revisions_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%revision',
description='''Revision history of the document, parsed from the log
message of the notebook if it is kept in a git repository. Additional parameters to "git log" command
(e.g. -n 5 --since --after) could be specified to limit the revisions to display.''')
parser.add_argument('-s', '--source', nargs='?', default='',
                            help='''Source URL to create links for revisions.
            SoS automatically parses the source URL of the origin and provides variables "repo" for complete origin
URL without trailing ".git" (e.g. https://github.com/vatlab/sos-notebook), "path" for complete
path name (e.g. src/document/doc.ipynb), "filename" for only the name of the "path", and "revision"
for revisions. Because sos interpolates command line by default, variables in URL template should be
            included with double braces (e.g. --source {{repo}}/blob/{{revision}}/{{path}}). If this option is
provided without value and the document is hosted on github, a default template will be provided.''')
parser.add_argument('-l', '--links', nargs='+', help='''Name and URL or additional links for related
files (e.g. --links report URL_to_repo ) with URL interpolated as option --source.''')
parser.error = self._parse_error
return parser
def handle_magic_revisions(self, args, unknown_args):
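        # Collect the git log of the notebook file and render it as an HTML table,
        # optionally linking each revision (and related files) to a source URL.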
filename = self.sos_kernel._meta['notebook_name'] + '.ipynb'
path = self.sos_kernel._meta['notebook_path']
revisions = subprocess.check_output(['git', 'log'] + unknown_args + ['--date=short', '--pretty=%H!%cN!%cd!%s',
'--', filename]).decode().splitlines()
if not revisions:
return
# args.source is None for --source without option
if args.source != '' or args.links:
# need to determine origin etc for interpolation
try:
origin = subprocess.check_output(
['git', 'ls-remote', '--get-url', 'origin']).decode().strip()
repo = origin[:-4] if origin.endswith('.git') else origin
except Exception as e:
repo = ''
if self.sos_kernel._debug_mode:
self.sos_kernel.warn(f'Failed to get repo URL: {e}')
if args.source is None:
if 'github.com' in repo:
args.source = '{repo}/blob/{revision}/{path}'
if self.sos_kernel._debug_mode:
self.sos_kernel.warn(
f"source is set to {args.source} with repo={repo}")
else:
args.source = ''
self.sos_kernel.warn(
f'A default source URL is unavailable for repository {repo}')
text = '''
<table class="revision_table">
<tr>
<th>Revision</th>
<th>Author</th>
<th>Date</th>
<th>Message</th>
<tr>
'''
for line in revisions:
fields = line.split('!', 3)
revision = fields[0]
            fields[0] = f'<span class="revision_id">{fields[0][:7]}</span>'
if args.source != '':
# source URL
URL = interpolate(args.source, {'revision': revision, 'repo': repo,
'filename': filename, 'path': path})
fields[0] = f'<a target="_blank" href="{URL}">{fields[0]}</a>'
links = []
if args.links:
for i in range(len(args.links) // 2):
name = args.links[2 * i]
if len(args.links) == 2 * i + 1:
continue
URL = interpolate(args.links[2 * i + 1],
{'revision': revision, 'repo': repo, 'filename': filename, 'path': path})
links.append(f'<a target="_blank" href="{URL}">{name}</a>')
if links:
fields[0] += ' (' + ', '.join(links) + ')'
text += '<tr>' + \
'\n'.join(f'<td>{x}</td>' for x in fields) + '</tr>'
text += '</table>'
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'display_data',
{
'metadata': {},
'data': {'text/html': HTML(text).data}
})
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, True)
parser = self.get_parser()
try:
args, unknown_args = parser.parse_known_args(
shlex.split(options))
except SystemExit:
return
try:
self.handle_magic_revisions(args, unknown_args)
except Exception as e:
self.sos_kernel.warn(f'Failed to retrieve revisions of notebook: {e}')
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Run_Magic(SoS_Magic):
name = 'run'
def __init__(self, kernel):
super(Run_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%run',
description='''Execute the current cell with specified command line
arguments. Arguments set by magic %set will be appended at the
end of command line''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
# there can be multiple %run magic, but there should not be any other magics
run_code = code
run_options = []
while True:
if self.pattern.match(run_code):
options, run_code = self.get_magic_and_code(
run_code, False)
run_options.append(options)
else:
break
# if there are more magics after %run, they will be ignored so a warning
# is needed.
if run_code.lstrip().startswith('%') and not any(run_code.lstrip().startswith(x) for x in ('%include', '%from')):
self.sos_kernel.warn(
f'Magic {run_code.split()[0]} after magic %run will be ignored.')
if not any(SOS_SECTION_HEADER.match(line) for line in run_code.splitlines()):
run_code = '[default]\n' + run_code
# now we need to run the code multiple times with each option
for options in run_options:
old_options = self.sos_kernel.options
self.sos_kernel.options = options + ' ' + self.sos_kernel.options
try:
# %run is executed in its own namespace
old_dict = env.sos_dict
self.sos_kernel._reset_dict()
self.sos_kernel._meta['workflow_mode'] = True
if self.sos_kernel._debug_mode:
self.sos_kernel.warn(f'Executing\n{run_code}')
ret = self.sos_kernel._do_execute(run_code, silent, store_history, user_expressions,
allow_stdin)
except Exception as e:
self.sos_kernel.warn(f'Failed to execute workflow: {e}')
raise
finally:
old_dict.quick_update(env.sos_dict._dict)
env.sos_dict = old_dict
self.sos_kernel._meta['workflow_mode'] = False
self.sos_kernel.options = old_options
return ret
class Sandbox_Magic(SoS_Magic):
name = 'sandbox'
def __init__(self, kernel):
super(Sandbox_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%sandbox',
description='''Execute content of a cell in a temporary directory
with fresh dictionary (by default).''')
parser.add_argument('-d', '--dir',
help='''Execute workflow in specified directory. The directory
will be created if does not exist, and will not be removed
after the completion. ''')
parser.add_argument('-k', '--keep-dict', action='store_true',
help='''Keep current sos dictionary.''')
parser.add_argument('-e', '--expect-error', action='store_true',
                            help='''If set, expect error from the execution and report
success if an error occurs.''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
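        # Run the cell in a temporary (or user-specified) directory, optionally with a
        # fresh SoS dictionary, and restore the original state afterwards.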
import tempfile
import shutil
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
try:
args = parser.parse_args(shlex.split(options))
except SystemExit:
return
self.in_sandbox = True
try:
old_dir = os.getcwd()
if args.dir:
args.dir = os.path.expanduser(args.dir)
if not os.path.isdir(args.dir):
os.makedirs(args.dir)
env.exec_dir = os.path.abspath(args.dir)
os.chdir(args.dir)
else:
new_dir = tempfile.mkdtemp()
env.exec_dir = os.path.abspath(new_dir)
os.chdir(new_dir)
if not args.keep_dict:
old_dict = env.sos_dict
self.sos_kernel._reset_dict()
ret = self.sos_kernel._do_execute(
remaining_code, silent, store_history, user_expressions, allow_stdin)
if args.expect_error and ret['status'] == 'error':
# self.sos_kernel.warn('\nSandbox execution failed.')
return {'status': 'ok',
'payload': [], 'user_expressions': {},
'execution_count': self.sos_kernel._execution_count}
else:
return ret
finally:
if not args.keep_dict:
env.sos_dict = old_dict
os.chdir(old_dir)
if not args.dir:
shutil.rmtree(new_dir)
self.in_sandbox = False
# env.exec_dir = old_dir
class Save_Magic(SoS_Magic):
name = 'save'
def __init__(self, kernel):
super(Save_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%save',
description='''Save the content of the cell (after the magic itself) to specified file''')
parser.add_argument('filename',
help='''Filename of saved report or script.''')
parser.add_argument('-f', '--force', action='store_true',
help='''If destination file already exists, overwrite it.''')
parser.add_argument('-a', '--append', action='store_true',
help='''If destination file already exists, append to it.''')
parser.add_argument('-x', '--set-executable', dest="setx", action='store_true',
help='''Set `executable` permission to saved script.''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
# if sos kernel ...
options, remaining_code = self.get_magic_and_code(code, False)
try:
parser = self.get_parser()
try:
args = parser.parse_args(shlex.split(options))
except SystemExit:
return
filename = os.path.expanduser(args.filename)
if os.path.isfile(filename) and not args.force:
raise ValueError(
f'Cannot overwrite existing output file {filename}')
with open(filename, 'a' if args.append else 'w') as script:
script.write(
'\n'.join(remaining_code.splitlines()).rstrip() + '\n')
if args.setx:
import stat
os.chmod(filename, os.stat(
filename).st_mode | stat.S_IEXEC)
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'display_data',
{'metadata': {},
'data': {
'text/plain': f'Cell content saved to {filename}\n',
'text/html': HTML(
f'<div class="sos_hint">Cell content saved to <a href="{filename}" target="_blank">{filename}</a></div>').data
}
})
return
except Exception as e:
self.sos_kernel.warn(f'Failed to save cell: {e}')
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self.sos_kernel._execution_count,
}
class SessionInfo_Magic(SoS_Magic):
name = 'sessioninfo'
def __init__(self, kernel):
super(SessionInfo_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%sessioninfo',
description='''List the session info of all subkernels, and information
stored in variable sessioninfo''')
parser.error = self._parse_error
return parser
def handle_sessioninfo(self):
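        # Gather version information from SoS itself, from every started subkernel, and
        # from an optional "sessioninfo" variable, then render everything as HTML tables.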
#
from sos.utils import loaded_modules
result = OrderedDict()
#
result['SoS'] = [('SoS Version', __version__)]
result['SoS'].extend(loaded_modules(env.sos_dict))
#
cur_kernel = self.sos_kernel.kernel
try:
for kernel in self.sos_kernel.kernels.keys():
kinfo = self.sos_kernel.subkernels.find(kernel)
self.sos_kernel.switch_kernel(kernel)
result[kernel] = [
('Kernel', kinfo.kernel),
('Language', kinfo.language)
]
if kernel not in self.sos_kernel.supported_languages:
continue
lan = self.sos_kernel.supported_languages[kernel]
if hasattr(lan, 'sessioninfo'):
try:
sinfo = lan(self.sos_kernel, kinfo.kernel).sessioninfo()
if isinstance(sinfo, str):
result[kernel].append([sinfo])
elif isinstance(sinfo, dict):
result[kernel].extend(list(sinfo.items()))
elif isinstance(sinfo, list):
result[kernel].extend(sinfo)
else:
self.sos_kernel.warn(f'Unrecognized session info: {sinfo}')
except Exception as e:
self.sos_kernel.warn(
f'Failed to obtain sessioninfo of kernel {kernel}: {e}')
finally:
self.sos_kernel.switch_kernel(cur_kernel)
#
if 'sessioninfo' in env.sos_dict:
result.update(env.sos_dict['sessioninfo'])
#
res = ''
for key, items in result.items():
res += f'<p class="session_section">{key}</p>\n'
res += '<table class="session_info">\n'
for item in items:
res += '<tr>\n'
if isinstance(item, str):
res += f'<td colspan="2"><pre>{item}</pre></td>\n'
elif len(item) == 1:
res += f'<td colspan="2"><pre>{item[0]}</pre></td>\n'
elif len(item) == 2:
res += f'<th>{item[0]}</th><td><pre>{item[1]}</pre></td>\n'
else:
self.sos_kernel.warn(
f'Invalid session info item of type {item.__class__.__name__}: {short_repr(item)}')
res += '</tr>\n'
res += '</table>\n'
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'display_data',
{'metadata': {},
'data': {'text/html': HTML(res).data}})
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
try:
parser.parse_known_args(shlex.split(options))
except SystemExit:
return
self.handle_sessioninfo()
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Set_Magic(SoS_Magic):
name = 'set'
def __init__(self, kernel):
super(Set_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%set',
description='''Set persistent command line options for SoS runs.''')
parser.error = self._parse_error
return parser
def handle_magic_set(self, options):
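        # Remember (or reset) persistent command line options that are appended to
        # subsequent workflow executions (%run, %rerun, %sosrun).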
if options.strip():
# self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'stream',
# {'name': 'stdout', 'text': 'sos options set to "{}"\n'.format(options)})
if not options.strip().startswith('-'):
self.sos_kernel.warn(
f'Magic %set cannot set positional argument, {options} provided.\n')
else:
self.sos_kernel.options = options.strip()
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'stream',
dict(name='stdout', text=f'Set sos options to "{self.sos_kernel.options}"\n'))
else:
if self.sos_kernel.options:
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'stream',
dict(name='stdout', text=f'Reset sos options from "{self.sos_kernel.options}" to ""\n'))
self.sos_kernel.options = ''
else:
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'stream',
{'name': 'stdout',
'text': 'Usage: set persistent sos command line options such as "-v 3" (debug output)\n'})
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
self.handle_magic_set(options)
        # self.sos_kernel.options will be set to influence the execution of remaining_code
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Shutdown_Magic(SoS_Magic):
name = 'shutdown'
def __init__(self, kernel):
super(Shutdown_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%shutdown',
description='''Shutdown or restart specified subkernel''')
parser.add_argument('kernel', nargs='?',
help='''Name of the kernel to be restarted, default to the
current running kernel.''')
parser.add_argument('-r', '--restart', action='store_true',
help='''Restart the kernel''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
try:
args = parser.parse_args(shlex.split(options))
except SystemExit:
return
        self.shutdown_kernel(
            args.kernel if args.kernel else self.sos_kernel.kernel, args.restart)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class SoSRun_Magic(SoS_Magic):
name = 'sosrun'
def __init__(self, kernel):
super(SoSRun_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%sosrun',
description='''Execute the entire notebook with steps consisting of SoS
cells (cells with SoS kernel) with section header, with specified command
line arguments. Arguments set by magic %set will be appended at the
end of command line''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
old_options = self.sos_kernel.options
self.sos_kernel.options = options + ' ' + self.sos_kernel.options
try:
# %run is executed in its own namespace
old_dict = env.sos_dict
self.sos_kernel._reset_dict()
self.sos_kernel._meta['workflow_mode'] = True
# self.sos_kernel.send_frontend_msg('preview-workflow', self.sos_kernel._meta['workflow'])
if not self.sos_kernel._meta['workflow']:
self.sos_kernel.warn(
'Nothing to execute (notebook workflow is empty).')
else:
self.sos_kernel._do_execute(self.sos_kernel._meta['workflow'], silent,
store_history, user_expressions, allow_stdin)
except Exception as e:
self.sos_kernel.warn(f'Failed to execute workflow: {e}')
raise
finally:
old_dict.quick_update(env.sos_dict._dict)
env.sos_dict = old_dict
self.sos_kernel._meta['workflow_mode'] = False
self.sos_kernel.options = old_options
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class SoSSave_Magic(SoS_Magic):
name = 'sossave'
def __init__(self, kernel):
super(SoSSave_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%sossave',
description='''Save the jupyter notebook as workflow (consisting of all sos
steps defined in cells starting with section header) or a HTML report to
specified file.''')
parser.add_argument('filename', nargs='?',
help='''Filename of saved report or script. Default to notebookname with file
extension determined by option --to.''')
parser.add_argument('-t', '--to', dest='__to__', choices=['sos', 'html'],
help='''Destination format, default to sos.''')
parser.add_argument('-c', '--commit', action='store_true',
help='''Commit the saved file to git directory using command
git commit FILE''')
parser.add_argument('-a', '--all', action='store_true',
help='''The --all option for sos convert script.ipynb script.sos, which
saves all cells and their metadata to a .sos file, that contains all input
information of the notebook but might not be executable in batch mode.''')
parser.add_argument('-m', '--message',
help='''Message for git commit. Default to "save FILENAME"''')
parser.add_argument('-p', '--push', action='store_true',
help='''Push the commit with command "git push"''')
parser.add_argument('-f', '--force', action='store_true',
help='''If destination file already exists, overwrite it.''')
parser.add_argument('-x', '--set-executable', dest="setx", action='store_true',
help='''Set `executable` permission to saved script.''')
parser.add_argument('--template', default='default-sos-template',
help='''Template to generate HTML output. The default template is a
template defined by configuration key default-sos-template, or
sos-report if such a key does not exist.''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
# get the saved filename
options, remaining_code = self.get_magic_and_code(code, False)
try:
parser = self.get_parser()
try:
args = parser.parse_args(shlex.split(options))
except SystemExit:
return
if args.filename:
filename = args.filename
if filename.lower().endswith('.html'):
if args.__to__ is None:
ftype = 'html'
elif args.__to__ != 'html':
self.sos_kernel.warn(
f'%sossave to an .html file in {args.__to__} format')
ftype = args.__to__
else:
ftype = 'sos'
else:
ftype = args.__to__ if args.__to__ else 'sos'
filename = self.sos_kernel._meta['notebook_name'] + '.' + ftype
filename = os.path.expanduser(filename)
if os.path.isfile(filename) and not args.force:
raise ValueError(
f'Cannot overwrite existing output file {filename}')
# self.sos_kernel.send_frontend_msg('preview-workflow', self.sos_kernel._meta['workflow'])
if ftype == 'sos':
if not args.all:
with open(filename, 'w') as script:
script.write(self.sos_kernel._meta['workflow'])
else:
# convert to sos report
from .converter import notebook_to_script
arg = argparse.Namespace()
arg.execute = False
arg.all = True
notebook_to_script(
self.sos_kernel._meta['notebook_name'] + '.ipynb', filename, args=arg, unknown_args=[])
if args.setx:
import stat
os.chmod(filename, os.stat(
filename).st_mode | stat.S_IEXEC)
else:
# convert to sos report
from .converter import notebook_to_html
arg = argparse.Namespace()
if args.template == 'default-sos-template':
from sos.utils import load_config_files
cfg = load_config_files()
if 'default-sos-template' in cfg:
arg.template = cfg['default-sos-template']
else:
arg.template = 'sos-report'
else:
arg.template = args.template
arg.view = False
arg.execute = False
notebook_to_html(self.sos_kernel._meta['notebook_name'] + '.ipynb',
filename, sargs=arg, unknown_args=[])
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'display_data',
{'metadata': {},
'data': {
'text/plain': f'Workflow saved to {filename}\n',
'text/html': HTML(
f'<div class="sos_hint">Workflow saved to <a href="{filename}" target="_blank">{filename}</a></div>').data
}
})
#
if args.commit:
                self.run_shell_command(['git', 'commit', filename, '-m',
                                        args.message if args.message else f'save {filename}'])
if args.push:
self.run_shell_command(['git', 'push'])
return
except Exception as e:
self.sos_kernel.warn(f'Failed to save workflow: {e}')
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self.sos_kernel._execution_count,
}
class TaskInfo_Magic(SoS_Magic):
name = 'taskinfo'
def __init__(self, kernel):
super(TaskInfo_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%taskinfo',
description='''Get information on specified task. By default
sos would query against all running task queues but it would
start a task queue and query status if option -q is specified.
''')
parser.add_argument('task', help='ID of task')
parser.add_argument('-q', '--queue',
help='''Task queue on which the task is executed.''')
parser.add_argument('-c', '--config', help='''A configuration file with host
definitions, in case the definitions are not defined in global or local
sos config.yml files.''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
try:
args = parser.parse_args(options.split())
except SystemExit:
return
if args.config:
            from sos.utils import load_config_files
            load_config_files(args.config)
self.sos_kernel.update_taskinfo(args.task, args.queue)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Tasks_Magic(SoS_Magic):
name = 'tasks'
def __init__(self, kernel):
super(Tasks_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%tasks',
description='''Show a list of tasks from specified queue''')
parser.add_argument('tasks', nargs='*', help='ID of tasks')
parser.add_argument('-s', '--status', nargs='*',
help='''Display tasks of specified status. Default to all.''')
parser.add_argument('-q', '--queue',
                            help='''Task queue from which the tasks are retrieved.''')
        parser.add_argument('--age', help='''Limit to tasks that were created more than
(default) or within specified age. Value of this parameter can be in units
s (second), m (minute), h (hour), or d (day, default), with optional
            prefix + for older (default) and - for younger than the specified age.''')
parser.add_argument('-c', '--config', help='''A configuration file with host
definitions, in case the definitions are not defined in global or local
sos config.yml files.''')
parser.error = self._parse_error
return parser
def handle_tasks(self, tasks, queue='localhost', status=None, age=None):
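        # Connect to the requested task queue and push the status of matching tasks to the frontend.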
from sos.hosts import Host
try:
host = Host(queue)
except Exception as e:
            self.sos_kernel.warn('Invalid task queue {}: {}'.format(queue, e))
return
# get all tasks
for tid, tst, tdt in host._task_engine.monitor_tasks(tasks, status=status, age=age):
self.sos_kernel.notify_task_status(
['new-status', queue, tid, tst, tdt])
self.sos_kernel.send_frontend_msg('update-duration', {})
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
try:
args = parser.parse_args(options.split())
except SystemExit:
return
if args.config:
            from sos.utils import load_config_files
            load_config_files(args.config)
self.handle_tasks(
args.tasks, args.queue if args.queue else 'localhost', args.status, args.age)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
def header_to_toc(text, id):
'''Convert a bunch of ## header to TOC'''
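    # Nested <ul> elements are opened or closed as the header level increases or decreases.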
toc = [f'<div class="toc" id="{id}">' if id else '<div class="toc">']
lines = [x for x in text.splitlines() if x.strip()]
if not lines:
return ''
top_level = min(x.split(' ')[0].count('#') for x in lines)
level = top_level - 1
for line in lines:
header, text = line.split(' ', 1)
# the header might have anchor link like <a id="videos"></a>
matched = re.match('.*(<a\s+id="(.*)">.*</a>).*', text)
anchor = ''
if matched:
text = text.replace(matched.group(1), '')
anchor = matched.group(2)
# remove image
matched = re.match('.*(<img .*>).*', text)
if matched:
text = text.replace(matched.group(1), '')
if not anchor:
anchor = re.sub('[^ a-zA-Z0-9]', '',
text).strip().replace(' ', '-')
# handle ` ` in header
text = re.sub('`(.*?)`', '<code>\\1</code>', text)
line_level = header.count('#')
if line_level > level:
# level 2
# line_leval 4
# add level 3, 4
for l in range(level + 1, line_level + 1):
# increase level, new ui
toc.append(f'<ul class="toc-item lev{l - top_level}">')
elif line_level < level:
# level 4
# line_level 2
# end level 4 and 3.
for level in range(level - line_level):
# end last one
toc.append('</ul>')
level = line_level
toc.append(f'''<li><a href="#{anchor}">{text}</a></li>''')
# if last level is 4, toplevel is 2 ...
if level:
for level in range(level - top_level):
toc.append('</div>')
return HTML('\n'.join(toc)).data
class Toc_Magic(SoS_Magic):
name = 'toc'
def __init__(self, kernel):
super(Toc_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%toc',
description='''Generate a table of content from the current notebook.''')
loc = parser.add_mutually_exclusive_group()
loc.add_argument('-p', '--panel', action='store_true',
help='''Show the TOC in side panel even if the panel is currently closed''')
loc.add_argument('-n', '--notebook', action='store_true',
help='''Show the TOC in the main notebook.''')
parser.add_argument(
'--id', help='''Optional ID of the generated TOC.''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
parser = self.get_parser()
try:
args = parser.parse_args(shlex.split(options))
except SystemExit:
return
if args.panel:
self.sos_kernel._meta['use_panel'] = True
elif args.notebook:
self.sos_kernel._meta['use_panel'] = False
if self.sos_kernel._meta['use_panel']:
self.sos_kernel.send_frontend_msg('show_toc')
else:
self.sos_kernel.send_response(self.sos_kernel.iopub_socket, 'display_data',
{'metadata': {},
'data': {
'text/html': header_to_toc(self.sos_kernel._meta['toc'], args.id)
},
})
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
class Use_Magic(SoS_Magic):
name = 'use'
def __init__(self, kernel):
super(Use_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%use',
description='''Switch to an existing subkernel
or start a new subkernel.''')
parser.add_argument('name', nargs='?', default='',
help='''Displayed name of kernel to start (if no kernel with name is
specified) or switch to (if a kernel with this name is already started).
The name is usually a kernel name (e.g. %%use ir) or a language name
(e.g. %%use R) in which case the language name will be used. One or
more parameters --language or --kernel will need to be specified
if a new name is used to start a separate instance of a kernel.''')
parser.add_argument('-k', '--kernel',
help='''kernel name as displayed in the output of jupyter kernelspec
list. Default to the default kernel of selected language (e.g. ir for
            language R).''')
parser.add_argument('-l', '--language',
help='''Language extension that enables magics such as %%get and %%put
for the kernel, which should be in the name of a registered language
(e.g. R), or a specific language module in the format of
            package.module:class. SoS maintains a list of languages and kernels
so this option is only needed for starting a new instance of a kernel.
''')
parser.add_argument('-c', '--color',
help='''Background color of new or existing kernel, which overrides
the default color of the language. A special value "default" can be
used to reset color to default.''')
parser.add_argument('-r', '--restart', action='store_true',
help='''Restart the kernel if it is running.''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
try:
parser = self.get_parser()
try:
args = parser.parse_args(shlex.split(options))
except SystemExit:
return
except Exception as e:
self.sos_kernel.warn(f'Invalid option "{options}": {e}\n')
return {'status': 'abort',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self.sos_kernel._execution_count,
}
        if args.restart and args.name in self.sos_kernel.kernels:
self.shutdown_kernel(args.name)
self.sos_kernel.warn(f'{args.name} is shutdown')
try:
self.sos_kernel.switch_kernel(args.name, None, None, args.kernel,
args.language, args.color)
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
except Exception as e:
self.sos_kernel.warn(
f'Failed to switch to subkernel {args.name} (kernel {args.kernel}, language {args.language}): {e}')
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self.sos_kernel._execution_count,
}
class With_Magic(SoS_Magic):
name = 'with'
def __init__(self, kernel):
super(With_Magic, self).__init__(kernel)
def get_parser(self):
parser = argparse.ArgumentParser(prog='%with',
description='''Use specified subkernel to evaluate current
cell, with optional input and output variables''')
parser.add_argument('name', nargs='?', default='',
help='''Name of an existing kernel.''')
parser.add_argument('-i', '--in', nargs='*', dest='in_vars',
help='Input variables (variables to get from SoS kernel)')
parser.add_argument('-o', '--out', nargs='*', dest='out_vars',
help='''Output variables (variables to put back to SoS kernel
before switching back to the SoS kernel''')
parser.error = self._parse_error
return parser
def apply(self, code, silent, store_history, user_expressions, allow_stdin):
options, remaining_code = self.get_magic_and_code(code, False)
try:
parser = self.get_parser()
try:
args = parser.parse_args(shlex.split(options))
except SystemExit:
return
except Exception as e:
self.sos_kernel.warn(f'Invalid option "{options}": {e}\n')
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self.sos_kernel._execution_count,
}
original_kernel = self.sos_kernel.kernel
try:
self.sos_kernel.switch_kernel(args.name, args.in_vars, args.out_vars)
except Exception as e:
self.sos_kernel.warn(
                f'Failed to switch to subkernel {args.name}: {e}')
return {'status': 'error',
'ename': e.__class__.__name__,
'evalue': str(e),
'traceback': [],
'execution_count': self.sos_kernel._execution_count,
}
try:
return self.sos_kernel._do_execute(remaining_code, silent, store_history, user_expressions, allow_stdin)
finally:
self.sos_kernel.switch_kernel(original_kernel)
class SoS_Magics(object):
magics = [
Command_Magic,
Capture_Magic,
Cd_Magic,
Clear_Magic,
ConnectInfo_Magic,
Debug_Magic,
Dict_Magic,
Expand_Magic,
Get_Magic,
Matplotlib_Magic,
Preview_Magic,
Pull_Magic,
Paste_Magic,
Push_Magic,
Put_Magic,
Render_Magic,
Rerun_Magic,
Run_Magic,
Revisions_Magic,
Save_Magic,
Set_Magic,
SessionInfo_Magic,
Shutdown_Magic,
SoSRun_Magic,
SoSSave_Magic,
TaskInfo_Magic,
Tasks_Magic,
Toc_Magic,
Sandbox_Magic,
Use_Magic,
With_Magic
]
names = [x.name for x in magics if x.name != '!']
def __init__(self, kernel=None):
self._magics = {x.name: x(kernel) for x in self.magics}
def get(self, name):
return self._magics[name]
def values(self):
return self._magics.values()
```
#### File: src/sos_notebook/test_utils.py
```python
import atexit
#
#
from contextlib import contextmanager
from queue import Empty
#
# % nosetests test_kernel.py
from ipykernel.tests import utils as test_utils
test_utils.TIMEOUT = 60
KM = None
KC = None
@contextmanager
def sos_kernel():
"""Context manager for the global kernel instance
Should be used for most kernel tests
Returns
-------
kernel_client: connected KernelClient instance
"""
yield start_sos_kernel()
def flush_channels(kc=None):
"""flush any messages waiting on the queue"""
if kc is None:
kc = KC
for channel in (kc.shell_channel, kc.iopub_channel):
while True:
try:
channel.get_msg(block=True, timeout=0.1)
except Empty:
break
# do not validate message because SoS has special sos_comm
# else:
# validate_message(msg)
def start_sos_kernel():
"""start the global kernel (if it isn't running) and return its client"""
global KM, KC
if KM is None:
KM, KC = test_utils.start_new_kernel(kernel_name='sos')
atexit.register(stop_sos_kernel)
else:
flush_channels(KC)
return KC
def stop_sos_kernel():
"""Stop the global shared kernel instance, if it exists"""
global KM, KC
KC.stop_channels()
KC = None
if KM is None:
return
KM.shutdown_kernel(now=False)
KM = None
def get_result(iopub):
"""retrieve result from an execution"""
result = None
while True:
msg = iopub.get_msg(block=True, timeout=1)
msg_type = msg['msg_type']
content = msg['content']
if msg_type == 'status' and content['execution_state'] == 'idle':
# idle message signals end of output
break
elif msg['msg_type'] == 'execute_result':
result = content['data']
elif msg['msg_type'] == 'display_data':
result = content['data']
else:
# other output, ignored
pass
    # text/plain can have frozen dict, this is ok,
from numpy import array, matrix, uint8
# suppress pyflakes warning
array
matrix
uint8
# it can also have dict_keys, we will have to redefine it
def dict_keys(args):
return args
if result is None:
return None
else:
return eval(result['text/plain'])
def get_display_data(iopub, data_type='text/plain'):
"""retrieve display_data from an execution from subkernel
because subkernel (for example irkernel) does not return
execution_result
"""
result = None
while True:
msg = iopub.get_msg(block=True, timeout=1)
msg_type = msg['msg_type']
content = msg['content']
if msg_type == 'status' and content['execution_state'] == 'idle':
# idle message signals end of output
break
elif msg['msg_type'] == 'display_data':
if isinstance(data_type, str):
if data_type in content['data']:
result = content['data'][data_type]
else:
for dt in data_type:
if dt in content['data']:
result = content['data'][dt]
# some early version of IRKernel still passes execute_result
elif msg['msg_type'] == 'execute_result':
result = content['data']['text/plain']
return result
def clear_channels(iopub):
"""assemble stdout/err from an execution"""
while True:
msg = iopub.get_msg(block=True, timeout=1)
msg_type = msg['msg_type']
content = msg['content']
if msg_type == 'status' and content['execution_state'] == 'idle':
# idle message signals end of output
break
def get_std_output(iopub):
'''Obtain stderr and remove some unnecessary warning from
https://github.com/jupyter/jupyter_client/pull/201#issuecomment-314269710'''
stdout, stderr = test_utils.assemble_output(iopub)
return stdout, '\n'.join([x for x in stderr.splitlines() if 'sticky' not in x and 'RuntimeWarning' not in x and 'communicator' not in x])
``` |
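A minimal usage sketch for the helpers above (this block is not part of the original repository; it assumes the SoS kernel is installed and registered with Jupyter, and the executed cell and expected value are purely illustrative):
```python
from sos_notebook.test_utils import sos_kernel, get_result

# Run one cell in the shared SoS test kernel and read back its evaluated result.
with sos_kernel() as kc:
    kc.execute('a = [1, 2, 3]\na', silent=False)
    assert get_result(kc.iopub_channel) == [1, 2, 3]
```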
{
"source": "aadithyamd/BertSum",
"score": 2
} |
#### File: BertSum/src/train.py
```python
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_pretrained_bert import BertConfig
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import Summarizer
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def multi_main(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' %gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train(args,device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def wait_and_validate(args, device_id):
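    # With -test_all, validate every saved checkpoint and test the three with the lowest
    # validation cross-entropy; otherwise keep polling the model directory and
    # validate/test each new checkpoint as it appears.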
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
valid_iter =data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter,step)
def baseline(args, cal_lead=False, cal_oracle=False):
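    # Evaluate the LEAD or ORACLE baseline on the test set; "device" and "device_id"
    # are module-level globals set in the __main__ block below.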
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = Summarizer(args, device, load_pretrained_bert=True)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
model.load_cp(checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
else:
optim = model_builder.build_optim(args, model, None)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
trainer.train(train_iter_fct, args.train_steps)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='classifier', type=str, choices=['classifier','transformer','rnn','baseline','multi_layer_classifier'])
parser.add_argument("-mode", default='train', type=str, choices=['train','validate','test'])
parser.add_argument("-bert_data_path", default='../bert_data/cnndm')
parser.add_argument("-model_path", default='../models/')
parser.add_argument("-result_path", default='../results/cnndm')
parser.add_argument("-temp_dir", default='../temp')
parser.add_argument("-bert_config_path", default='../bert_config_uncased_base.json')
parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=512, type=int)
parser.add_argument("-heads", default=4, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-out_layer", default=-1, type=int)
parser.add_argument("-freeze_initial", default=0, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=1, type=float)
parser.add_argument("-beta1", default= 0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=1, type=int)
parser.add_argument("-train_steps", default=1000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='../logs/cnndm.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-test_from", default='')
parser.add_argument("-train_from", default='')
parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
args = parser.parse_args()
args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
if(args.world_size>1):
multi_main(args)
elif (args.mode == 'train'):
train(args, device_id)
elif (args.mode == 'validate'):
wait_and_validate(args, device_id)
elif (args.mode == 'lead'):
baseline(args, cal_lead=True)
elif (args.mode == 'oracle'):
baseline(args, cal_oracle=True)
elif (args.mode == 'test'):
cp = args.test_from
try:
step = int(cp.split('.')[-2].split('_')[-1])
except:
step = 0
test(args, device_id, cp, step)
``` |
{
"source": "aadithyamd/video2txt",
"score": 3
} |
#### File: aadithyamd/video2txt/preprocessing.py
```python
import cv2
import os
import ipdb
import numpy as np
import pandas as pd
import skimage
from cnn_util import *
def preprocess_frame(image, target_height=224, target_width=224):
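    # Convert the frame to float RGB, resize its shorter side to the target size, and
    # center-crop to target_height x target_width.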
if len(image.shape) == 2:
image = np.tile(image[:,:,None], 3)
elif len(image.shape) == 4:
image = image[:,:,:,0]
image = skimage.img_as_float(image).astype(np.float32)
height, width, rgb = image.shape
if width == height:
resized_image = cv2.resize(image, (target_height,target_width))
elif height < width:
resized_image = cv2.resize(image, (int(width * float(target_height)/height), target_width))
cropping_length = int((resized_image.shape[1] - target_height) / 2)
resized_image = resized_image[:,cropping_length:resized_image.shape[1] - cropping_length]
else:
resized_image = cv2.resize(image, (target_height, int(height * float(target_width) / width)))
cropping_length = int((resized_image.shape[0] - target_width) / 2)
resized_image = resized_image[cropping_length:resized_image.shape[0] - cropping_length,:]
return cv2.resize(resized_image, (target_height, target_width))
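# Shape walk-through of preprocess_frame (values are illustrative): a grayscale frame is
# first tiled to 3 channels; a 360x480 frame (height < width) is resized so the shorter
# side becomes 224 (roughly 224x298), the extra columns are cropped symmetrically, and a
# final resize guarantees an exact 224x224 input for the CNN feature extractor.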
def main():
num_frames = 80
vgg_model = '/media/ani/Secondry Storage/videodata/model/VGG_ILSVRC_19_layers.caffemodel'
vgg_deploy = '/media/ani/Secondry Storage/videodata/model/VGG_ILSVRC_19_layers_deploy.prototxt'
video_path = '/media/ani/Secondry Storage/videodata/YouTubeClips'
video_save_path = '/media/ani/Secondry Storage/videodata/youtube_feats'
videos = os.listdir(video_path)
    # Under Python 3, filter() returns a lazy iterator; materialize it as a list.
    videos = [video for video in videos if video.endswith('avi')]
cnn = CNN(model=vgg_model, deploy=vgg_deploy, width=224, height=224)
    for video in videos:
        print(video)
        if os.path.exists(os.path.join(video_save_path, video)):
            print("Already processed ... ")
            continue
video_fullpath = os.path.join(video_path, video)
try:
cap = cv2.VideoCapture( video_fullpath )
except:
pass
frame_count = 0
frame_list = []
while True:
ret, frame = cap.read()
if ret is False:
break
frame_list.append(frame)
frame_count += 1
frame_list = np.array(frame_list)
if frame_count > 80:
frame_indices = np.linspace(0, frame_count, num=num_frames, endpoint=False).astype(int)
frame_list = frame_list[frame_indices]
        cropped_frame_list = np.array([preprocess_frame(frame) for frame in frame_list])
feats = cnn.get_features(cropped_frame_list)
save_full_path = os.path.join(video_save_path, video + '.npy')
np.save(save_full_path, feats)
if __name__=="__main__":
main()
``` |
{
"source": "aaditis/egem",
"score": 3
} |
#### File: python/MediumFormulation/format_medium_data.py
```python
import numpy as np
import pandas as pd
from openpyxl import Workbook
from openpyxl import load_workbook
from itertools import chain
import string
def get_media(dictionary, key, lst):
"""
This function requires a list of synonyms for a specific medium. It will construct a dictionary that maps the key to various synonyms of the medium.
"""
for i in lst:
try:
dictionary[key].append(i)
except KeyError:
dictionary[key] = [i]
return dictionary
def mapper(dict, series):
"""
This function maps the values in a medium key to the dataframe elements to get the keys into the dataframe.
"""
for k, v in dict.items():
idx = series.isin(v)
tmp = series[idx]
tmp[idx] = k
series[idx] = tmp
return series
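# Minimal usage sketch of the two helpers above (the key and synonyms are made up for
# illustration only):
#   media_dict = {}
#   media_dict = get_media(media_dict, 'RPMI', ['rpmi', 'rpmi-1640', 'rpmi 1640'])
#   df['Medium Condition'] = mapper(media_dict, df['Growth.Medium'])
# Every synonym found in the series is collapsed onto its canonical key.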
def rename_medium_to_common_IDs():
"""
Extract will get the medium conditions that were used for the CCLE histone proteomics paper to grow the cells and classify them to simpler medium conditions. The result will be outputted as a table.
"""
df = pd.read_csv(r'./../data/CCLE_GCP.csv')
df = df.drop('BroadID', axis=1)
media = pd.read_excel(r'./../data/summary.xlsx', sheet_name='Cell Line Annotations',
usecols=['CCLE_ID', 'Growth.Medium', 'Supplements'])
media["New_medium"] = media["Growth.Medium"] + ' + ' + media["Supplements"]
df = pd.merge(df, media, left_on='CellLineName', right_on='CCLE_ID')
# Regex to remove
df["New_medium"] = df["New_medium"].str.lower()
df["New_medium"] = df["New_medium"].str.replace(' ', '')
df["New_medium"] = df["New_medium"].str.replace('\"\"', '', regex=True)
df["New_medium"] = df["New_medium"].str.replace('\"', '', regex=True)
df["New_medium"] = df["New_medium"].str.replace(',', '')
df["New_medium"] = df["New_medium"].str.replace('%', '')
df["New_medium"] = df["New_medium"].str.replace('\+', '')
df["New_medium"] = df["New_medium"].str.replace('\/+', '')
df["New_medium"] = df["New_medium"].str.replace('\.+', '')
df["New_medium"] = df["New_medium"].str.replace('\([^)]*\)', '')
df["New_medium"] = df["New_medium"].str.replace('\(', '')
df["New_medium"] = df["New_medium"].str.replace("'", '')
df["New_medium"] = df["New_medium"].str.replace("-", '')
df["New_medium"] = df["New_medium"].str.replace("^", '')
df["New_medium"] = df["New_medium"].str.replace(";", '')
df["New_medium"] = df["New_medium"].str.replace(":", '')
# Transform once
df["New_medium"] = df["New_medium"].str.replace('rpm', 'RPMI')
df["New_medium"] = df["New_medium"].str.replace('waymouths', 'WAYMOUTH')
df["New_medium"] = df["New_medium"].str.replace('l15', 'LFIFTEEN')
df["New_medium"] = df["New_medium"].str.replace(
'dmem:f12(1:1)', 'DMEM:FTWELVE(1:1)')
df["New_medium"] = df["New_medium"].str.replace('imdm', 'IMDM')
df["New_medium"] = df["New_medium"].str.replace('rpmi', 'RPMI')
df["New_medium"] = df["New_medium"].str.replace('f-12katcc', 'FTWELVE')
df["New_medium"] = df["New_medium"].str.replace('RPMIi-1640', 'RPMI')
df["New_medium"] = df["New_medium"].str.replace(
'mcdb105(1:1)medium199', 'MCDB105:M199(1:1)')
df["New_medium"] = df["New_medium"].str.replace(
'dmem:hamsf12', 'DMEM:FTWELVE(1:1)')
df["New_medium"] = df["New_medium"].str.replace(
'eaglesminimalessential', 'EMEM')
df["New_medium"] = df["New_medium"].str.replace(
'dulbeccosmodifiedeagles', 'DMEM')
df["New_medium"] = df["New_medium"].str.replace('hamsf12', 'FTWELVE')
df["New_medium"] = df["New_medium"].str.replace('hansf12', 'FTWELVE')
df["New_medium"] = df["New_medium"].str.replace('mccoys5a', 'MCCOY5A')
df["New_medium"] = df["New_medium"].str.replace(
'williamsemedium', 'WILLIAMS')
df["New_medium"] = df["New_medium"].str.replace('dme', 'DMEM')
df["New_medium"] = df["New_medium"].str.replace(
'dmem:f12', 'DMEM:FTWELVE(1:1)')
df["New_medium"] = df["New_medium"].str.replace('rphihi', 'RPMI')
df["New_medium"] = df["New_medium"].str.replace(
"dulbeccosmemiscovesmdmhi", 'RPMI')
df["New_medium"] = df["New_medium"].str.replace('alphamemhi', 'ALPHAMEM')
df["New_medium"] = df["New_medium"].str.replace('RPMI2humamcsf', 'RPMI')
df["New_medium"] = df["New_medium"].str.replace('glucose', 'GLC')
df["New_medium"] = df["New_medium"].str.replace('pyruvate', 'PYR')
df["New_medium"] = df["New_medium"].str.replace('glutathione', 'GSH')
df["New_medium"] = df["New_medium"].str.replace('alphamem', 'ALPHAMEM')
df["New_medium"] = df["New_medium"].str.replace('dulbeccosmem', 'DMEM')
df["New_medium"] = df["New_medium"].str.replace('iscovesmdmhi', 'IMDM')
df["New_medium"] = df["New_medium"].str.replace(
'DMEMm:f12', 'DMEM:FTWELVE(1:1)')
df["New_medium"] = df["New_medium"].str.replace('hamf10', 'FTEN')
df["New_medium"] = df["New_medium"].str.replace('hamf12', 'FTWELVE')
df["New_medium"] = df["New_medium"].str.replace('glutamine', 'GLN')
df["New_medium"] = df["New_medium"].str.replace('emem', 'EMEM')
df["New_medium"] = df["New_medium"].str.replace('mccoy5a', 'MCCOY5A')
df["New_medium"] = df["New_medium"].str.replace('Wayouth', 'WAYMOUTH')
df["New_medium"] = df["New_medium"].str.replace('waymouth', 'WAYMOUTH')
df["New_medium"] = df["New_medium"].str.replace('puruvate', 'PYR')
df["New_medium"] = df["New_medium"].str.replace('glutatone', 'GSH')
df["New_medium"] = df["New_medium"].str.replace(
'leibovitzsl-15medium', 'LFIFTEEN')
df["New_medium"] = df["New_medium"].str.replace('hamsf10', 'FTEN')
df["New_medium"] = df["New_medium"].str.replace('f12', 'FTWELVE')
df["New_medium"] = df["New_medium"].str.replace('f-12', 'FTWELVE')
df["New_medium"] = df["New_medium"].str.replace('acl4', 'ACL4')
df["New_medium"] = df["New_medium"].str.replace('rpi-1640', 'RPMI')
df["New_medium"] = df["New_medium"].str.replace(
'mcdb1051:1media199', 'MCDB105:M199(1:1)')
df["New_medium"] = df["New_medium"].str.replace('DMEMm', 'DMEM')
df["New_medium"] = df["New_medium"].str.replace(
'mcdb105:medium199(1:1)', 'ACL4')
df["New_medium"] = df["New_medium"].str.replace(
'acl-4', 'MCDB105:M199(1:1)')
df["New_medium"] = df["New_medium"].str.replace('mem', 'MEM')
df["New_medium"] = df["New_medium"].str.replace('alpha-MEM', 'ALPHAMEM')
df["New_medium"] = df["New_medium"].str.replace(
'DMEMF12(1:1)', 'DMEM:FTWELVE(1:1)')
df["New_medium"] = df["New_medium"].str.replace(
'DMEM:f:12', 'DMEM:FTWELVE(1:1)')
df["New_medium"] = df["New_medium"].str.replace('dEMEM', 'DMEM')
df["New_medium"] = df["New_medium"].str.replace(
'(DMEM:F12=1:1)', 'DMEM:FTWELVE(1:1)')
df["New_medium"] = df["New_medium"].str.replace(
'mcdb105(1:1)199+', 'MCDB105:M199(1:1)')
df["New_medium"] = df["New_medium"].str.replace('rpm+', 'RPMI')
df["New_medium"] = df["New_medium"].str.replace('iscovesmdm+', 'IMDM')
df["New_medium"] = df["New_medium"].str.replace(
'mcdb105:medium199(1:1)+15%fbs+empty', 'MCDB105:M199(1:1)')
df["New_medium"] = df["New_medium"].str.replace('rphi+', 'RPMI')
df["New_medium"] = df["New_medium"].str.replace(
"mcdb105:medium19915fbsempty'", 'MCDB105:M199(1:1)', regex=True)
df["New_medium"] = df["New_medium"].str.replace(
"mcdb105medium19915fbsempty", "MCDB105:M199(1:1)", regex=True)
df["New_medium"] = df["New_medium"].str.replace('emptyempty', 'NAN')
df["New_medium"] = df["New_medium"].str.replace('emptynempty', 'NAN')
df["New_medium"] = df["New_medium"].str.replace('10fbs01mmneaa', 'FTEN')
# Get rid of everything else
df["New_medium"] = df["New_medium"].str.replace('\d+', '')
df["New_medium"] = df["New_medium"].str.replace('[a-z]', '', regex=True)
# Retransform to final version
df["New_medium"] = df["New_medium"].str.replace('WAYMOUTH', 'Waymouth')
df["New_medium"] = df["New_medium"].str.replace('LFIFTEEN', 'L15')
df["New_medium"] = df["New_medium"].str.replace(
'DMEMFTWELVEGLN', 'DMEM:F12 wGln')
df["New_medium"] = df["New_medium"].str.replace('NAN', 'RPMI')
df["New_medium"] = df["New_medium"].str.replace('DMEMFTWELVE', 'DMEM:F12')
df["New_medium"] = df["New_medium"].str.replace(
'DMEMFTWELVEFTEN', 'DMEM:F12')
df["New_medium"] = df["New_medium"].str.replace('RPMIMEM', 'RPMI')
df["New_medium"] = df["New_medium"].str.replace('FTWELVEMEM', 'F12')
df["New_medium"] = df["New_medium"].str.replace('ALPHAMEMMEM', 'Alpha-MEM')
df["New_medium"] = df["New_medium"].str.replace('EMEMMEM', 'EMEM')
df["New_medium"] = df["New_medium"].str.replace('DMEMMEM', 'DMEM')
df["New_medium"] = df["New_medium"].str.replace(
'DMEMFTWELVEPYRGLN', 'DMEM:F12 wGln wPyr')
df["New_medium"] = df["New_medium"].str.replace('DMEMGLN', 'DMEM wGln')
df["New_medium"] = df["New_medium"].str.replace('FTWELVE', 'F12')
df["New_medium"] = df["New_medium"].str.replace('MCCOYA', 'McCoy5A')
df["New_medium"] = df["New_medium"].str.replace(
'DMEMFTWELVEGLNPYR', 'DMEM:F12 wGln wPyr')
df["New_medium"] = df["New_medium"].str.replace('DMEMGLC', 'DMEM wGlc')
df["New_medium"] = df["New_medium"].str.replace('RPMIGLN', 'RPMI wGln')
df["New_medium"] = df["New_medium"].str.replace('FTEN', 'F10')
df["New_medium"] = df["New_medium"].str.replace('RPMIGLNMEM', 'RPMI wGln')
df["New_medium"] = df["New_medium"].str.replace(
"MCDB+", 'MCDB105:M199', regex=True)
df["New_medium"] = df["New_medium"].str.replace('WILLIAMS', 'Williams')
df["New_medium"] = df["New_medium"].str.replace('RPMIMEMGLN', 'RPMI wGln')
df["New_medium"] = df["New_medium"].str.replace('EMEMPYR', 'EMEM wPyr')
df["New_medium"] = df["New_medium"].str.replace(
'WAYMOUTHGLN', 'Waymouth wGln')
df["New_medium"] = df["New_medium"].str.replace('GLN', 'RPMI wGln')
df["New_medium"] = df["New_medium"].str.replace(
'DMEMFTWELVEMEMGLN', 'DMEM:F12 wGln')
df["New_medium"] = df["New_medium"].str.replace('IMDMGLN', 'IMDM wGln')
df["New_medium"] = df["New_medium"].str.replace('ACL', 'ACL4')
df["New_medium"] = df["New_medium"].str.replace('RPMIFTWELVE', 'RPMI:F12')
df["New_medium"] = df["New_medium"].str.replace(
'FTWELVEMEMGLN', 'F12 wGln')
df["New_medium"] = df["New_medium"].str.replace('EMEMFTEN', 'EMEM:F10')
df["New_medium"] = df["New_medium"].str.replace('RPMIPYR', 'RPMI wPyr')
df["New_medium"] = df["New_medium"].str.replace('RPMIEMEM', 'RPMI:EMEM')
df["New_medium"] = df["New_medium"].str.replace('DMEMGLNMEM', 'DMEM wGln')
df["New_medium"] = df["New_medium"].str.replace(
'RPMIGLNPYR', 'RPMI wGln wPyr')
df["New_medium"] = df["New_medium"].str.replace('DMEMIMDM', 'DMEM:IMDM')
df["New_medium"] = df["New_medium"].str.replace('ALPHAMEM', 'AlphaMEM')
df["New_medium"] = df["New_medium"].str.replace(
'DMEMGLNPYR', 'DMEM wGln wPyr')
df["New_medium"] = df["New_medium"].str.replace(
'ALPHAMEMDMEM', 'AlphaMEM:DMEM')
df["New_medium"] = df["New_medium"].str.replace('IMDMRPMIDMEM', 'RPMI')
print(df.head(30))
tmp = df["New_medium"].unique()
tmp = pd.DataFrame(tmp)
#df.to_csv('tmp1.csv', index=False)
#tmp.to_csv('tmp2.csv', index=False)
# Had to manually edit some of the entries, but for the most part the algorithm is ~80% accurate in mapping.
#rename_medium_to_common_IDs()
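# Rough shape of the normalization above: (1) lowercase and strip punctuation, percent
# signs and parenthesized notes; (2) rewrite known base-medium spellings into protected
# uppercase tokens (e.g. 'dulbeccosmodifiedeagles' -> 'DMEM'); (3) delete the remaining
# lowercase text and digits (supplement details); (4) expand the surviving tokens back
# into display names such as 'DMEM:F12 wGln'. As noted above, the result still required
# some manual cleanup.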
def split_cellLine_and_tissues():
df = pd.read_csv('GCP_proteomics_remapped.csv')
df['Cell Line'] = df['CellLineName'].str.split('_').str[0]
df['Tissue'] = df['CellLineName'].str.split('_', n=1).str[1]
df['Tissue'] = df['Tissue'].str.replace('_', ' ')
df['Tissue'] = df['Tissue'].str.title()
df = df.drop(['CellLineName'], axis=1)
df = df.set_index(['Cell Line', 'Tissue'])
df.to_csv("GCP_proteomics_remapped.csv")
#split_cellLine_and_tissues()
def make_medium_xl_sheet():
"""
"""
medium_conditions = pd.read_csv('GCP_proteomics_remapped.csv', usecols=[
'Medium Condition', 'Tissue'])
unique_conditions = medium_conditions['Medium Condition'].sort_values(
ascending=True).unique()
#wb = Workbook()
#name = 'Medium_conditions.xlsx'
#wb.save(filename = name)
number_of_medium = medium_conditions['Medium Condition'].value_counts()
number_of_tissues = medium_conditions['Tissue'].value_counts()
    book = load_workbook(
        r'./../../data/Medium_Component_Maps/final_medium.xlsx')
    writer = pd.ExcelWriter(
        r'./../../data/Medium_Component_Maps/final_medium.xlsx', engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
if "Summary" in book:
pass
else:
number_of_medium.to_excel(
writer, sheet_name="Summary", index=True, startcol=0)
number_of_tissues.to_excel(
writer, sheet_name="Summary", index=True, startcol=2)
writer.save()
for medium in unique_conditions:
if medium in book:
pass
else:
df = pd.DataFrame(
columns=["Components", "MW", "g/L", "mM", "BiGG ID", "Alpha", "LB", "Adjusted LB"])
df.to_excel(writer, sheet_name=medium, index=False, header=True)
writer.save()
#make_medium_xl_sheet()
def make_common_nutrient_id():
"""
"""
book = load_workbook('./Medium_conditions.xlsx')
writer = pd.ExcelWriter('./Medium_conditions.xlsx', engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
medium_components = []
for sheet in writer.sheets:
if "Summary" in sheet:
pass
else:
df = pd.read_excel('./Medium_conditions.xlsx', sheet_name=sheet)
components = df["Components"].tolist()
medium_components.append(components)
medium_components = list(chain.from_iterable(medium_components))
medium_components = set(medium_components)
medium_components = pd.DataFrame(medium_components)
medium_components[0] = medium_components[0].sort_values(ascending=True)
medium_components["Components"] = medium_components[0].str.replace(
'[\(*\)]', '')
medium_components["Components"] = medium_components["Components"].str.lower()
medium_components["Components"] = medium_components["Components"].str.replace(
'-', '', regex=True)
medium_components["Components"] = medium_components["Components"].str.replace(
'\u2022', ' ', regex=True)
medium_components["Components"] = medium_components["Components"].str.replace(
'2deoxydribose', 'twodeoxydribose', case=True)
medium_components["Components"] = medium_components["Components"].str.replace(
"adenosine 5'phosphate", 'Adenosine FivePrimePhosphate', case=True)
medium_components["Components"] = medium_components["Components"].str.replace(
"riboflavin 5'phosphate na", 'Riboflavin FivePrimePhosphate', case=True)
medium_components["Components"] = medium_components["Components"].str.replace(
"adenosine 5'triphosphate", 'Adenosine FivePrimeTriPhosphate', case=True)
medium_components["Components"] = medium_components["Components"].str.replace(
"menadione vitamin k3", 'Vitamin KThree', case=True)
medium_components["Components"] = medium_components["Components"].str.replace(
"vitamin d2 calciferol", 'Vitamin DTwo Calciferol', case=True)
medium_components["Components"] = medium_components["Components"].str.replace(
"vitamin b12 calciferol", 'Vitamin BTwelve', case=True)
medium_components["Components"] = medium_components["Components"].str.replace(
"\w*\d\w*", '')
medium_components["Components"] = medium_components["Components"].str.replace(
'"', '')
medium_components["Components"] = medium_components["Components"].str.replace(
"hcl", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"dibasic", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"anhydrous", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"monobasic", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"hydrochloride", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"disodium", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"dihydrate", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"nacl", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"anhyd.", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"hemicalcium", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"acid", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"phos\.", 'phosphate')
medium_components["Components"] = medium_components["Components"].str.replace(
"salt", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"\w*\d\w*", '')
medium_components["Components"] = medium_components["Components"].str.replace(
'"', '')
medium_components["Components"] = medium_components["Components"].str.replace(
"hcl", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"dibasic", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"anhydrous", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"monobasic", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"hydrochloride", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"disodium", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"dihydrate", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"nacl", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"anhyd.", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"hemicalcium", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"acid", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"phos\.", 'phosphate')
medium_components["Components"] = medium_components["Components"].str.replace(
"\.", '')
medium_components["Components"] = medium_components["Components"].str.replace(
" kcl", '')
medium_components["Components"] = medium_components["Components"].str.replace(
" na", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"freebase", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"salt", '')
medium_components["Components"] = medium_components["Components"].str.replace(
" ", '')
medium_components["Components"] = medium_components["Components"].str.replace(
"", '')
# Manual mapping:
medium_components["Components"] = medium_components["Components"].str.replace(
"putrescine", 'Putrescine')
medium_components["Components"] = medium_components["Components"].str.replace(
"sodium", 'Sodium')
medium_components["Components"] = medium_components["Components"].str.replace(
"phosphate", 'Phosphate')
medium_components["Components"] = medium_components["Components"].str.replace(
"RiboflavinFivePrimePhosphate", 'Alpha-D-Ribose 5-phosphate')
medium_components["Components"] = medium_components["Components"].str.replace(
"calcium", 'Calcium')
medium_components["Components"] = medium_components["Components"].str.replace(
"chloride", 'Chloride')
medium_components["Components"] = medium_components["Components"].str.replace(
"pyridoxal", 'Pyridoxal')
medium_components["Components"] = medium_components["Components"].str.replace(
"dglucosedextrose", 'D-Glucose')
medium_components["Components"] = medium_components["Components"].str.replace(
"ThiaminmonoPhosphate", 'Thiamin monophosphate')
medium_components["Components"] = medium_components["Components"].str.replace(
"lasparagine", 'L-Asparagine')
medium_components["Components"] = medium_components["Components"].str.replace(
"iinositol", 'Myo-Inositol')
medium_components["Components"] = medium_components["Components"].str.replace(
"manganese", 'Manganese')
medium_components["Components"] = medium_components["Components"].str.replace(
"ribose", 'D-Ribose')
medium_components["Components"] = medium_components["Components"].str.replace(
"lisoleucine", 'L-Isoleucine')
medium_components["Components"] = medium_components["Components"].str.replace(
"dCalciumpantothenate", '(R)-Pantothenate')
medium_components["Components"] = medium_components["Components"].str.replace(
"niacinamide", 'Nicotinamide')
medium_components["Components"] = medium_components["Components"].str.replace(
"linoleic", 'Linoleic acid (all cis C18:2) n-6')
medium_components["Components"] = medium_components["Components"].str.replace(
"vitaminaacetate", 'L-Asparagine')
medium_components["Components"] = medium_components["Components"].str.replace(
"acetate", 'Acetate')
medium_components["Components"] = medium_components["Components"].str.replace(
"magnesium", 'Magnesium')
medium_components["Components"] = medium_components["Components"].str.replace(
"sulfate", 'Sulfate')
medium_components["Components"] = medium_components["Components"].str.replace(
"lcysteine", 'L-Cysteine')
medium_components["Components"] = medium_components["Components"].str.replace(
"lproline", 'L-Proline')
medium_components["Components"] = medium_components["Components"].str.replace(
"dpantothenic", '(R)-Pantothenate')
medium_components["Components"] = medium_components["Components"].str.replace(
"potassium", 'Potassium')
medium_components["Components"] = medium_components["Components"].str.replace(
"twodeoxydD-Ribose", 'Deoxyribose C5H10O4')
medium_components["Components"] = medium_components["Components"].str.replace(
"laspartic", 'L-aspartate')
medium_components["Components"] = medium_components["Components"].str.replace(
"VitaminDTwoCalciferol", 'Vitamin D2; ergocalciferol')
medium_components["Components"] = medium_components["Components"].str.replace(
"lcystine", 'L Cystine C6H12N2O4S2')
medium_components["Components"] = medium_components["Components"].str.replace(
"uracil", 'Uracil')
medium_components["Components"] = medium_components["Components"].str.replace(
"ammonium", 'Ammonium')
medium_components["Components"] = medium_components["Components"].str.replace(
"ergocalciferol", 'Vitamin D2; ergocalciferol')
medium_components["Components"] = medium_components["Components"].str.replace(
"lipoic", 'Lipoate')
medium_components["Components"] = medium_components["Components"].str.replace(
"riboflavin", 'Riboflavin C17H20N4O6')
medium_components["Components"] = medium_components["Components"].str.replace(
"thiamine", 'Thiamin')
medium_components["Components"] = medium_components["Components"].str.replace(
"alphatocopherol", 'Alpha-Tocopherol')
medium_components["Components"] = medium_components["Components"].str.replace(
"nitrate", 'Nitrate')
medium_components["Components"] = medium_components["Components"].str.replace(
"bicarbonate", 'Bicarbonate')
medium_components["Components"] = medium_components["Components"].str.replace(
"paraaminobenzoic", '4-Aminobenzoate')
medium_components["Components"] = medium_components["Components"].str.replace(
"lserine", 'L-Serine')
medium_components["Components"] = medium_components["Components"].str.replace(
"glucose", 'D-Glucose')
medium_components["Components"] = medium_components["Components"].str.replace(
"follinic", 'Folate')
medium_components["Components"] = medium_components["Components"].str.replace(
"llysine", 'L-Lysine')
medium_components["Components"] = medium_components["Components"].str.replace(
"folic", 'Folate')
medium_components["Components"] = medium_components["Components"].str.replace(
"hypoxanthine", 'Hypoxanthine')
medium_components["Components"] = medium_components["Components"].str.replace(
"zinc", 'Zinc')
medium_components["Components"] = medium_components["Components"].str.replace(
"adenine", 'Adenine')
medium_components["Components"] = medium_components["Components"].str.replace(
"AdenosineFivePrimeTriPhosphate", 'ATP C10H12N5O13P3')
medium_components["Components"] = medium_components["Components"].str.replace(
"lalanine", 'L-Alanine')
medium_components["Components"] = medium_components["Components"].str.replace(
"guanosine", 'Guanosine')
medium_components["Components"] = medium_components["Components"].str.replace(
"glutathionereduced", 'Reduced glutathione')
medium_components["Components"] = medium_components["Components"].str.replace(
"AdenosineFivePrimePhosphate", 'AMP C10H12N5O7P')
medium_components["Components"] = medium_components["Components"].str.replace(
"lthreonine", 'L-Threonine')
medium_components["Components"] = medium_components["Components"].str.replace(
"pyruvate", 'Pyruvate')
medium_components["Components"] = medium_components["Components"].str.replace(
"lleucine", 'L-Leucine')
medium_components["Components"] = medium_components["Components"].str.replace(
"thymidine", 'Thymidine')
medium_components["Components"] = medium_components["Components"].str.replace(
"cholesterol", 'Chsterol c')
medium_components["Components"] = medium_components["Components"].str.replace(
"choline", 'Choline C5H14NO')
medium_components["Components"] = medium_components["Components"].str.replace(
"lphenyL-Alanine", 'L-Phenylalanine')
medium_components["Components"] = medium_components["Components"].str.replace(
"guanine", 'Guanine')
medium_components["Components"] = medium_components["Components"].str.replace(
"lhydroxyproline", 'Trans 4 Hydroxy L proline C5H9NO3')
medium_components["Components"] = medium_components["Components"].str.replace(
"lmethionine", 'L-Methionine')
medium_components["Components"] = medium_components["Components"].str.replace(
"thymine", 'Thymine C5H6N2O2')
medium_components["Components"] = medium_components["Components"].str.replace(
"guanine", 'Guanine')
medium_components["Components"] = medium_components["Components"].str.replace(
"ribonucleosides", 'Nicotinate D-ribonucleoside')
medium_components["Components"] = medium_components["Components"].str.replace(
"myoinositol", 'Myo-Inositol')
medium_components["Components"] = medium_components["Components"].str.replace(
"lalanyllglutamine", 'L-alanine-L-glutamate')
medium_components["Components"] = medium_components["Components"].str.replace(
"adenosine", 'Adenosine')
medium_components["Components"] = medium_components["Components"].str.replace(
"xanthinena", 'Xanthine')
medium_components["Components"] = medium_components["Components"].str.replace(
"lhistidine", 'L-Histidine')
medium_components["Components"] = medium_components["Components"].str.replace(
"ltryptophan", 'L-Tryptophan')
medium_components["Components"] = medium_components["Components"].str.replace(
"glycine", 'Glycine')
medium_components["Components"] = medium_components["Components"].str.replace(
"uridine", 'Uridine')
medium_components["Components"] = medium_components["Components"].str.replace(
"pyridoxine", 'Pyridoxine')
medium_components["Components"] = medium_components["Components"].str.replace(
"lglutamine", 'L-Glutamine')
medium_components["Components"] = medium_components["Components"].str.replace(
"lvaline", 'L-Valine')
medium_components["Components"] = medium_components["Components"].str.replace(
"larginine", 'L-Arginine')
medium_components["Components"] = medium_components["Components"].str.replace(
"lglutamic", 'L-Glutamate')
medium_components["Components"] = medium_components["Components"].str.replace(
"cytidine", 'Cytidine')
medium_components["Components"] = medium_components["Components"].str.replace(
"ascorbic", 'L-Ascorbate')
medium_components["Components"] = medium_components["Components"].str.replace(
"biotin", 'Biotin')
medium_components["Components"] = medium_components["Components"].str.replace(
"nicotinicniacin", 'Nicotinate')
medium_components["Components"] = medium_components["Components"].str.replace(
"d\+galactose", 'D-Galactose')
medium_components["Components"] = medium_components["Components"].str.replace(
"molybdate", 'Molybdate')
medium_components["Components"] = medium_components["Components"].str.replace(
"ltyrosine", 'L-Tyrosine')
medium_components["Components"] = medium_components["Components"].str.replace(
"vitamin", 'Cob(I)alamin')
medium_components["Components"] = medium_components["Components"].str.replace(
"VitaminKThree", 'Vitamin K3')
medium_components["Components"] = medium_components["Components"].str.replace(
"\r", '')
medium_components["Components"] = medium_components["Components"].str.replace(
r'(\w)([A-Z][a-z])', r"\1 \2", regex=True)
tmp = medium_components["Components"].str.split(
'[A-Z][^A-Z]*', expand=True).apply(pd.Series, 1).stack()
#make_common_nutrient_id()
def get_unique_ids():
df = pd.read_csv('./medium_component_map.csv')
df = df.drop_duplicates(keep='first')
df.to_csv('./medium_component_map.csv', index=False)
#get_unique_ids()
def map_to_recon1_xchange():
pd.options.mode.chained_assignment = None
df = pd.read_csv('./medium_component_map.csv')
metabolites = pd.read_excel('./recon1_id.xlsx', sheet_name='Metabolites')
reactions = pd.read_excel('./recon1_id.xlsx', sheet_name='Reactions')
exchange_reactions = reactions[reactions['Reaction ID'].str.startswith(
'EX_')]
exchange_reactions['Metabolite'] = exchange_reactions['Reaction ID'].str.replace(
'EX_', '')
recon1_map = pd.merge(exchange_reactions, metabolites,
how='inner', left_on='Metabolite', right_on='Metabolite ID')
recon1_map = recon1_map.copy(deep=True)
full_map = pd.merge(recon1_map, df, how='inner',
left_on='Metabolite Name', right_on='BiGG Common Names')
full_map = full_map.drop_duplicates(keep='first')
full_map.to_csv('./medium_component_mapped_to_recon1.csv', index=False)
#recon1_map.to_csv('Recon1_exchange_map.csv', index=False)
#map_to_recon1_xchange()
def recon_xchange():
pd.options.mode.chained_assignment = None
metabolites = pd.read_excel(
'./../../data/metabolicModel_maps/RECON3_Reaction_Metabolite_ID_Maps.xlsx', sheet_name='Metabolites')
reactions = pd.read_excel(
'./../../data/metabolicModel_maps/RECON3_Reaction_Metabolite_ID_Maps.xlsx', sheet_name='Reactions')
exchange_reactions = reactions[reactions['Bigg Reaction ID'].str.startswith(
'EX_')]
exchange_reactions['Metabolite'] = exchange_reactions['Bigg Reaction ID'].str.replace(
'EX_', '')
recon1_map = pd.merge(exchange_reactions, metabolites,
how='inner', left_on='Metabolite', right_on='BiGG Metabolite ID')
recon1_map = recon1_map.copy(deep=True)
recon1_map.to_csv(
'./../../data/metabolicModel_maps/RECON3_ExchangeReaction_Map.csv', index=False)
recon_xchange()
def drop_all_duplicates():
df = pd.read_excel(r'./../../data/Medium_Component_Maps/final_medium.xlsx')
df = df.drop_duplicates(keep='first')
df.to_excel(
r'./../../data/Medium_Component_Maps/final_medium.xlsx', index=False)
def average_all_duplicates(df):
df = df.groupby(df.index).mean().reset_index()
df = df.set_index("Components")
return df
#drop_all_duplicates()
def make_medium_conditions():
rpmi = pd.read_excel(
r'./../../data/Medium_Component_Maps/original_medium.xlsx', sheet_name='RPMI')
dmem = pd.read_excel(
r'./../../data/Medium_Component_Maps/original_medium.xlsx', sheet_name='DMEM')
imdm = pd.read_excel(
r'./../../data/Medium_Component_Maps/original_medium.xlsx', sheet_name='IMDM')
emem = pd.read_excel(
r'./../../data/Medium_Component_Maps/original_medium.xlsx', sheet_name='EMEM')
mcdb105 = pd.read_excel(
r'./../../data/Medium_Component_Maps/original_medium.xlsx', sheet_name='MCDB105')
m199 = pd.read_excel(
r'./../../data/Medium_Component_Maps/original_medium.xlsx', sheet_name='M199')
f12 = pd.read_excel(
r'./../../data/Medium_Component_Maps/original_medium.xlsx', sheet_name='F12')
book = load_workbook(
r'./../../data/Medium_Component_Maps/final_medium.xlsx')
writer = pd.ExcelWriter(
r'./../../data/Medium_Component_Maps/final_medium.xlsx', engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
def make_medium(medium1, medium2, weight=0.5):
"""
args:
weight: the proportionality constant (default = 1:1)
"""
weight1 = weight
weight2 = 1 - weight
medium1 = medium1.copy(deep=True)
medium1['g/L'] = medium1['g/L'].replace('Infinity', np.inf)
medium1['g/L'] = medium1['g/L']*weight1
medium2 = medium2.copy(deep=True)
medium2['g/L'] = medium2['g/L'].replace('Infinity', np.inf)
medium2['g/L'] = medium2['g/L']*weight2
combined_medium = pd.concat([medium1, medium2]).groupby(
'Components')['g/L'].sum().reset_index()
rest_of_columns = ["MW", "mM", "BiGG ID",
"Alpha", "LB", "Adjusted LB"]
combined_medium = pd.concat(
[combined_medium, pd.DataFrame(columns=rest_of_columns)], sort=False)
return combined_medium
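    # Worked example for make_medium with the default 1:1 weighting (numbers are
    # illustrative, not taken from the real formulations): a component at 4.5 g/L in
    # medium1 and 2.0 g/L in medium2 ends up at 0.5*4.5 + 0.5*2.0 = 3.25 g/L; a
    # component present in only one of the two media contributes half its
    # concentration, since the groupby-sum simply adds the weighted rows.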
# DMEM-IMDM
dmem_imdm = make_medium(dmem, imdm)
    dmem_imdm.to_excel(writer, sheet_name='DMEM-IMDM', columns=[
        "Components", "MW", "g/L", "mM", "BiGG ID", "Alpha", "LB", "Adjusted LB"], index=False)
# MCDB105-M199
mcdb105_m199 = make_medium(mcdb105, m199)
mcdb105_m199.to_excel(writer, sheet_name='MCDB105-M199', index=False)
# RPMI-EMEM
rpmi_emem = make_medium(rpmi, emem)
rpmi_emem.to_excel(writer, sheet_name='RPMI-EMEM', index=False)
# RPMI-F12
rpmi_f12 = make_medium(rpmi, f12)
rpmi_f12.to_excel(writer, sheet_name='RPMI-F12', index=False)
    # DMEM-RPMI
    dmem_rpmi = make_medium(dmem, rpmi, weight=2/3)
    dmem_rpmi.to_excel(writer, sheet_name='DMEM-RPMI', index=False)
writer.save()
#make_medium_conditions()
def calculate_alpha():
    book = load_workbook(
        r'./../../data/Medium_Component_Maps/final_medium.xlsx')
    writer = pd.ExcelWriter(
        r'./../../data/Medium_Component_Maps/final_medium.xlsx', engine='openpyxl')
    writer.book = book
    writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
    rpmi = pd.read_excel(r'./../../data/Medium_Component_Maps/final_medium.xlsx',
                         sheet_name='RPMI', index_col='Components')
    rpmi = average_all_duplicates(rpmi)
    rpmi["LB"] = -1
for medium in writer.sheets:
if medium == "Summary":
pass
else:
other_medium = pd.read_excel(
                r'./../../data/Medium_Component_Maps/final_medium.xlsx', sheet_name=medium, index_col='Components')
            other_medium = average_all_duplicates(other_medium)
            other_medium["LB"] = -1
            other_medium['alpha'] = other_medium['g/L'].divide(
                rpmi['g/L'], axis=0, fill_value=0)
other_medium['alpha'] = other_medium['alpha'].replace(np.inf, 10)
other_medium.to_excel(writer, sheet_name=medium, index=True)
writer.save()
#calculate_alpha()
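# calculate_alpha scales every medium against RPMI: alpha = (g/L in this medium) /
# (g/L in RPMI), computed per component. For example, a component at 4.0 g/L in a
# medium and 2.0 g/L in RPMI gets alpha = 2.0; components with a zero or unbounded
# RPMI denominator come out as inf and are clamped to 10.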
def map_recon1_xchange_to_medium():
    book = load_workbook(
        r'./../../data/Medium_Component_Maps/final_medium.xlsx')
    writer = pd.ExcelWriter(
        r'./../../data/Medium_Component_Maps/final_medium2.xlsx', engine='openpyxl')
    writer.book = book
    writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
    metabolite_map = pd.read_csv(
        './../../data/metabolicModel_maps/RECON3_ExchangeReaction_Map.csv')
for medium in writer.sheets:
if medium == "Summary":
pass
else:
            df = pd.read_excel(
                r'./../../data/Medium_Component_Maps/final_medium.xlsx', sheet_name=medium)
            merged_df = pd.merge(
                df, metabolite_map, how='inner', left_on=['Components'], right_on=['Metabolite Name'])
            merged_df = merged_df.drop_duplicates(keep='first')
            merged_df.to_excel(
                writer, sheet_name=medium, index=False, header=True)
writer.save()
#map_recon1_xchange_to_medium()
def scale_LB():
# load the excel file so you don't overwrite the excel sheet
    book = load_workbook(
        r'./../../data/Medium_Component_Maps/final_medium2.xlsx')
    writer = pd.ExcelWriter(
        r'./../../data/Medium_Component_Maps/final_medium3.xlsx', engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
default_uptake_rate = pd.read_excel(
        r'./../../data/Medium_Component_Maps/final_medium2.xlsx', sheet_name='RPMI', index_col='Components')
for medium in writer.sheets:
if medium == "Summary":
pass
else:
uptake_rate_to_change = pd.read_excel(
                r'./../../data/Medium_Component_Maps/final_medium2.xlsx', sheet_name=medium, index_col='Components')
            uptake_rate_to_change['Adjusted LB'] = default_uptake_rate['LB'].multiply(
uptake_rate_to_change['alpha'], axis=0, fill_value=1.00)
uptake_rate_to_change.to_excel(
writer, sheet_name=medium, index=True)
writer.save()
#scale_LB()
```
#### File: python/textMining/mineHistoneGenes.py
```python
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import pandas as pd
pages = set()
def getGenesFromGO():
global pages
html = urlopen("http://epifactors.autosome.ru/genes")
bso = BeautifulSoup(html, features="html.parser")
table = bso.find('table')
rows = table.find_all('tr')
columnNames = ["HGNC_Symbol", "HGNC_ID", "Name", "UniProt",
"Function", "Complex", "Substrate", "Product"]
for tr in rows:
td = tr.find_all('td')
row = [i.text for i in td]
row = [i.replace('\n', '') for i in row]
row = [i.replace('(details)', '') for i in row]
        # Drop the columns that are not needed; delete from the highest index first so
        # the earlier indices stay valid.
        for index in sorted((3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15), reverse=True):
            if index < len(row):
                del row[index]
print(row)
#for link in bso.find_all('td'):
# print(link)
#getGenesFromGO()
def pdHTMLdf(html):
array = pd.read_html(html, flavor='bs4')
df = array[0]
return df
#html = "http://epifactors.autosome.ru/genes"
#df = pdHTMLdf(html)
#df.to_csv('epifactors.csv')
#df = pd.read_csv('./epifactors.csv')
#df = df[~df['Product'].str.contains('#')]
#df.to_csv('cleanEpifactors.csv', index=False)
``` |
{
"source": "aaditkachalia/EzHire-AWS",
"score": 3
} |
#### File: EzHire-AWS/Ezhire-Backend/answer_evaluation_draft_9.py
```python
import pandas as pd
import random
from sentence_transformers import SentenceTransformer
import scipy.spatial
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
questionsdf = pd.read_csv("question-dataset.csv")
topics = questionsdf.topic.unique()
topicsList = topics.tolist()
answerDf = pd.read_csv("answer-dataset.csv")
answerList = answerDf.answer.tolist()
corpus = answerList
# followupQ = False
# add topics the interviewer wants to judge on
starTopic = []
embedder = SentenceTransformer('bert-base-nli-mean-tokens')
corpus_embeddings = embedder.encode(corpus)
resultDf = pd.DataFrame(columns=['questionid', 'topic', 'difficulty', 'question', 'userAnswer', 'idealAnswers', 'score'])
counter = 0
topicScore = 0
currentTopic = ""
startCounter = 0
currentEasyQuestionsList = []
currentMediumQuestionsList = []
currentHardQuestionsList = []
greetings = ["Let's move to a new topic. ", "Great! Moving on, ", "Awesome! Moving forward.. ", "Moving on.. ", "Let's take this discussion forward. "]
fillers = []
lowScoreFillers = ["Are you nervous? Try taking a deep breath and relax. \n ", "Don't stress, you got this! \n "]
goodScoreFillers = ["Great answer, keep going! \n ", "Awesome! \n ", "Looks like you knew this one! \n ", "Good to see you know what you're talking about! \n ", "Absolutely correct! \n "]
def requiredDownloads():
nltk.download('stopwords')
nltk.download('punkt')
main(topics)
def evaluateAnswer(answer):
global counter
global topicScore
global questionsdf
global topics
global answerDf
global answerList
global corpus
global starTopic
global embedder
global corpus_embeddings
global resultDf
global currentTopic
global nextQuestionId
closest_n = 5
idealAnsStr = ""
topFive = []
topFiveScores = []
queries = [answer]
query_embeddings = embedder.encode(queries)
for (query, query_embedding) in zip(queries, query_embeddings):
distances = scipy.spatial.distance.cdist([query_embedding],
corpus_embeddings, 'cosine')[0]
results = zip(range(len(distances)), distances)
results = sorted(results, key=lambda x: x[1])
idealAnswerDf = answerDf.loc[answerDf['qid'] == nextQuestionId]
topic = questionsdf.loc[questionsdf['id'] == nextQuestionId, 'topic'].iloc[0]
nextQuestionDifficulty = questionsdf.loc[questionsdf['id'] == nextQuestionId, 'difficulty'].iloc[0]
nextQuestion = questionsdf.loc[questionsdf['id'] == nextQuestionId, 'question'].iloc[0]
idealAnswerScore = []
idealAnsStr = "; ".join([str(n) for n in idealAnswerDf.answer.tolist()])
for idealAnswer in idealAnswerDf.answer.tolist():
ideal_text_tokens = word_tokenize(idealAnswer)
idealAnswerWithoutSW = [word for word in ideal_text_tokens if not word in stopwords.words()]
answer_tokens = word_tokenize(answer.lower())
answerWithoutSW = [word for word in answer_tokens if not word in stopwords.words()]
wordfreq = []
for w in answerWithoutSW:
wordfreq.append(answerWithoutSW.count(w))
if len(answerWithoutSW) < (len(idealAnswerWithoutSW) / 3) or max(wordfreq)>5:
idealAnswerScore.append(0)
rating = max(idealAnswerScore)
continue
for (idx, distance) in results[0:closest_n]:
topFive.append(corpus[idx].strip())
topFiveScores.append(1 - distance)
if idealAnswer in topFive:
idealIndex = topFive.index(idealAnswer)
rankScore = (5 - idealIndex) * 0.6
print ("Rank Points: ", rankScore)
differenceScore = topFiveScores[0] \
- topFiveScores[idealIndex]
print ('difference score initial: ', differenceScore)
if differenceScore > 0.3:
differenceScore = 0
elif differenceScore == 0:
differenceScore = 1
else:
differenceScore = (5 - differenceScore * 16.67) \
* 0.2
print ('\n Difference Score: ', differenceScore)
similarityScore = 5 * topFiveScores[idealIndex] * 0.2
print ('\n Similarity Score: ', similarityScore)
finalScore = round(rankScore + differenceScore + similarityScore)
print ('\n Total points for this answer: ', finalScore)
idealAnswerScore.append(finalScore)
else:
print("\n 0 points for this answer.")
idealAnswerScore.append(0)
rating = max(idealAnswerScore)
resultDf = resultDf.append({'questionid': nextQuestionId, 'topic': topic, 'difficulty': nextQuestionDifficulty, 'question': nextQuestion, 'userAnswer': answer, 'idealAnswers': idealAnsStr, 'score': rating}, ignore_index = True)
return rating
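# Scoring sketch for evaluateAnswer (a 0-5 rubric, derived from the weights in the code
# above): if the matching ideal answer is ranked 1st among the top-5 nearest corpus
# answers with cosine similarity 0.85, then rank points = (5 - 0) * 0.6 = 3.0, the
# difference score = 1.0 (zero gap to the top hit), the similarity score = 5 * 0.85 * 0.2
# = 0.85, and the answer is scored round(3.0 + 1.0 + 0.85) = 5.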
def generateQuestion(topic, difficulty, followup = False):
global counter
global topicScore
global questionsdf
global topics
global answerDf
global answerList
global corpus
global starTopic
global embedder
global corpus_embeddings
global resultDf
global currentTopic
global nextQuestionId
global currentEasyQuestionsList
global currentMediumQuestionsList
global currentHardQuestionsList
if followup == True:
nextQuestionId = questionsdf.loc[questionsdf['id'] == nextQuestionId, 'followup'].iloc[0]
nextQuestion = questionsdf.loc[questionsdf['id'] == nextQuestionId, 'question'].iloc[0]
followup = False
# return nextQuestion
if difficulty == "E":
nextQuestionId = random.choice(currentEasyQuestionsList)
nextQuestion = questionsdf.loc[questionsdf['id'] == nextQuestionId, 'question'].iloc[0]
currentEasyQuestionsList.remove(nextQuestionId)
elif difficulty == "M":
nextQuestionId = random.choice(currentMediumQuestionsList)
nextQuestion = questionsdf.loc[questionsdf['id'] == nextQuestionId, 'question'].iloc[0]
currentMediumQuestionsList.remove(nextQuestionId)
elif difficulty == "H":
nextQuestionId = random.choice(currentHardQuestionsList)
nextQuestion = questionsdf.loc[questionsdf['id'] == nextQuestionId, 'question'].iloc[0]
currentHardQuestionsList.remove(nextQuestionId)
return nextQuestion
def topicWiseScoring(results):
columns = ["topic", "easy_score", "medium_score", "hard_score", "easy_answered", "medium_answered", "hard_answered", "total_score", "out_of", "proficiency"]
topicScores = pd.DataFrame(columns = columns)
for topic in results.topic.unique():
easy_score = medium_score = hard_score = 0
easy_answered = medium_answered = hard_answered = 0
total_score = 0
proficiency = 0
topicDf = results[results["topic"] == topic]
out_of = 5 * (len(topicDf))
if len(topicDf[topicDf['difficulty'] == "E"]) != 0:
easy = topicDf[topicDf['difficulty'] == "E"]
easy_score = easy.score.sum()
easy_answered = len(easy)
total_score = total_score + easy_score
proficiency = proficiency + ((easy_score/(5*easy_answered))*25)
if len(topicDf[topicDf['difficulty'] == "M"]) != 0:
medium = topicDf[topicDf['difficulty'] == "M"]
medium_score = medium.score.sum()
medium_answered = len(medium)
total_score = total_score + medium_score
proficiency = proficiency + ((medium_score/(5*medium_answered))*40)
if len(topicDf[topicDf['difficulty'] == "H"]) != 0:
hard = topicDf[topicDf['difficulty'] == "H"]
hard_score = hard.score.sum()
hard_answered = len(hard)
total_score = total_score + hard_score
proficiency = proficiency + ((hard_score/(5*hard_answered))*35)
proficiency = round(proficiency, -1)
topicScores = topicScores.append({"topic": topic, "easy_score": easy_score, "medium_score": medium_score, "hard_score": hard_score,
"easy_answered": easy_answered, "medium_answered": medium_answered, "hard_answered": hard_answered,
"total_score": total_score, "out_of": out_of, "proficiency": proficiency}, ignore_index = True)
return topicScores
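# Proficiency example (illustrative scores): easy 8/10, medium 12/20, hard 5/15 gives
# (8/10)*25 + (12/20)*40 + (5/15)*35 = 20 + 24 + 11.7 = 55.7, which round(..., -1)
# reports as 60 — easy, medium and hard questions are weighted 25/40/35.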
## MAIN:
def main(answer):
global counter
global topicScore
global questionsdf
global topics
global answerDf
global answerList
global corpus
global starTopic
global embedder
global corpus_embeddings
global resultDf
global topicsList
global currentTopic
global nextQuestionId
global startCounter
global currentEasyQuestionsList
global currentMediumQuestionsList
global currentHardQuestionsList
global greetings
global fillers
global goodScoreFillers
global lowScoreFillers
if startCounter == 0:
currentTopic = random.choice(topicsList)
topicDf = questionsdf[questionsdf['topic'] == currentTopic]
questionsDifficultyDf = topicDf[topicDf['difficulty'] == "E"]
currentEasyQuestionsList = questionsDifficultyDf.id.tolist()
currentEasyQuestionsList = [i for i in currentEasyQuestionsList if i < 9000]
questionsDifficultyDf = topicDf[topicDf['difficulty'] == "M"]
currentMediumQuestionsList = questionsDifficultyDf.id.tolist()
currentMediumQuestionsList = [i for i in currentMediumQuestionsList if i < 9000]
questionsDifficultyDf = topicDf[topicDf['difficulty'] == "H"]
currentHardQuestionsList = questionsDifficultyDf.id.tolist()
currentHardQuestionsList = [i for i in currentHardQuestionsList if i < 9000]
nextQuestion = generateQuestion(currentTopic,"E")
nextQuestion = "Let's get started! Here's your first question: " + nextQuestion
startCounter += 1
else:
currentAnswerScore = evaluateAnswer(answer)
if questionsdf.loc[questionsdf['id'] == nextQuestionId, 'followup'].iloc[0] != 9999 and currentAnswerScore in range(2,5):
nextQuestion = generateQuestion(currentTopic, "F", followup = True)
# return nextQuestion
else:
topicScore = topicScore + currentAnswerScore
if topicScore < 5 and counter < 2 and len(currentEasyQuestionsList) != 0:
counter += 1
nextQuestion = generateQuestion(currentTopic, "E")
if counter == 2:
nextQuestion = random.choice(lowScoreFillers) + nextQuestion
elif topicScore in range(5,15) and counter < 4 and len(currentMediumQuestionsList) != 0:
counter += 1
nextQuestion = generateQuestion(currentTopic, "M")
if counter == 2:
nextQuestion = random.choice(goodScoreFillers) + nextQuestion
elif counter == 4:
nextQuestion = random.choice(lowScoreFillers) + nextQuestion
elif topicScore in range(15,25) and counter < 6 and len(currentHardQuestionsList) != 0:
counter += 1
nextQuestion = generateQuestion(currentTopic, "H")
if counter == 4:
nextQuestion = random.choice(goodScoreFillers) + nextQuestion
elif counter == 6:
nextQuestion = random.choice(lowScoreFillers) + nextQuestion
else:
try:
topicsList.remove(currentTopic)
currentTopic = random.choice(topicsList)
topicDf = questionsdf[questionsdf['topic'] == currentTopic]
questionsDifficultyDf = topicDf[topicDf['difficulty'] == "E"]
currentEasyQuestionsList = questionsDifficultyDf.id.tolist()
currentEasyQuestionsList = [i for i in currentEasyQuestionsList if i < 9000]
questionsDifficultyDf = topicDf[topicDf['difficulty'] == "M"]
currentMediumQuestionsList = questionsDifficultyDf.id.tolist()
currentMediumQuestionsList = [i for i in currentMediumQuestionsList if i < 9000]
questionsDifficultyDf = topicDf[topicDf['difficulty'] == "H"]
currentHardQuestionsList = questionsDifficultyDf.id.tolist()
currentHardQuestionsList = [i for i in currentHardQuestionsList if i < 9000]
counter = 0
topicScore = 0
nextQuestion = generateQuestion(currentTopic,"E")
greet = random.choice(greetings)
nextQuestion = greet + nextQuestion
greetings.remove(greet)
except:
nextQuestion = "Thank you!"
score_chartDf = topicWiseScoring(resultDf)
scores_csv = score_chartDf.to_csv(r"scores.csv",index = None, header=True)
export_csv = resultDf.to_csv(r"results.csv",index = None, header=True)
return nextQuestion
if __name__== "__main__":
ques = main("")
print(ques)
while True:
ans = input("your answer: ")
ques = main(ans)
print(ques)
``` |
{
"source": "aaditkamat/Audily",
"score": 3
} |
#### File: Audily/src/main.py
```python
from flask import Flask, Response, request
from flask import send_file
import gtts, pytesseract, cv2
import numpy as np
from PIL import Image
app = Flask(__name__)
@app.route('/')
def hello():
return "Welcome to the Audily API!"
@app.route('/audify', methods=['POST'])
def audify():
    # np.fromstring is deprecated for binary input; frombuffer reads the raw bytes directly.
    np_arr = np.frombuffer(request.data, np.uint8)
img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
full_text = pytesseract.image_to_string(img) # OCR
random_id = "".join([str(np.random.randint(0, 9)) for i in range(8)])
tts = gtts.gTTS(full_text) # TTS
path_to_file = "./all_recordings/recording_{}.mp3".format(random_id)
tts.save(path_to_file)
return send_file(
path_to_file,
mimetype="audio/mp3",
as_attachment=True,
attachment_filename="recording_{}.mp3".format(random_id)
)
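# Example client call (a sketch, not part of the service; assumes the app is running
# locally on Flask's default port):
#   import requests
#   with open("page.jpg", "rb") as image_file:
#       response = requests.post("http://127.0.0.1:5000/audify", data=image_file.read())
#   with open("recording.mp3", "wb") as audio_file:
#       audio_file.write(response.content)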
if __name__ == "__main__":
app.run(debug=True)
``` |
{
"source": "aaditkamat/calendar",
"score": 4
} |
#### File: aaditkamat/calendar/main.py
```python
import click
import typer
import datetime
import emoji
app = typer.Typer()
SPECIAL_KEYWORDS = ["today", "yesterday", "tomorrow"]
CURRENT_DATE = datetime.datetime.now()
DAYS_OF_THE_MONTH = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
DAY_EMOJIS = {
"Monday": ":confounded_face:",
"Tuesday": ":smirking_face:",
"Wednesday": ":relieved_face:",
"Thursday": ":smiling_face_with_smiling_eyes:",
"Friday": ":winking_face_with_tongue:",
"Saturday": ":squinting_face_with_tongue:",
"Sunday": ":woozy_face:",
}
def convert_to_date_string(keyword: str):
if keyword == "today":
date_string = CURRENT_DATE.strftime("%d/%m/%Y")
elif keyword == "tomorrow":
date_string = (CURRENT_DATE + datetime.timedelta(1)).strftime("%d/%m/%Y")
else:
date_string = (CURRENT_DATE - datetime.timedelta(1)).strftime("%d/%m/%Y")
return date_string
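# For example, if today is 07/06/2021, convert_to_date_string("tomorrow") returns
# "08/06/2021" and convert_to_date_string("yesterday") returns "06/06/2021".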
## The function that handles the day command. The day command accepts either a string
## that represents the date in DD/MM/YYYY format or one of the special keywords:
## today, tomorrow and yesterday . Also adds an emoji at the end based
## on what day the date falls on.
@app.command()
def day(date_string: str):
try:
# Allow user to key in special keywords today, tomorrow and yesterday
if date_string.strip().lower() in SPECIAL_KEYWORDS:
date_string = convert_to_date_string(date_string.strip().lower())
date = CURRENT_DATE.strptime(date_string, "%d/%m/%Y")
day = date.strftime("%A")
day += f" {emoji.emojize(DAY_EMOJIS[day])}"
diff = date - CURRENT_DATE
if date_string == CURRENT_DATE.strftime("%d/%m/%Y"):
typer.echo(f"Today is {day}")
elif diff.days < 0:
typer.echo(f"{date.strftime('%B %d %Y')} was a {day}")
else:
typer.echo(f"{date.strftime('%B %d %Y')} will be a {day}")
except ValueError:
        raise click.BadArgumentUsage(
            f"DATE_STRING argument {date_string} is not of the expected format DD/MM/YYYY"
        )
def calculate_days_diff(start_date, end_date):
if end_date.day >= start_date.day:
return end_date.day - start_date.day
else:
        return (DAYS_OF_THE_MONTH[start_date.month - 1] - start_date.day) + end_date.day
def calculate_months_diff(start_date, end_date):
if end_date.month >= start_date.month:
return end_date.month - start_date.month
else:
return (end_date.month + 12) - start_date.month
def calculate_years_diff(start_date, end_date):
if start_date.month >= end_date.month:
return end_date.year - start_date.year
else:
return end_date.year - start_date.year - 1
## The function that handles the duration command. The duration command accepts two strings
## in DD/MM/YYYY format. The first is the start date and the other is the end date. This
## command outputs the duration between the two dates in terms of days, months and years.
@app.command()
def duration(
start_date_string: str,
end_date_string: str,
):
try:
start_date, end_date = (
CURRENT_DATE.strptime(start_date_string, "%d/%m/%Y"),
CURRENT_DATE.strptime(end_date_string, "%d/%m/%Y"),
)
diff = end_date - start_date
if diff.days < 0:
raise click.BadArgumentUsage(
f"The start date must come after the end date"
)
days_diff = calculate_days_diff(start_date, end_date)
months_diff = calculate_months_diff(start_date, end_date)
years_diff = calculate_years_diff(start_date, end_date)
print(
f"The duration between {start_date_string} and {end_date_string} is: {years_diff} years, {months_diff} months and {days_diff} days."
)
except ValueError:
raise click.BadArgumentUsage(
f"Either the START_DATE_STRING argument {start_date_string} or the END_DATE_STRING argument {end_date_string} is not of the expected format DD/MM/YYYY"
)
if __name__ == "__main__":
app()
``` |
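A short sketch of exercising both commands through typer's test runner instead of the shell, assuming the CLI above is saved as `main.py` as in the repo:

```python
# Illustrative only: drive the typer app defined above programmatically.
from typer.testing import CliRunner
from main import app  # assumes the calendar CLI above lives in main.py

runner = CliRunner()
print(runner.invoke(app, ["day", "today"]).output)
print(runner.invoke(app, ["duration", "01/01/2020", "15/03/2021"]).output)
```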
{
"source": "aaditkamat/competitive-programming",
"score": 4
} |
#### File: 2019/Day 1/day1.py
```python
def getTotal(num):
total = 0
while True:
num = num // 3 - 2
if num < 0:
break
total += num
return total
part1 = lambda lst: sum([num // 3 - 2 for num in lst])
part2 = lambda lst: sum([getTotal(num) for num in lst])
lst = []
while True:
try:
lst.append(int(input()))
except EOFError:
break
print(part1(lst))
print(part2(lst))
```
#### File: 2020/Python/day1.py
```python
import argparse
import sys
from typing import List, Tuple, Set, Union
# Part 1
def two_sum(lst: List[int], total: int) -> Union[Tuple[int, int], None] :
container: Set[int] = set()
for num in lst:
if total - num in container:
return (num, total - num)
else:
container.add(num)
# Part 2
def three_sum(lst: List[int], total: int) -> Union[Tuple[int, int, int], None]:
container: Set[int] = set()
for num in lst:
tup = two_sum(lst, total - num)
if tup:
return (num, tup[0], tup[1])
else:
container.add(num)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Solve day1 of AoC 2020")
parser.add_argument(
"input file", metavar="FILE", help="name of input file (day1.in)"
)
args: argparse.Namespace = parser.parse_args()
file_name: str = sys.argv[1]
with open(file_name) as f:
lst: List[int] = list(map(int, f.read().split()))
result1: Union[Tuple[int, int], None] = two_sum(lst, 2020)
if result1:
num1, num2 = result1
print(f"Part 1: The product of {num1} and {num2} is: {num1 * num2}")
result2: Union[Tuple[int, int, int], None] = three_sum(lst, 2020)
if result2:
num1, num2, num3 = result2
print(
f"Part 2: The product of {num1}, {num2} and {num3} is: {num1 * num2 * num3}"
)
```
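A quick sanity check, added for illustration, using the well-known AoC 2020 sample report and assuming `two_sum` and `three_sum` above are in scope:

```python
# The sample entries that sum to 2020 are (1721, 299) and (979, 366, 675).
sample = [1721, 979, 366, 299, 675, 1456]
assert sorted(two_sum(sample, 2020)) == [299, 1721]
assert sorted(three_sum(sample, 2020)) == [366, 675, 979]
```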
#### File: competitive-programming/LeetCode/Balanced Binary Tree.py
```python
class Solution:
def height(self, root: TreeNode) -> int:
if not root:
return 0
return 1 + max(self.height(root.left), self.height(root.right))
def isBalanced(self, root: TreeNode) -> bool:
if not root:
return True
leftHeight = self.height(root.left)
rightHeight = self.height(root.right)
if abs(leftHeight - rightHeight) >= 2:
return False
return self.isBalanced(root.left) and self.isBalanced(root.right)
```
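LeetCode injects `TreeNode` into the judge environment; a hypothetical local harness would define it before the `Solution` class, roughly as follows:

```python
# Minimal stand-in for LeetCode's TreeNode (define it above Solution when running locally).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

balanced = TreeNode(1, TreeNode(2, TreeNode(3)), TreeNode(4))
skewed = TreeNode(1, TreeNode(2, TreeNode(3, TreeNode(4))))
assert Solution().isBalanced(balanced) is True
assert Solution().isBalanced(skewed) is False
```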
#### File: competitive-programming/LeetCode/Construct Binary Tree from Preorder and Inorder Traversal.py
```python
class Solution:
def getMapping(self, inorder: List[int]) -> dict:
mapping = {}
for index in range(len(inorder)):
num = inorder[index]
mapping[num] = index
return mapping
def buildTreeRecursive(self, preorder: List[int], mapping: dict, left: int, right: int) -> TreeNode:
if right < left:
return None
num = preorder.pop(0)
curr = TreeNode(num)
curr.left = self.buildTreeRecursive(preorder, mapping, left, mapping[num] - 1)
curr.right = self.buildTreeRecursive(preorder, mapping, mapping[num] + 1, right)
return curr
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
if not len(preorder) == len(inorder):
return None
mapping = self.getMapping(inorder)
return self.buildTreeRecursive(preorder, mapping, 0, len(inorder) - 1)
```
#### File: competitive-programming/LeetCode/Contains Duplicate.py
```python
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
count_store = {}
for num in nums:
if num in count_store:
return True
else:
count_store[num] = 1
return False
```
#### File: competitive-programming/LeetCode/Count Primes.py
```python
class Solution:
def countPrimes(self, n: int) -> int:
if n == 0 or n == 1:
return 0
sieve = [True] * (n - 2)
ctr, start = 0, 2
for i in range(len(sieve)):
if sieve[i]:
j = 2
while (i + 2) * j <= len(sieve) + 1:
sieve[(i + 2) * j - 2] = False
j += 1
ctr += 1
return ctr
```
#### File: competitive-programming/LeetCode/Defanging an IP Address.py
```python
import re

class Solution:
def defangIPaddr(self, address: str) -> str:
return re.sub("\.", "[.]", address)
```
#### File: competitive-programming/LeetCode/Diet Plan Performance.py
```python
class Solution:
def dietPlanPerformance(self, calories: List[int], k: int, lower: int, upper: int) -> int:
count = 0
window = sum(calories[0: k])
if window < lower:
count -= 1
elif window > upper:
count += 1
for i in range(1, len(calories) - k + 1):
window -= calories[i - 1]
window += calories[i + k - 1]
if window < lower:
count -= 1
elif window > upper:
count += 1
return count
```
#### File: competitive-programming/LeetCode/Longest Substring Without Repeating Characters.py
```python
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
count, start, store = 0, 0, {}
for curr in range(len(s)):
char = s[curr]
if char in store:
new_start = store[char] + 1
for prev in range(start, new_start):
del store[s[prev]]
start = new_start
store[char] = curr
count = max(count, curr - start + 1)
return count
```
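A few standard checks, added for illustration, for the sliding-window solution above:

```python
s = Solution()
assert s.lengthOfLongestSubstring("abcabcbb") == 3  # "abc"
assert s.lengthOfLongestSubstring("bbbbb") == 1     # "b"
assert s.lengthOfLongestSubstring("pwwkew") == 3    # "wke"
```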
#### File: competitive-programming/LeetCode/Product of Array Except Self.py
```python
class Solution:
def get_left_products(self, nums: List[int]) -> List[int]:
curr_prod = 1
left_prod = []
for num in nums[: : -1]:
left_prod.append(curr_prod * num)
curr_prod *= num
left_prod.reverse()
left_prod.append(1)
return left_prod
def productExceptSelf(self, nums: List[int]) -> List[int]:
left_prod = self.get_left_products(nums)
curr_prod = 1
result = []
for i in range(len(nums)):
result.append(left_prod[i + 1] * curr_prod)
curr_prod *= nums[i]
return result
```
#### File: competitive-programming/LeetCode/Valid Parentheses.py
```python
class Solution:
def isValid(self, s: str) -> bool:
stack = []
closing = [')', '}', ']']
opening = ['(', '{', '[']
for char in s:
if char in opening:
stack.append(char)
else:
if len(stack) > 0:
compare_char = stack.pop()
if not closing.index(char) == opening.index(compare_char):
return False
else:
return False
return len(stack) == 0
```
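A few illustrative checks for the stack-based matcher above:

```python
check = Solution().isValid
assert check("()[]{}") and check("{[()]}")
assert not check("(]") and not check("([)]") and not check("(")
```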
#### File: competitive-programming/ProjectEuler/problem_2.py
```python
def solution(limit):
    # Sum the even-valued Fibonacci terms that do not exceed limit.
    a, b = 1, 2
    result = 0
    while a <= limit:
        if a % 2 == 0:
            result += a
        a, b = b, a + b
    return result
print(solution(4000000))
``` |
{
"source": "aaditkamat/dr-ranking-metric",
"score": 3
} |
#### File: dr-ranking-metric/src/preprocessor.py
```python
import codecs
import yaml
from typing import Tuple, Dict
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.python.framework import ops
from model import MF
def sigmoid(x: np.ndarray) -> np.ndarray:
"""Calculate sigmoid."""
return 1 / (1 + np.exp(-x))
def transform_rating(ratings: np.ndarray, eps: float = 0.1) -> np.ndarray:
"""Transform ratings into graded relevance information."""
ratings -= 1
return eps + (1.0 - eps) * (2 ** ratings - 1) / (2 ** np.max(ratings) - 1)
def preprocess_movielens(
power: float = 1.0, seed: int = 12345
) -> Dict[str, np.ndarray]:
"""Load and preprocess ML 100K."""
np.random.seed(seed)
with open("../config.yaml", "rb") as f:
config = yaml.safe_load(f)
val_size = config["val_size"]
hyperparams = config["mf_hyperparams"]
with codecs.open(
f"../data/ml-100k/ml-100k.data", "r", "utf-8", errors="ignore"
) as f:
data = pd.read_csv(f, delimiter="\t", header=None).loc[:, :2]
data.rename(columns={0: "user", 1: "item", 2: "rate"}, inplace=True)
data.user, data.item = data.user - 1, data.item - 1
data = data.values
num_users, num_items = data[:, 0].max() + 1, data[:, 1].max() + 1
user_item_ = (
pd.DataFrame(np.zeros((num_users, num_items)))
.stack()
.reset_index()
.values[:, :2]
)
# generate CVR by MF.
ops.reset_default_graph()
sess = tf.Session()
tf.set_random_seed(seed)
model = MF(
num_users=num_users,
num_items=num_items,
dim=hyperparams["dim"],
eta=hyperparams["eta"],
lam=hyperparams["lam"],
)
# initialise all the TF variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
for _ in np.arange(hyperparams["iters"]):
idx = np.random.choice(np.arange(data.shape[0]), size=hyperparams["batch_size"])
_ = sess.run(
model.apply_grads_mse,
feed_dict={
model.users: data[idx, 0],
model.items: data[idx, 1],
model.labels: np.expand_dims(data[idx, 2], 1),
model.pscore: np.ones((hyperparams["batch_size"], 1)),
},
)
cvr = sess.run(
model.preds,
feed_dict={model.users: user_item_[:, 0], model.items: user_item_[:, 1]},
)
cvr = np.clip(cvr.flatten(), 1, 5)
cvr = transform_rating(cvr, eps=0.1)
cv = np.random.binomial(n=1, p=cvr)
# generate CTR by logistic MF.
all_data = (
pd.DataFrame(np.zeros((num_users, num_items)))
.stack()
.reset_index()
.values[:, :2]
)
pos_data = data[:, :2]
unlabeled_data = np.array(
list(set(map(tuple, all_data)) - set(map(tuple, pos_data))), dtype=int
)
data = np.r_[
np.c_[pos_data, np.ones(pos_data.shape[0])],
np.c_[unlabeled_data, np.zeros(unlabeled_data.shape[0])],
]
ops.reset_default_graph()
sess = tf.Session()
tf.set_random_seed(seed)
model = MF(
num_users=num_users,
num_items=num_items,
dim=hyperparams["dim"],
eta=hyperparams["eta"],
lam=hyperparams["lam"],
)
# initialise all the TF variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
for _ in np.arange(hyperparams["iters"]):
idx = np.random.choice(np.arange(data.shape[0]), size=hyperparams["batch_size"])
_ = sess.run(
model.apply_grads_ce,
feed_dict={
model.users: data[idx, 0],
model.items: data[idx, 1],
model.labels: np.expand_dims(data[idx, 2], 1),
model.pscore: np.ones((hyperparams["batch_size"], 1)),
},
)
ctr = sess.run(
model.preds,
feed_dict={model.users: user_item_[:, 0], model.items: user_item_[:, 1]},
)
ctr = sigmoid(ctr.flatten()) ** power
ct = np.random.binomial(n=1, p=ctr)
train_indicator = np.random.binomial(n=1, p=(1.0 - val_size), size=ct.shape[0])
ct_train, ct_val = ct * train_indicator, ct * (1 - train_indicator)
train = np.c_[user_item_, ct_train * cv]
val = np.c_[user_item_, ct_val * cv, ct_val, cv, ctr * val_size, cvr]
test = np.c_[user_item_, ct * cv, ct, cv, ctr, cvr]
return train, val, test
def preprocess_yahoo_coat(
data: str, val_ratio: float = 0.3, seed: int = 12345
) -> Tuple:
"""Load and preprocess Yahoo! R3 and Coat datasets."""
np.random.seed(seed)
with open("../config.yaml", "rb") as f:
hyperparams = yaml.safe_load(f)["mf_hyperparams"]
if data == "yahoo":
cols = {0: "user", 1: "item", 2: "rate"}
with codecs.open(
f"../data/yahoo/train.txt", "r", "utf-8", errors="ignore"
) as f:
train_ = pd.read_csv(f, delimiter="\t", header=None)
train_.rename(columns=cols, inplace=True)
with codecs.open(f"../data/yahoo/test.txt", "r", "utf-8", errors="ignore") as f:
test_ = pd.read_csv(f, delimiter="\t", header=None)
test_.rename(columns=cols, inplace=True)
for data_ in [train_, test_]:
data_.user, data_.item = data_.user - 1, data_.item - 1
elif data == "coat":
cols = {"level_0": "user", "level_1": "item", 2: "rate", 0: "rate"}
with codecs.open(
f"../data/coat/train.ascii", "r", "utf-8", errors="ignore"
) as f:
train_ = pd.read_csv(f, delimiter=" ", header=None)
train_ = train_.stack().reset_index().rename(columns=cols)
train_ = train_[train_.rate != 0].reset_index(drop=True)
with codecs.open(
f"../data/coat/test.ascii", "r", "utf-8", errors="ignore"
) as f:
test_ = pd.read_csv(f, delimiter=" ", header=None)
test_ = test_.stack().reset_index().rename(columns=cols)
test_ = test_[test_.rate != 0].reset_index(drop=True)
# binarize ratings
for data_ in [train_, test_]:
data_.rate = np.array(data_.rate >= 4, dtype=int)
# estimate propensity score by MF
train, test = train_.values, test_.values
pos_train = train_[train_.rate == 1].values
pos_test = test_[test_.rate == 1].values
# preprocess datasets
unique_user_train, user_counts_train = np.unique(
pos_train[:, 0], return_counts=True
)
unique_user_train = unique_user_train[user_counts_train >= 2]
unique_user_test, user_counts_test = np.unique(pos_test[:, 0], return_counts=True)
unique_user_test = unique_user_test[user_counts_test <= 9]
valid_users = np.intersect1d(unique_user_train, unique_user_test)
train = train[np.array([u in valid_users for u in train[:, 0]])]
test = test[np.array([u in valid_users for u in test[:, 0]])]
train[:, 0] = stats.rankdata(train[:, 0], method="dense") - 1
test[:, 0] = stats.rankdata(test[:, 0], method="dense") - 1
num_users, num_items = train[:, 0].max() + 1, train[:, 1].max() + 1
all_data = (
pd.DataFrame(np.zeros((num_users, num_items)))
.stack()
.reset_index()
.values[:, :2]
)
unobs_data = np.array(
list(set(map(tuple, all_data)) - set(map(tuple, train[:, :2])))
)
train = np.r_[
np.c_[train, np.ones(train.shape[0])],
np.c_[unobs_data, np.zeros((unobs_data.shape[0], 2))],
]
train, val = train_test_split(train, test_size=val_ratio, random_state=seed)
unobs_data = np.array(list(set(map(tuple, all_data)) - set(map(tuple, val[:, :2]))))
val = np.r_[val, np.c_[unobs_data, np.zeros((unobs_data.shape[0], 2))]]
# define the matrix factorization model
ops.reset_default_graph()
sess = tf.Session()
tf.set_random_seed(seed)
model = MF(
num_users=num_users,
num_items=num_items,
dim=hyperparams["dim"],
eta=hyperparams["eta"],
lam=hyperparams["lam"],
)
# initialise all the TF variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
for _ in np.arange(hyperparams["iters"]):
idx = np.random.choice(np.arange(val.shape[0]), size=hyperparams["batch_size"])
_ = sess.run(
model.apply_grads_ce,
feed_dict={
model.users: val[idx, 0],
model.items: val[idx, 1],
model.labels: np.expand_dims(val[idx, 3], 1),
model.pscore: np.ones((hyperparams["batch_size"], 1)),
},
)
# obtain dense user-item matrix
ctr_hat = sess.run(
model.preds,
feed_dict={
model.users: val[:, 0].astype(int),
model.items: val[:, 1].astype(int),
},
)
val = np.c_[val, sigmoid(ctr_hat)]
# estimate relevance parameter (gamma) by MF.
ops.reset_default_graph()
sess = tf.Session()
tf.set_random_seed(seed)
model = MF(
num_users=num_users,
num_items=num_items,
dim=hyperparams["dim"],
eta=hyperparams["eta"],
lam=hyperparams["lam"],
)
# observed data
val_obs = val[val[:, 3] == 1]
# initialise all the TF variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
for _ in np.arange(hyperparams["iters"]):
idx = np.random.choice(
np.arange(val_obs.shape[0]), size=hyperparams["batch_size"]
)
_ = sess.run(
model.apply_grads_ce,
feed_dict={
model.users: val_obs[idx, 0],
model.items: val_obs[idx, 1],
model.labels: np.expand_dims(val_obs[idx, 2], 1),
model.pscore: np.expand_dims(val_obs[idx, 4], 1),
},
)
# obtain dense user-item matrix
gamma_hat = sess.run(
model.preds,
feed_dict={
model.users: val[:, 0].astype(int),
model.items: val[:, 1].astype(int),
},
)
val = np.c_[val, sigmoid(gamma_hat)]
# create test data containing all items
all_data = (
pd.DataFrame(np.zeros((num_users, num_items)))
.stack()
.reset_index()
.values[:, :2]
)
unobs_data = np.array(
list(set(map(tuple, all_data)) - set(map(tuple, test[:, :2])))
)
test = np.r_[
np.c_[test, np.ones(test.shape[0])],
np.c_[unobs_data, np.zeros((unobs_data.shape[0], 2))],
]
avg_test_pscore = test[:, -1].mean()
test = np.c_[test, np.ones(test.shape[0]) * avg_test_pscore]
return train, val, test
``` |
{
"source": "aaditkamat/unbiased-pairwise-rec",
"score": 3
} |
#### File: src/evaluate/metrics.py
```python
from typing import Optional
import numpy as np
eps = 1e-3 # propensity clipping
def dcg_at_k(y_true: np.ndarray, y_score: np.ndarray,
k: int, pscore: Optional[np.ndarray] = None) -> float:
"""Calculate a DCG score for a given user."""
y_true_sorted_by_score = y_true[y_score.argsort()[::-1]]
if pscore is not None:
pscore_sorted_by_score = np.maximum(pscore[y_score.argsort()[::-1]], eps)
else:
pscore_sorted_by_score = np.ones_like(y_true_sorted_by_score)
dcg_score = 0.0
final_score = 0.0
k = k if y_true.shape[0] >= k else y_true.shape[0]
if not np.sum(y_true_sorted_by_score) == 0:
dcg_score += y_true_sorted_by_score[0] / pscore_sorted_by_score[0]
for i in np.arange(1, k):
dcg_score += y_true_sorted_by_score[i] / (pscore_sorted_by_score[i] * np.log2(i + 1))
final_score = dcg_score / np.sum(y_true_sorted_by_score) if pscore is None \
else dcg_score / np.sum(1. / pscore_sorted_by_score[y_true_sorted_by_score > 0])
return final_score
def average_precision_at_k(y_true: np.ndarray, y_score: np.ndarray,
k: int, pscore: Optional[np.ndarray] = None) -> float:
"""Calculate a average precision for a given user."""
y_true_sorted_by_score = y_true[y_score.argsort()[::-1]]
if pscore is not None:
pscore_sorted_by_score = np.maximum(pscore[y_score.argsort()[::-1]], eps)
else:
pscore_sorted_by_score = np.ones_like(y_true_sorted_by_score)
average_precision_score = 0.0
final_score = 0.0
k = k if y_true.shape[0] >= k else y_true.shape[0]
if not np.sum(y_true_sorted_by_score) == 0:
for i in np.arange(k):
if y_true_sorted_by_score[i] > 0:
score_ = np.sum(y_true_sorted_by_score[:i + 1] / pscore_sorted_by_score[:i + 1]) / (i + 1)
average_precision_score += score_
final_score = average_precision_score / np.sum(y_true_sorted_by_score) if pscore is None \
else average_precision_score / np.sum(1. / pscore_sorted_by_score[y_true_sorted_by_score > 0])
return final_score
def recall_at_k(y_true: np.ndarray, y_score: np.ndarray,
k: int, pscore: Optional[np.ndarray] = None) -> float:
"""Calculate a recall score for a given user."""
y_true_sorted_by_score = y_true[y_score.argsort()[::-1]]
if pscore is not None:
pscore_sorted_by_score = np.maximum(pscore[y_score.argsort()[::-1]], eps)
else:
pscore_sorted_by_score = np.ones_like(y_true_sorted_by_score)
final_score = 0.
k = k if y_true.shape[0] >= k else y_true.shape[0]
if not np.sum(y_true_sorted_by_score) == 0:
recall_score = np.sum(y_true_sorted_by_score[:k] / pscore_sorted_by_score[:k])
final_score = recall_score / np.sum(y_true_sorted_by_score) if pscore is None \
else recall_score / np.sum(1. / pscore_sorted_by_score[y_true_sorted_by_score > 0])
return final_score
```
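A toy illustration (all values invented, and assuming `dcg_at_k` above is in scope) of how the optional `pscore` argument changes the estimate: without it the functions reduce to the usual metrics, with it each hit is weighted by 1/propensity and normalized by the total inverse-propensity mass of the relevant items:

```python
import numpy as np

y_true = np.array([1, 0, 1, 0])            # observed relevance labels
y_score = np.array([0.9, 0.8, 0.7, 0.1])   # ranker scores
pscore = np.array([0.9, 0.5, 0.2, 0.4])    # assumed observation propensities

print(dcg_at_k(y_true, y_score, k=3))                 # plain DCG@3
print(dcg_at_k(y_true, y_score, k=3, pscore=pscore))  # propensity-weighted DCG@3
```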
#### File: unbiased-pairwise-rec/src/trainer.py
```python
import yaml
from pathlib import Path
from typing import Tuple
import pandas as pd
import numpy as np
import tensorflow as tf
from scipy import sparse
from tensorflow.python.framework import ops
from evaluate.evaluator import aoa_evaluator
from models.expomf import ExpoMF
from models.recommenders import PairwiseRecommender, PointwiseRecommender
def train_expomf(data: str, train: np.ndarray, num_users: int, num_items: int,
n_components: int = 100, lam: float = 1e-6) -> Tuple:
"""Train the expomf model."""
def tocsr(data: np.array, num_user: int, num_item: int) -> sparse.csr_matrix:
"""Convert data to csr_matrix."""
matrix = sparse.lil_matrix((num_users, num_items))
for (u, i, r) in data[:, :3]:
matrix[u, i] = r
return sparse.csr_matrix(matrix)
path = Path(f'../logs/{data}/expomf/emb')
path.mkdir(parents=True, exist_ok=True)
model = ExpoMF(n_components=n_components, random_state=12345, save_params=False,
early_stopping=True, verbose=False, lam_theta=lam, lam_beta=lam)
model.fit(tocsr(train, num_users, num_items))
np.save(file=str(path / 'user_embed.npy'), arr=model.theta)
np.save(file=str(path / 'item_embed.npy'), arr=model.beta)
return model.theta, model.beta
def train_pointwise(sess: tf.Session, model: PointwiseRecommender, data: str,
train: np.ndarray, val: np.ndarray, test: np.ndarray, pscore: np.ndarray,
max_iters: int = 1000, batch_size: int = 256,
model_name: str = 'wmf', is_optuna: bool = False) -> Tuple:
"""Train and evaluate implicit recommender."""
train_loss_list = []
test_loss_list = []
# initialise all the TF variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
ips = model_name == 'relmf'
# pscore for train
pscore = pscore[train[:, 1].astype(int)]
# positive and unlabeled data for training set
pos_train = train[train[:, 2] == 1]
pscore_pos_train = pscore[train[:, 2] == 1]
num_pos = np.sum(train[:, 2])
unlabeled_train = train[train[:, 2] == 0]
pscore_unlabeled_train = pscore[train[:, 2] == 0]
num_unlabeled = np.sum(1 - train[:, 2])
# train the given implicit recommender
np.random.seed(12345)
for i in np.arange(max_iters):
# positive mini-batch sampling
# the same num. of postive and negative samples are used in each batch
sample_size = np.int(batch_size / 2)
pos_idx = np.random.choice(np.arange(num_pos), size=sample_size)
unl_idx = np.random.choice(np.arange(num_unlabeled), size=sample_size)
# mini-batch samples
train_batch = np.r_[pos_train[pos_idx], unlabeled_train[unl_idx]]
pscore_ = np.r_[pscore_pos_train[pos_idx], pscore_unlabeled_train[unl_idx]] if ips else np.ones(batch_size)
# update user-item latent factors and calculate training loss
_, train_loss = sess.run([model.apply_grads, model.unbiased_loss],
feed_dict={model.users: train_batch[:, 0],
model.items: train_batch[:, 1],
model.labels: np.expand_dims(train_batch[:, 2], 1),
model.scores: np.expand_dims(pscore_, 1)})
train_loss_list.append(train_loss)
# calculate a validation score
unl_idx = np.random.choice(np.arange(num_unlabeled), size=val.shape[0])
val_batch = np.r_[val, unlabeled_train[unl_idx]]
pscore_ = np.r_[pscore[val[:, 1].astype(int)], pscore_unlabeled_train[unl_idx]]
val_loss = sess.run(model.unbiased_loss,
feed_dict={model.users: val_batch[:, 0],
model.items: val_batch[:, 1],
model.labels: np.expand_dims(val_batch[:, 2], 1),
model.scores: np.expand_dims(pscore_, 1)})
u_emb, i_emb = sess.run([model.user_embeddings, model.item_embeddings])
    if not is_optuna:  # "~" on a Python bool is a bitwise complement and is always truthy
path = Path(f'../logs/{data}/{model_name}')
(path / 'loss').mkdir(parents=True, exist_ok=True)
np.save(file=str(path / 'loss/train.npy'), arr=train_loss_list)
np.save(file=str(path / 'loss/test.npy'), arr=test_loss_list)
(path / 'emb').mkdir(parents=True, exist_ok=True)
np.save(file=str(path / 'emb/user_embed.npy'), arr=u_emb)
np.save(file=str(path / 'emb/item_embed.npy'), arr=i_emb)
sess.close()
return u_emb, i_emb, val_loss
def train_pairwise(sess: tf.Session, model: PairwiseRecommender, data: str,
train: np.ndarray, val: np.ndarray, test: np.ndarray,
max_iters: int = 1000, batch_size: int = 1024,
model_name: str = 'bpr', is_optuna: bool = False) -> Tuple:
"""Train and evaluate pairwise recommenders."""
train_loss_list = []
test_loss_list = []
# initialise all the TF variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
# count the num of training data.
num_train, num_val = train.shape[0], val.shape[0]
np.random.seed(12345)
for i in np.arange(max_iters):
idx = np.random.choice(np.arange(num_train), size=batch_size)
train_batch = train[idx]
# update user-item latent factors
if model_name in 'bpr':
_, loss = sess.run([model.apply_grads, model.loss],
feed_dict={model.users: train_batch[:, 0],
model.pos_items: train_batch[:, 1],
model.scores1: np.ones((batch_size, 1)),
model.items2: train_batch[:, 2],
model.labels2: np.zeros((batch_size, 1)),
model.scores2: np.ones((batch_size, 1))})
elif 'ubpr' in model_name:
_, loss = sess.run([model.apply_grads, model.loss],
feed_dict={model.users: train_batch[:, 0],
model.pos_items: train_batch[:, 1],
model.scores1: np.expand_dims(train_batch[:, 4], 1),
model.items2: train_batch[:, 2],
model.labels2: np.expand_dims(train_batch[:, 3], 1),
model.scores2: np.expand_dims(train_batch[:, 5], 1)})
train_loss_list.append(loss)
# calculate a test loss
test_loss = sess.run(model.ideal_loss,
feed_dict={model.users: test[:, 0],
model.pos_items: test[:, 1],
model.rel1: np.expand_dims(test[:, 3], 1),
model.items2: test[:, 2],
model.rel2: np.expand_dims(test[:, 4], 1)})
test_loss_list.append(test_loss)
# calculate a validation loss
if model_name in 'bpr':
val_loss = sess.run(model.unbiased_loss,
feed_dict={model.users: val[:, 0],
model.pos_items: val[:, 1],
model.scores1: np.ones((num_val, 1)),
model.items2: val[:, 2],
model.labels2: np.zeros((num_val, 1)),
model.scores2: np.ones((num_val, 1))})
elif 'ubpr' in model_name:
val_loss = sess.run(model.unbiased_loss,
feed_dict={model.users: val[:, 0],
model.pos_items: val[:, 1],
model.scores1: np.expand_dims(val[:, 4], 1),
model.items2: val[:, 2],
model.labels2: np.expand_dims(val[:, 3], 1),
model.scores2: np.expand_dims(val[:, 5], 1)})
u_emb, i_emb = sess.run([model.user_embeddings, model.item_embeddings])
    if not is_optuna:  # "~" on a Python bool is a bitwise complement and is always truthy
path = Path(f'../logs/{data}/{model_name}')
(path / 'loss').mkdir(parents=True, exist_ok=True)
np.save(file=str(path / 'loss/train.npy'), arr=train_loss_list)
np.save(file=str(path / 'loss/test.npy'), arr=test_loss_list)
(path / 'emb').mkdir(parents=True, exist_ok=True)
np.save(file=str(path / 'emb/user_embed.npy'), arr=u_emb)
np.save(file=str(path / 'emb/item_embed.npy'), arr=i_emb)
sess.close()
return u_emb, i_emb, val_loss
class Trainer:
suffixes = ['cold-user', 'rare-item']
at_k = [3, 5, 8]
cold_user_threshold = 6
rare_item_threshold = 100
def __init__(self, data: str, max_iters: int = 1000, batch_size: int = 12,
eta: float = 0.1, model_name: str = 'bpr') -> None:
"""Initialize class."""
self.data = data
if model_name != 'expomf':
hyper_params = yaml.safe_load(open(f'../conf/hyper_params.yaml', 'r'))[data][model_name]
self.dim = np.int(hyper_params['dim'])
self.lam = hyper_params['lam']
self.weight = hyper_params['weight'] if model_name == 'wmf' else 1.
self.clip = hyper_params['clip'] if model_name == 'relmf' else 0.
self.beta = hyper_params['beta'] if model_name == 'ubpr' else 0.
self.batch_size = batch_size
self.max_iters = max_iters
self.eta = eta
self.model_name = model_name
def run(self, num_sims: int = 10) -> None:
"""Train implicit recommenders."""
train_point = np.load(f'../data/{self.data}/point/train.npy')
val_point = np.load(f'../data/{self.data}/point/val.npy')
test_point = np.load(f'../data/{self.data}/point/test.npy')
pscore = np.load(f'../data/{self.data}/point/pscore.npy')
num_users = np.int(train_point[:, 0].max() + 1)
num_items = np.int(train_point[:, 1].max() + 1)
if self.model_name in ['bpr', 'ubpr']:
train = np.load(f'../data/{self.data}/pair/{self.model_name}_train.npy')
val = np.load(f'../data/{self.data}/pair/{self.model_name}_val.npy')
test = np.load(f'../data/{self.data}/pair/test.npy')
if self.data == 'yahoo':
user_freq = np.load(f'../data/{self.data}/point/user_freq.npy')
item_freq = np.load(f'../data/{self.data}/point/item_freq.npy')
result_list = list()
if self.data == 'yahoo':
cold_user_result_list = list()
rare_item_result_list = list()
for seed in np.arange(num_sims):
tf.set_random_seed(12345)
ops.reset_default_graph()
sess = tf.Session()
if self.model_name in ['ubpr', 'bpr']:
pair_rec = PairwiseRecommender(num_users=num_users, num_items=num_items, dim=self.dim,
lam=self.lam, eta=self.eta, beta=self.beta)
u_emb, i_emb, _ = train_pairwise(sess, model=pair_rec, data=self.data,
train=train, val=val, test=test,
max_iters=self.max_iters, batch_size=self.batch_size,
model_name=self.model_name)
elif self.model_name in ['wmf', 'relmf']:
point_rec = PointwiseRecommender(num_users=num_users, num_items=num_items, weight=self.weight,
clip=self.clip, dim=self.dim, lam=self.lam, eta=self.eta)
u_emb, i_emb, _ = train_pointwise(sess, model=point_rec, data=self.data,
train=train_point, val=val_point, test=test_point, pscore=pscore,
max_iters=self.max_iters, batch_size=self.batch_size,
model_name=self.model_name)
elif self.model_name == 'expomf':
u_emb, i_emb = train_expomf(data=self.data, train=train_point, num_users=num_users, num_items=num_items)
result = aoa_evaluator(user_embed=u_emb, item_embed=i_emb,
test=test_point, model_name=self.model_name, at_k=self.at_k)
result_list.append(result)
if self.data == 'yahoo':
user_idx, item_idx = test_point[:, 0].astype(int), test_point[:, 1].astype(int)
cold_user_idx = user_freq[user_idx] <= self.cold_user_threshold
rare_item_idx = item_freq[item_idx] <= self.rare_item_threshold
cold_user_result = aoa_evaluator(user_embed=u_emb, item_embed=i_emb, at_k=self.at_k,
test=test_point[cold_user_idx], model_name=self.model_name)
rare_item_result = aoa_evaluator(user_embed=u_emb, item_embed=i_emb, at_k=self.at_k,
test=test_point[rare_item_idx], model_name=self.model_name)
cold_user_result_list.append(cold_user_result)
rare_item_result_list.append(rare_item_result)
print(f'#{seed+1}: {self.model_name}...')
ret_path = Path(f'../logs/{self.data}/{self.model_name}/results')
ret_path.mkdir(parents=True, exist_ok=True)
pd.concat(result_list, 1).to_csv(ret_path / f'aoa_all.csv')
if self.data == 'yahoo':
pd.concat(cold_user_result_list, 1).to_csv(ret_path / f'aoa_cold-user.csv')
pd.concat(rare_item_result_list, 1).to_csv(ret_path / f'aoa_rare-item.csv')
``` |
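A hypothetical driver for the `Trainer` class above; it assumes the preprocessed `../data/{data}/point` and `pair` .npy files plus `../conf/hyper_params.yaml` already exist, exactly as `run()` expects, and the hyperparameter values shown are placeholders:

```python
# Illustrative only: train BPR on the yahoo split with a single simulation.
trainer = Trainer(data='yahoo', max_iters=1000, batch_size=1024, eta=0.005, model_name='bpr')
trainer.run(num_sims=1)
```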
{
"source": "aaditkamat/unsubscribe-emails",
"score": 3
} |
#### File: aaditkamat/unsubscribe-emails/unsubscribe.py
```python
import mailbox
import re
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def get_senders(path):
senders = {}
for message in mailbox.mbox(path):
if 'List-Unsubscribe' in message:
senders[message['From']] = message['List-Unsubscribe']
return senders
def open_link(link):
try:
if re.search(r'(\<)([https{0,1}].+)(\>)', link):
driver = webdriver.Chrome()
driver.get(link.strip('\<\>'))
else:
print(link)
    except Exception as exc:
        print(f'Failed to open link (is the Chrome webdriver installed?): {exc}')
def prompt(senders):
for sender in senders:
tokens = re.split(r'\s\<.+\@.+\.\w+\>', sender)
name = tokens[0].strip(r'\"')
choice = input(f'Do you wish to unsubscribe from emails sent by {name}? ')
if choice.lower() in ['y', 'yes']:
open_link(senders[sender])
if choice.lower() == 'exit':
exit()
if __name__ == '__main__':
senders = get_senders('./mail.mbox')
prompt(senders)
``` |
{
"source": "aaditkapoor/random-code-experiments",
"score": 4
} |
#### File: random-code-experiments/experiments/color.py
```python
from dataclasses import dataclass
from typing import Union
# import rich
@dataclass
class Color:
"""Base Color"""
text: str
def __repr__(self):
raise NotImplementedError
# colors
class Red(Color):
def __init__(self, text: str):
super().__init__(text)
def __repr__(self):
return f'<red>{self.text}</red>'
class Black(Color):
def __init__(self, text: str):
super().__init__(text)
def __repr__(self):
return f'<black>{self.text}</black>'
class Green(Color):
def __init__(self, text: str):
super().__init__(text)
def __repr__(self):
return f'<green>{self.text}</green>'
# styles
class Style:
"""Base Style"""
text: str = None
instance: Union[Red, Black, Green,None] = None
def __init__(self, text: str = None, instance: Union[Red, Black, Green, None] = None):
if text and instance:
            raise Exception("Pass either text or an instance, not both")
if not text:
self.instance = instance
self.text = None
else:
self.text = text
self.instance = None
def __repr__(self):
raise NotImplementedError
class Bold(Style):
def __init__(self, text: str = None, instance: Union[Red, Black, Green, None] = None):
super().__init__(text, instance)
def __repr__(self):
if self.text:
return f'<bold>{self.text}</bold>'
else:
return f'<bold>{repr(self.instance)}</bold>'
class Italic(Style):
def __init__(self, text: str = None, instance: Union[Red, Black, Green, None] = None):
super().__init__(text=text, instance=instance)
def __repr__(self):
if self.text:
return f'<italic>{self.text}</italic>'
else:
return f'<italic>{repr(self.instance)}</italic>'
class Underline(Style):
def __init__(self, text: str = None, instance: Union[Red, Black, Green, None] = None):
super().__init__(text=text, instance=instance)
def __repr__(self):
if self.text:
return f'<underline>{self.text}</underline>'
else:
return f'<underline>{repr(self.instance)}</underline>'
# Usage
print(Bold("aadit"))
print(Bold(instance=Red("aadit")))
print(Italic(instance=Bold(instance=Red("aadit"))))
```
#### File: healthup_pilot/healthup_pilot/views.py
```python
from django.shortcuts import render_to_response
from django.http import HttpResponse
from .models import UserModel
from .calculate_cal_mass import *
from .day_sender import *
def home(request):
return render_to_response("index.html")
def register(request):
return render_to_response("register.html")
def about(request):
return render_to_response("about.html")
def setup_diet(request):
# get the data
name = request.GET.get("name","")
email = request.GET.get("email","")
age = request.GET.get("age","")
number = request.GET.get("mobile_number","")
weight = request.GET.get("weight","")
sex = request.GET.get("sex","")
height = request.GET.get("height", "")
fitness_goal = request.GET.get("fitness_goal","")
activeness = request.GET.get("activeness","")
# apply formula to get bmi and daily calorie intake, save it in a variable
# save everything into a model
# get a message variable in the model for the function to get a personalized message
# setup apscheduler to start the clock at 8:00am
bmi = return_body_mass_index_and_find_status(weight, height)
calorie_to_take = calorie_intake(sex, bmi, activeness,weight)
# complete eating profile
UserModel.objects.create()
message = get_complete_eating_profile(name, bmi, calorie_to_take, fitness_goal, activeness, weight)
# save model here and show thank you.html
return render_to_response("thank_you.html")
```
#### File: random-code-experiments/experiments/test.py
```python
__author__ = 'Victor'
from urllib.request import *
import re
url = 'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=306975'
def crawlURL(addedURL):
url = addedURL
    html = urlopen(url).read().decode('utf-8', errors='ignore')  # decode bytes so re.findall gets a str
teacherData = re.findall(r'\">(.*?)</',html)
output = ''
addStuff = 0
    for x in range(len(teacherData)):
if teacherData[x] == 'Submit a Correction':
output = 'professor: '
            for y in range(4):
output += teacherData[x-8+y] + ' '
addStuff = 1
elif teacherData[x] == 'Helpfulness' and addStuff == 1:
output += ': Overall quality: '+ str(teacherData[x-2]) + ': Average grade: ' + str(teacherData[x-1]) + ': Helpfulness: ' + teacherData[x+1]
elif teacherData[x] == 'Easiness' and addStuff == 1:
output += ': Easiness: ' + str(teacherData[x+1])
addStuff = 0
break
print (output)
crawlURL(url)
``` |
{
"source": "aaditkapoor/Randomizer",
"score": 3
} |
#### File: Randomizer/Randomizer/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
from .User import User, UserData
from . import models
from . import scraper
import json
def home(request):
return render(request, "signup.html")
def login(request):
first_name = request.GET.get("FirstName")
last_name = request.GET.get("LastName")
email = request.GET.get("email")
user = User(first_name = first_name, last_name=last_name, email=email)
userModel = models.UserModel(first_name=user.first_name, last_name=user.last_name, email=user.email, hashcode=user.hashcode)
movies = search("space")
movies_json = [x.to_dict() for x in movies]
jsdata = json.dumps({"results": movies_json})
if models.UserModel.objects.filter(hashcode=user.hashcode).exists():
print(models.UserModel.objects.filter(first_name=first_name,
last_name=last_name, email=email, hashcode=user.hashcode))
return render(request, "food.html",
{"user": first_name, "id": user.hashcode, "is_new_user": False,
"movies": movies, "jsdata": jsdata})
else:
userModel.save()
return render(request, "food.html",
{"user": first_name, "id": user.hashcode, "is_new_user": True,
"movies": movies, "jsdata": jsdata})
def randomize(request):
user_id = request.GET.get("id")
user = User(hashcode=user_id)
def store_data(request):
from datetime import date
today = date.today()
user_id = request.GET.get("hash")
item = request.GET.get("item")
movies = search("space")
movies_json = [x.to_dict() for x in movies]
jsdata = json.dumps({"results": movies_json})
print(user_id)
user = models.UserModel.objects.get(hashcode=user_id)
userData = models.UserDataModel(user=user, clicked_item=item, clicked_date = str(today))
userData.save()
if models.UserModel.objects.filter(hashcode=user.hashcode).exists():
print(models.UserModel.objects.filter(first_name=user.first_name,
last_name=user.last_name, email=user.email, hashcode=user.hashcode))
return render(request, "food.html",
{"user": user.first_name, "id": user.hashcode, "is_new_user": False, 'jsdata': jsdata})
def search(search_term):
s = scraper.Scraper(search_term)
s.search()
return s.pick_movie(n=8)
``` |
{
"source": "aaditkapoor/tic-tac-toe-ml-project",
"score": 3
} |
#### File: aaditkapoor/tic-tac-toe-ml-project/preprocessing.py
```python
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
# Loading data
data = pd.read_csv("../tic-tac-toe.data.txt", sep = ",")
data_copy = pd.read_csv("../tic-tac-toe.data.txt", sep = ",") # for further use
# View the data
data.head()
# Assigning columns
data.columns = ["first_row_left", "first_row_middle", "first_row_right", "center_row_left", "center_row_middle", "center_row_right", "bottom_row_left", "bottom_row_middle", "bottom_row_right", "is_win"]
data_copy.columns = ["first_row_left", "first_row_middle", "first_row_right", "center_row_left", "center_row_middle", "center_row_right", "bottom_row_left", "bottom_row_middle", "bottom_row_right", "is_win"]
def return_features_labels():
global data
global data_copy
# As we can see the the different move options, we perform label encoding.
mapping_for_moves = {'x':1, "o":0} # For b, we put mean of the data.
mapping_for_wins = {"positive":1, "negative":0} # Positive is win, negative is lose
data.is_win = data.is_win.map(mapping_for_wins)
data_copy.is_win = data_copy.is_win.map(mapping_for_wins)
data = data.drop(columns=["is_win"], axis=1)
for i in data.columns: # Applying map to all the columns except is_win.
data[i] = data[i].map(mapping_for_moves)
# Extracting features and labels
features = data.values
labels = data_copy.is_win.values
# Filling missing values aka "b" with the mean
features = (Imputer().fit_transform(features))
features = features.astype(np.int)
labels = labels.astype(np.int)
return features, labels
``` |
{
"source": "aaditkapoor/TwilioCovidTracker",
"score": 3
} |
#### File: aaditkapoor/TwilioCovidTracker/app.py
```python
from backend.parser import BaseParser, CountryParser, isValidMessage
from flask import Flask
from flask import request
from flask import render_template
from twilio.twiml.messaging_response import MessagingResponse
from backend.sms import API, Sender
from backend.models.data import Data
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/about/")
def about():
return render_template("about.html")
@app.route("/api/country/<country_name>")
def message(country_name):
api = API(country_name)
api.getAll()
return api.buildMessage()
# helper method
def getMessage(country:str) -> str:
api = API(country)
api.getAll()
return api.buildMessage()
@app.route("/sms", methods=['GET', 'POST'])
def sms_reply():
"""Respond to incoming messages with a friendly SMS."""
# Start our response
resp = MessagingResponse()
message = request.values.get('Body', None)
message = message.lower()
# If message is not False
if message:
if (isValidMessage(message)):
parser = CountryParser(BaseParser(message))
# parse the message
parser.parse()
# get country
country = parser.getText()
messageToSend = getMessage(country)
# Add a message
resp.message(messageToSend)
else:
resp.message("Could not understand. Try show me stats for <Country Name>.")
return str(resp)
# If no message
else:
return str(resp.message("Could not understand."))
```
#### File: backend/config/secret.py
```python
import os
import datetime
import logging
# Your Account Sid and Auth Token from twilio.com/console
# DANGER! This is insecure. See http://twil.io/secure
KEYS = {}
KEYS["account_sid"] = '<Add_your_sid>'
KEYS["auth_token"] = '<Add_your_token>'
KEYS["from_"] = "<paste_phone_number"
KEYS["to"] = ""
# some helper methods too
def convertDateToDay(date:str) -> str:
"""
Convert date into a format of <day> and <date>.
(str) -> str
"""
converted = datetime.datetime.strptime(date,"%Y-%m-%dT%H:%M:%SZ")
return f'{converted.strftime("%D")} {converted.strftime("%A")}'
```
#### File: aaditkapoor/TwilioCovidTracker/build.py
```python
import os
import json
import argparse
import enum
from typing import Dict
import logging
class Error(enum.Enum):
COMMAND_NOT_FOUND_ERROR = "Command not found! Check build.json"
parser = argparse.ArgumentParser(description="command matching build.json")
BUILD_JSON = "build.json"
class Build:
def __init__(self, build_file:str):
try:
self.build_file = open(build_file, "r")
except OSError as error:
print(error)
self.commands = {}
def parse(self) -> bool:
json_data = json.loads(self.build_file.read())
for key in json_data.keys():
self.commands[key] = json_data[key]
return True
def run(self, key:str):
if key in self.commands.keys():
os.system(self.commands.get(key, Error.COMMAND_NOT_FOUND_ERROR))
else:
logging.error(f'{key}: command not found in build.json')
if __name__ == "__main__":
parser.add_argument("run", help="Run the command")
args = parser.parse_args()
command = args.run
# build the object, parse and run command
builder = Build(BUILD_JSON)
builder.parse()
builder.run(command)
``` |
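The script expects a `build.json` that maps command names to shell strings; a hypothetical example (the commands themselves are invented) and a programmatic run would look like:

```python
# Create a sample build.json and run one of its commands through Build.
import json
from build import Build  # assumes the file above is saved as build.py

with open("build.json", "w") as f:
    json.dump({"run": "python app.py", "test": "python -m pytest -q"}, f)

builder = Build("build.json")
builder.parse()
builder.run("test")  # shells out via os.system
```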
{
"source": "Aaditya7789/checkov",
"score": 2
} |
#### File: tests/kustomize/test_runner.py
```python
import dis
import inspect
import os
import unittest
from pathlib import Path
from typing import Dict, Any, List
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.runner_filter import RunnerFilter
from checkov.kustomize.runner import Runner
from checkov.common.output.report import Report
class TestRunnerValid(unittest.TestCase):
@unittest.skipIf(os.name == "nt", "Skipping Kustomize test for windows OS.")
def test_record_relative_path_with_relative_dir(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_dir_path = os.path.join(current_dir, "runner", "resources")
# this is the relative path to the directory to scan (what would actually get passed to the -d arg)
dir_rel_path = os.path.relpath(scan_dir_path).replace('\\', '/')
runner = Runner()
runner.templateRendererCommand = "kustomize"
runner.templateRendererCommandOptions = "build"
checks_allowlist = ['CKV_K8S_37']
report = runner.run(root_folder=dir_rel_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework='kustomize', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertGreater(len(all_checks), 0) # ensure that the assertions below are going to do something
for record in all_checks:
# Kustomize deals with absolute paths
#self.assertEqual(record.repo_file_path in record.file_path)
self.assertIn(record.repo_file_path, record.file_path)
@unittest.skipIf(os.name == "nt", "Skipping Kustomize test for windows OS.")
def test_record_relative_path_with_direct_oberlay(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
scan_dir_path = Path(__file__).parent / "runner/resources/overlays/dev"
# this is the relative path to the directory to scan (what would actually get passed to the -d arg)
dir_rel_path = os.path.relpath(scan_dir_path).replace('\\', '/')
runner = Runner()
runner.templateRendererCommand = "kustomize"
runner.templateRendererCommandOptions = "build"
checks_allowlist = ['CKV_K8S_37']
report = runner.run(root_folder=dir_rel_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework='kustomize', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertGreater(len(all_checks), 0) # ensure that the assertions below are going to do something
for record in all_checks:
# Kustomize deals with absolute paths
#self.assertEqual(record.repo_file_path in record.file_path)
self.assertIn(record.repo_file_path, record.file_path)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "aaditya99983/citation-prediction",
"score": 2
} |
#### File: citation-prediction/tests/test_model_build.py
```python
import unittest
import numpy as np
from citeomatic.corpus import Corpus
from citeomatic.features import Featurizer, DataGenerator
from citeomatic.models.layers import triplet_loss
from citeomatic.models.options import ModelOptions
from citeomatic.utils import import_from
from tests.test_corpus import build_test_corpus
import keras.backend as K
create_model = import_from("citeomatic.models.citation_ranker", "create_model")
embedder_create_model = import_from("citeomatic.models.paper_embedder", "create_model")
class TestModelBuild(unittest.TestCase):
@classmethod
def setUpClass(cls):
build_test_corpus('/tmp/foo.json', '/tmp/foo.sqlite')
corpus = Corpus.load('/tmp/foo.sqlite')
options = ModelOptions(**{})
featurizer = Featurizer(max_title_len=options.max_title_len, max_abstract_len=options.max_abstract_len)
featurizer.fit(corpus, max_df_frac=1.0)
options.n_features = featurizer.n_features
options.n_authors = featurizer.n_authors
options.n_venues = featurizer.n_venues
options.n_keyphrases = featurizer.n_keyphrases
cls.corpus = corpus
cls.featurizer = featurizer
cls.options = options
def test_build_paper_embedder_sum(self):
try:
models = embedder_create_model(self.options)
assert 'embedding' in models
assert 'citeomatic' in models
self._test_train(models)
assert True
except Exception:
assert False
def test_build_magdir(self):
try:
models = embedder_create_model(self.options)
self.options.use_magdir = False
assert 'embedding' in models
assert 'citeomatic' in models
self._test_train(models)
assert True
except Exception:
assert False
def test_build_paper_embedder_cnn(self):
try:
self.options.embedding_type = 'cnn'
models = embedder_create_model(self.options)
assert 'embedding' in models
assert 'citeomatic' in models
self._test_train(models)
assert True
except Exception:
assert False
def test_build_paper_embedder_cnn2(self):
try:
self.options.embedding_type = 'cnn2'
models = embedder_create_model(self.options)
assert 'embedding' in models
assert 'citeomatic' in models
self._test_train(models)
assert True
except Exception:
assert False
def test_cnn(self):
self.options.embedding_type = 'cnn'
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_lstm(self):
self.options.embedding_type = 'lstm'
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_build_paper_embedder_lstm(self):
try:
self.options.embedding_type = 'lstm'
models = embedder_create_model(self.options)
assert 'embedding' in models
assert 'citeomatic' in models
self._test_train(models)
assert True
except Exception:
assert False
def test_build_train_ranker(self):
try:
models = create_model(self.options)
assert models['embedding'] is None
assert 'citeomatic' in models
self._test_train(models)
except Exception:
assert False
def test_use_author(self):
self.options.use_authors = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_use_venue(self):
self.options.use_venue = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_use_keyphrases(self):
self.options.use_keyphrases = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_use_citations(self):
self.options.use_citations = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
self.options.use_citations = False
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_use_sparse(self):
self.options.use_sparse = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_siamese(self):
self.options.use_src_tgt_embeddings = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def _test_train(self, models: dict):
model = models['citeomatic']
model.compile(optimizer='nadam', loss=triplet_loss)
dg = DataGenerator(self.corpus, self.featurizer, candidate_selector=TestCandidateSelector())
training_generator = dg.triplet_generator(paper_ids=self.corpus.train_ids, batch_size=2)
model.fit_generator(training_generator, steps_per_epoch=1, epochs=10)
K.clear_session()
class TestCandidateSelector(object):
def confidence(self, doc_id, candidate_ids):
return np.ones(len(candidate_ids))
``` |
{
"source": "Aaditya-Bhatia/Search_Engine_Query-Advertisment_Management",
"score": 3
} |
#### File: Aaditya-Bhatia/Search_Engine_Query-Advertisment_Management/2_build_vocab.py
```python
import pandas as pd
import os
import spacy
import unidecode
from nltk.corpus import stopwords
import string
import pickle
import argparse
nlp = spacy.load("en_core_web_sm", disable=['parser', 'tagger', 'ner'])
stops = stopwords.words("english")
punctuations = string.punctuation
for p in punctuations:
stops.append(p)
stops.append('')
# arg parser
parser = argparse.ArgumentParser(description='Builds the models to test accuracy')
parser.add_argument('-l', '--lab_d', default='reference_data.csv')
parser.add_argument('-d', '--dirName', default=None)
args = parser.parse_args()
dirName = args.dirName
if not dirName:
current_directory = os.getcwd()
dirName = f'{current_directory}/Data/'
dirHeirarchy = f'{dirName}Brands/'
Saves = f'{dirName}SavedModels/'
lab_d = args.lab_d
def getVocab(comment):
lemmatized = []
tokens = nlp(unidecode.unidecode(str(comment).lower()))
for token in tokens:
token = token.lemma_
if token not in stops:
lemmatized.append(token)
return lemmatized
if __name__ == "__main__":
lab_df = pd.read_csv(lab_d)
for brand in os.listdir(dirHeirarchy):
brand_path = os.path.join(dirHeirarchy, brand)
if not os.path.isdir(brand_path):
continue
allVocab = []
for root, dirs, files in os.walk(brand_path):
for file in files:
if file.endswith(".csv"):
fileName = os.path.join(root, file)
# print('Getting vocab from' , fileName)
csv = pd.read_csv(fileName)
csv['query_vocab'] = csv['query'].apply(getVocab)
query_vocab_list = csv['query_vocab'].sum()
csv['brand_vocab'] = csv['brand'].apply(getVocab)
brand_vocab_list = csv['brand_vocab'].sum()
csv['product_vocab'] = csv['product_name'].apply(getVocab)
product_vocab_list = csv['product_vocab'].sum()
allVocab = list(set(query_vocab_list + brand_vocab_list + product_vocab_list + allVocab))
# saving all vocab for the specific brand
with open('{}{}_vocab.txt'.format(Saves, brand), 'wb') as fp:
pickle.dump(allVocab, fp)
```
#### File: Aaditya-Bhatia/Search_Engine_Query-Advertisment_Management/3b_build_specific_models.py
```python
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
import numpy as np
import argparse
import pandas as pd
import pickle
from sklearn.feature_extraction.text import CountVectorizer
# arg parser
parser = argparse.ArgumentParser(description='Builds the models to test accuracy')
parser.add_argument('-d', '--dirName', default=None)
args = parser.parse_args()
dirName = args.dirName
if not dirName:
current_directory = os.getcwd()
dirName = f'{current_directory}/Data/'
dirHeirarchy = f'{dirName}Brands/'
Saves = f'{dirName}SavedModels/'
def testTrainAccuracyCheck(df, Y, name):
X_train, X_test, y_train, y_test = train_test_split(df, Y, test_size=0.33)
rf = RandomForestClassifier(n_estimators=250, n_jobs=-1, verbose=0)
rf.fit(X_train, y_train)
pred = rf.predict(X_test)
accuracy = metrics.accuracy_score(y_test, pred)
if accuracy < 0.5:
accuracy = 1 - accuracy
# print("\nAccuracy of {} model: {:.4f}".format(name, accuracy))
return accuracy
def getFeatureImportances(rf, X):
print('\nGetting feature importances....\n')
importances = rf.feature_importances_
indices = np.argsort(importances)[::-1]
names = X.columns
print("Important words are:")
for f in range(X.shape[1]):
if f < 20:
print(f + 1, names[indices[f]])
else:
break
def getRandomForestModel(df, Y):
model = RandomForestClassifier(n_estimators=250, n_jobs=-1, verbose=0)
model.fit(df, Y)
return model
def vectorize(brand_vocab_list, brand_df):
cv = CountVectorizer()
word_count_vector = cv.fit(brand_vocab_list)
sparse_matrix = word_count_vector.transform(brand_df.query_lemma)
return pd.DataFrame(sparse_matrix.toarray(), columns=cv.get_feature_names())
if __name__ == "__main__":
unprocessed_brands = []
unprocessed_products = []
for brand in os.listdir(dirHeirarchy):
brand_path = os.path.join(dirHeirarchy, brand)
if not os.path.isdir(brand_path):
continue
print("\nBuilding models for {}......".format(brand))
# Vectorizing the query
brand_df = pd.read_csv(os.path.join(brand_path, brand) + '.csv')
with open('{}{}_vocab.txt'.format(Saves, brand), 'rb') as fp:
brand_vocab_list = pickle.load(fp)
df = vectorize(brand_vocab_list, brand_df)
# training the random forest model
Y = brand_df.brand_check
X = df
# X['clicks'] = brand_df['clicks']
# X['ctr'] = brand_df['ctr']
# X['impressions'] = brand_df['impressions']
accuracy = testTrainAccuracyCheck(df, Y, brand)
# checking if conditions are met
if accuracy > 0.6 and Y.nunique() == 2:
# print('saving {} model'.format(brand))
model = getRandomForestModel(df, Y)
pickle.dump(model, open('{}/{}_model.sav'.format(Saves, brand), 'wb'))
# getFeatureImportances(model, df)
else:
print("Inadequate model features for brand:", brand)
unprocessed_brands.append(brand)
# making the models for product
for product in os.listdir(brand_path):
product_dir = os.path.join(brand_path, product)
if os.path.isdir(product_dir):
product_df = pd.read_csv(os.path.join(product_dir, product) + '.csv')
                # building the word vectors
df = vectorize(brand_vocab_list, product_df)
# preparing independent variables
X = df
# X['clicks'] = product_df['clicks']
# X['ctr'] = product_df['ctr']
# X['impressions'] = product_df['impressions']
# preparing dependent variables
Y = product_df.product_check
# checking if we get a good fit
accuracy = testTrainAccuracyCheck(df, Y, product)
# checking if conditions are met
if accuracy > 0.6 and Y.nunique() == 2:
print("\nBuilding models for {}......".format(product))
model = getRandomForestModel(df, Y)
pickle.dump(model, open('{}/{}_model.sav'.format(Saves, product), 'wb'))
# getFeatureImportances(model, df)
else:
print("Inadequate model features for product:", product)
unprocessed_products.append(product)
```
#### File: Aaditya-Bhatia/Search_Engine_Query-Advertisment_Management/3c_retrain.py
```python
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
import numpy as np
import argparse
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
import unidecode
import spacy
import pickle
nlp = spacy.load("en_core_web_sm", disable=['parser', 'tagger', 'ner'])
import string
from nltk.corpus import stopwords
stops = stopwords.words("english")
punctuations = string.punctuation
for p in punctuations:
stops.append(p)
stops.append('')
#add args here
#Saves = '/Users/aadityabhatia/Documents/GitHub.nosync/ad-management/Data/SavedModels/'
#df = pd.read_csv('/Users/aadityabhatia/Documents/GitHub.nosync/ad-management/Data/Labeled Data_20200410_v3.csv')
#df = df[df['brand'].str.contains('Acer|Samsung')] # get temp csv
# arg parser
parser = argparse.ArgumentParser(description='Uses model files to test accuracy')
parser.add_argument('-f', '--file', default='reference_data.csv')
parser.add_argument('-d', '--dirName', default=None)
args = parser.parse_args()
df = pd.read_csv(args.file)
dirName = args.dirName
if not dirName:
current_directory = os.getcwd()
dirName = f'{current_directory}/Data/'
dirHeirarchy = f'{dirName}Brands/'
Saves = f'{dirName}SavedModels/'
def testTrainAccuracyCheck(X, Y, name):
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33)
rf = RandomForestClassifier(n_estimators=250, n_jobs=-1, verbose=0)
rf.fit(X_train, y_train)
pred = rf.predict(X_test)
accuracy = metrics.accuracy_score(y_test, pred)
if accuracy < 0.5:
accuracy = 1 - accuracy
return accuracy
def getRandomForestModel(df, Y):
model = RandomForestClassifier(n_estimators=250, n_jobs=-1, verbose=0)
model.fit(df, Y)
return model
def vectorize(brand_vocab_list, brand_df):
cv = CountVectorizer()
word_count_vector = cv.fit(brand_vocab_list)
sparse_matrix = word_count_vector.transform(brand_df.query_lemma)
return pd.DataFrame(sparse_matrix.toarray(), columns=cv.get_feature_names())
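# Illustrative example (added note; the values are hypothetical): given a vocabulary list and a
# DataFrame with a 'query_lemma' column, vectorize() returns a bag-of-words frame, e.g.
#   vectorize(['cheap', 'laptop'], pd.DataFrame({'query_lemma': ['cheap laptop deal']}))
# yields a single row with a count of 1 for both 'cheap' and 'laptop'.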
def getLemma(comment):
lemmatized = []
tokens = nlp(unidecode.unidecode(str(comment).lower()))
for token in tokens:
token = token.lemma_
if token not in stops:
lemmatized.append(token)
return ' '.join(lemmatized)
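# Illustrative example (added note): getLemma('The laptops were cheap!!') lower-cases the text,
# lemmatises it, and strips stop words and punctuation, typically returning 'laptop cheap'
# (the exact output depends on the spaCy lookup lemmatiser and the NLTK stop-word list).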
def getVocab(comment):
lemmatized = []
tokens = nlp(unidecode.unidecode(str(comment).lower()))
for token in tokens:
token = token.lemma_
if token not in stops:
lemmatized.append(token)
return lemmatized
if __name__ == "__main__":
df['query_lemma'] = df['query'].apply(getLemma)
df['query_vocab'] = df['query'].apply(getVocab)
brands_grp = df.groupby('brand', as_index=0)
for brand, brand_df in brands_grp:
if len(brand_df) < 6:
continue
with open('{}{}_vocab.txt'.format(Saves, brand), 'rb') as fp:
brand_vocab_list = pickle.load(fp)
# updating the vocab
brand_vocab_list = list(set(brand_vocab_list + brand_df['query_vocab'].sum()))
#saving the updated vocab
with open('{}{}_vocab.txt'.format(Saves, brand), 'wb') as fp:
pickle.dump(brand_vocab_list, fp)
# making embeddings
X = vectorize(brand_vocab_list, brand_df)
# X['clicks'] = brand_df['clicks']
# X['ctr'] = brand_df['ctr']
# X['impressions'] = brand_df['impressions']
Y = brand_df.brand_check
        # getting the accuracy; if acc < 0.6 we don't save the model
accuracy = testTrainAccuracyCheck(X, Y, brand)
# overriding the brand model
if accuracy > 0.6 and Y.nunique() == 2:
model = getRandomForestModel(X, Y)
pickle.dump(model, open('{}/{}_model.sav'.format(Saves, brand), 'wb'))
# getting the products
product_grp = brand_df.groupby('product_id')
for product, product_df in product_grp:
if len(product_df) > 5:
X = vectorize(brand_vocab_list, product_df)
# X['clicks'] = brand_df['clicks']
# X['ctr'] = brand_df['ctr']
# X['impressions'] = brand_df['impressions']
Y = product_df.product_check
accuracy = testTrainAccuracyCheck(X, Y, product)
# overriding the product model
if accuracy > 0.6 and Y.nunique() == 2:
print(accuracy, brand, product)
model = getRandomForestModel(X, Y)
pickle.dump(model, open('{}/{}_model.sav'.format(Saves, product), 'wb'))
``` |
{
"source": "aadityabhatia/url-expander",
"score": 3
} |
#### File: aadityabhatia/url-expander/app.py
```python
from google.appengine.api import memcache, urlfetch
import json
import logging
import time
import urllib
import urlparse
import webapp2
cacheDurationServiceList = 604800
cacheDurationTempRedirect = 86400
def getExpandURLServiceList():
longURLServiceListURL = "http://api.longurl.org/v2/services?format=json"
serviceList = memcache.Client().get("serviceList")
if not serviceList:
serviceList = json.loads(urlfetch.fetch(longURLServiceListURL).content).keys()
serviceList.append("s.eagull.net")
serviceList.append("t.eagull.net")
memcache.Client().set("serviceList", serviceList, time=cacheDurationServiceList)
return serviceList
def expandURL(url):
memcacheClient = memcache.Client()
expandURLObject = memcacheClient.get(url)
shortenerKnown = False
if expandURLObject:
return expandURLObject
serviceList = getExpandURLServiceList()
if urlparse.urlparse(url).hostname in serviceList:
logging.info("ExpandURL hostname known: %s", url)
shortenerKnown = True
response = urlfetch.fetch(url, method=urlfetch.HEAD, follow_redirects=False, allow_truncated=True)
logging.info("ExpandURL response code: %s", response.status_code)
if response.status_code == 405:
response = urlfetch.fetch(url, follow_redirects=False, allow_truncated=True)
logging.info("ExpandURL response code: %s", response.status_code)
code = response.status_code
if code == 301 or (shortenerKnown and (code == 302 or code == 303 or code == 307)):
longURL = response.headers['Location']
logging.info("ExpandURL response Location: %s", response.headers['Location'])
else:
longURL = url
if response.status_code == 301:
cacheDuration = 0
else:
cacheDuration = cacheDurationTempRedirect
expandURLObject = {'longURL': longURL, 'statusCode': response.status_code, 'shortenerKnown': shortenerKnown, 'fetched': int(time.time())}
memcacheClient.set(url, expandURLObject, time=cacheDuration)
return expandURLObject
class AppRequestHandler(webapp2.RequestHandler):
def get(self, url=''):
if not url.strip():
self.redirect("https://github.com/dragonsblaze/url-expander#readme")
return
shortURL = urllib.unquote(url)
expandURLObject = expandURL(shortURL)
self.response.out.write(json.dumps(expandURLObject))
app = webapp2.WSGIApplication([('/(.*)', AppRequestHandler)], debug=False)
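# Illustrative request/response (added note; the host name is hypothetical):
#   GET http://your-app.appspot.com/http%3A%2F%2Fbit.ly%2Fexample
# returns a JSON body shaped like
#   {"longURL": "...", "statusCode": 301, "shortenerKnown": true, "fetched": 1600000000}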
``` |
{
"source": "aadityac15/Gator-Trader",
"score": 3
} |
#### File: server/model/location.py
```python
from model import db
class Location(db.Model):
location_id = db.Column("location_id", db.INT, primary_key=True, nullable=False, autoincrement=True)
description = db.Column("description", db.VARCHAR(length=255), nullable=False)
thumbnail = db.Column("thumbnail", db.VARCHAR(length=255), nullable=True)
    created_by = db.Column("created_by", db.INT, db.ForeignKey("user.user_id"), nullable=False)
@property
def serialize(self):
return {
"location_id": self.listing_id,
"description": self.description,
"thumbnail": self.thumbnail,
"created_by": self.created_by
}
```
#### File: server/routes/listing.py
```python
from flask import Blueprint, request, jsonify
import os, datetime
from model.listing import Listing
from model import db
listing_blueprint = Blueprint('listing',
__name__,
static_folder ='../client',
template_folder='../client/public/listing')
RELATIVE_IMAGES_PATH = 'client/public/images/{}.png'
@listing_blueprint.route('/listing', methods=['GET'])
def get_listing():
"""
Gets individual listing
:param listing_id
:return: JSON Serialized listing object
"""
listing_id = request.args.get('listing_id')
result = Listing.query.get(listing_id)
return jsonify({
'listing': result.serialize
})
@listing_blueprint.route('/create_listing', methods=['POST'])
def post_listing():
"""
Creates listing
:param title
:param description
:param type, category of object
:param price
:param thumbnail
:param created_by, id of user who created listing
:return: JSON of listing_id and created_on datetime
"""
title = request.form.get('title')
description = request.form.get('description')
type = request.form.get('type')
price = request.form.get('price')
thumbnail = request.files['file']
created_on = datetime.datetime.now()
last_edited_on = created_on
created_by = request.form.get('created_by')
new_listing = Listing(title=title,
description=description,
type=type,
price=price,
created_on=created_on,
last_edited_on=last_edited_on,
created_by=created_by)
db.session.add(new_listing)
db.session.commit()
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, '../{}'.format(RELATIVE_IMAGES_PATH.format(new_listing.listing_id)))
thumbnail.save(filename)
new_listing.thumbnail = RELATIVE_IMAGES_PATH.format(new_listing.listing_id)
db.session.commit()
return jsonify({
'listing_id': new_listing.listing_id,
'created_on': new_listing.created_on
})
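# Illustrative client call (added note; the host, port, and field values are hypothetical):
#   curl -F "title=Bike" -F "description=Used bike" -F "type=sports" -F "price=50" \
#        -F "created_by=1" -F "file=@bike.png" http://localhost:5000/create_listing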
@listing_blueprint.route('/listing', methods=['PUT'])
def put_listing():
"""
Edits listing
:param listing_id, REQUIRED
:params Any other editable field for listing
:return: JSON listing_id and last_edited_on
"""
listing_id = request.form.get('listing_id')
listing = Listing.query.get(listing_id)
title = request.form.get('title')
description = request.form.get('description')
type = request.form.get('type')
price = request.form.get('price')
thumbnail = request.form.get('thumbnail')
if title:
listing.title = title
if description:
listing.description = description
if type:
listing.type = type
if price:
listing.price = price
if thumbnail:
listing.thumbnail = thumbnail
listing.last_edited_on = datetime.datetime.now()
db.session.commit()
return jsonify({
'listing_id': listing_id,
'last_edited_on': listing.last_edited_on
})
@listing_blueprint.route('/edit_listing_approval', methods=['PUT'])
def edit_listing_approval():
"""
Approves or Denies a listing
:param listing_id
:param approval_status
:return:
"""
listing_id = request.json.get('listing_id')
approved = request.json.get('approval_status')
listing = Listing.query.get(listing_id)
    listing.approved = approved
listing.last_edited_on = datetime.datetime.now()
db.session.commit()
return jsonify({
'listing_id': listing_id,
'last_edited_on': listing.last_edited_on,
'approved': listing.approved
})
```
#### File: server/routes/message.py
```python
from flask import Blueprint, request, jsonify
import datetime
from model.message import Message
from model.listing import Listing
from model.user import User
from model import db
message_blueprint = Blueprint('message_server',
__name__,
static_folder='../client',
template_folder='../client/public/message')
@message_blueprint.route('/my_messages', methods=['GET'])
def get_my_messages():
"""
Gets all messages sent to a user
:param user_id
:return:
"""
user_id = request.args.get('user_id')
messages = Message.query.filter_by(sent_to=user_id)
messages = map(_reformat_message, messages)
return jsonify({
'messages': list(messages)
})
@message_blueprint.route('/send_message', methods=['POST'])
def send_message():
"""
Sends message to user
:param sent_by
:param sent_to
:param message_body
:param from_admin
:return:
"""
sent_by = request.form.get('sent_by')
sent_to = request.form.get('sent_to')
message_body = request.form.get('message_body')
from_admin = True if request.form.get('from_admin') == 'True' else False
listing_id = request.form.get('listing_id')
new_message = Message(
sent_by=sent_by,
sent_to=sent_to,
message_body=message_body,
from_admin=from_admin,
timestamp=datetime.datetime.now(),
listing_id=listing_id
)
db.session.add(new_message)
db.session.commit()
return jsonify({
'message_id': new_message.message_id,
'timestamp': new_message.timestamp
})
@message_blueprint.route('/message', methods=['GET'])
def get_message():
"""
Get message by message_id
:param message_id
:return:
"""
message_id = request.args.get('message_id')
message = Message.query.get(message_id)
if not message:
return jsonify({
'error': 'No message found'
})
return jsonify({
'message': _reformat_message(message)
})
def _reformat_message(message):
if message:
sender_username = User.query.get(message.sent_by).username
listing_name = Listing.query.get(message.listing_id).title
return {
"listing_name": listing_name,
"message_body": message.message_body,
"sender_username": sender_username,
"timestamp": message.timestamp
}
return message
``` |
{
"source": "aadityachapagain/Descent_py",
"score": 3
} |
#### File: Descent_py/reddit_datasets/main.py
```python
import requests
from bs4 import BeautifulSoup
import os
from collections import defaultdict
import hashlib
import tqdm
import time
import logging
import shutil
import argparse
import bz2
import json
import lzma
import zstandard as zstd
import io
from langdetect import detect
from unmark.main import unmark
import re
import traceback
parser = argparse.ArgumentParser(description='download reddit datasets from pushshift.io')
parser.add_argument('--dpath', type=str,
help= 'destination path to download datasets')
args = parser.parse_args()
logger = logging.getLogger("main")
FORMAT = '%(asctime)-15s %(name)s %(levelname)s %(message)s'
logging.basicConfig(format=FORMAT)
logger.setLevel("INFO")
data_ext = ['.bz2', '.xz','.zst']
reddit_link = "https://files.pushshift.io/reddit/submissions/"
datasets_link = defaultdict(lambda : {})
hash_link = 'https://files.pushshift.io/reddit/submissions/sha256sums.txt'
def find_url(string):
# findall() has been used
# with valid conditions for urls in string
regex = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
url = re.findall(regex,string)
return [x[0] for x in url]
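# Illustrative example (added note): find_url("see https://example.com for details")
# returns ['https://example.com'].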
def preprocess_handler(dpath: str):
logger.info(f"pre-processing {dpath}")
if dpath.lower().endswith('.bz2'):
read_bz2_dataset(dpath)
elif dpath.lower().endswith('.xz'):
read_lzma_dataset(dpath)
elif dpath.lower().endswith('.zst'):
read_zstandered_data(dpath)
else:
logger.info("File not supported ... ")
logger.info(f"Done preprocessing {dpath} to {''.join(dpath.split('.')[:-1]) +'.txt'}")
def preprocess_text(data: dict):
# check if sumbission is over 18 or not
if data['over_18']:
return False
# convert markdown to plain text
text = unmark(data['selftext'].strip())
# check if there is any url or not
if len(find_url(text)) > 0:
return False
if text.strip() == '':
return False
if len(text.strip()) <= 5:
return False
    # drop deleted/removed submissions
    if text.strip().lower() == '[deleted]' or text.strip().lower() == '[removed]':
        return False
    # drop texts that start with a non-ASCII character
    if ord(text[0]) > 128:
return False
    # collapse multiple spaces into a single space
text = re.sub('\s+',' ',text)
if detect(text) != 'en':
return False
    # drop texts with no spaces whose length exceeds 2040 characters
if ' ' not in text and len(text) > 2040:
return False
return text
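# Illustrative behaviour (added note; the submission values are hypothetical): for a dict such as
#   {'over_18': False, 'selftext': 'Looking for advice on my first build'}
# preprocess_text() would typically return the cleaned text, while empty, deleted/removed,
# URL-bearing, or non-English submissions return False.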
def read_bz2_dataset(path):
new_path = ''.join(path.split('.')[:-1]) +'.txt'
with open(new_path, 'w') as fw:
with bz2.open(path) as fp:
for line in fp:
try:
line = json.loads(line.decode("utf-8"))
line = preprocess_text(line)
if line:
fw.write(line + '\n')
except:
traceback.print_exc()
logger.info(f'getting error at line {line}')
def read_lzma_dataset(path):
new_path = ''.join(path.split('.')[:-1]) +'.txt'
with open(new_path, 'w') as fw:
with lzma.open(path) as compressed:
for line in compressed:
try:
line = json.loads(line.decode("utf-8"))
line = preprocess_text(line)
if line:
fw.write(line + '\n')
except:
traceback.print_exc()
logger.info(f'getting error at line {line}')
def read_zstandered_data(path):
new_path = ''.join(path.split('.')[:-1]) +'.txt'
with open(new_path, 'w') as fw:
with open(path, 'rb') as fh:
dctx = zstd.ZstdDecompressor()
stream_reader = dctx.stream_reader(fh)
text_stream = io.TextIOWrapper(stream_reader, encoding='utf-8')
for line in text_stream:
try:
line = json.loads(line)
line = preprocess_text(line)
if line:
fw.write(line + '\n')
except:
traceback.print_exc()
logger.info(f'getting error at line {line}')
def download(url, path, fname, redownload=False, num_retries=5):
"""
Download file using `requests`.
If ``redownload`` is set to false, then will not download tar file again if it is
present (default ``False``).
"""
outfile = os.path.join(path, fname)
if not os.path.isdir(os.path.dirname(outfile)):
os.makedirs(os.path.dirname(outfile))
download = not os.path.isfile(outfile) or redownload
logger.info(f"Downloading {url} to {outfile}")
retry = num_retries
exp_backoff = [2 ** r for r in reversed(range(retry))]
pbar = tqdm.tqdm(unit='B', unit_scale=True, desc='Downloading {}'.format(fname))
while download and retry > 0:
resume_file = outfile + '.part'
resume = os.path.isfile(resume_file)
if resume:
resume_pos = os.path.getsize(resume_file)
mode = 'ab'
else:
resume_pos = 0
mode = 'wb'
response = None
with requests.Session() as session:
try:
header = (
{'Range': 'bytes=%d-' % resume_pos, 'Accept-Encoding': 'identity'}
if resume
else {}
)
response = session.get(url, stream=True, timeout=5, headers=header)
# negative reply could be 'none' or just missing
if resume and response.headers.get('Accept-Ranges', 'none') == 'none':
resume_pos = 0
mode = 'wb'
CHUNK_SIZE = 32768
total_size = int(response.headers.get('Content-Length', -1))
# server returns remaining size if resuming, so adjust total
total_size += resume_pos
pbar.total = total_size
done = resume_pos
with open(resume_file, mode) as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if total_size > 0:
done += len(chunk)
if total_size < done:
# don't freak out if content-length was too small
total_size = done
pbar.total = total_size
pbar.update(len(chunk))
break
except (
requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout,
):
retry -= 1
pbar.clear()
if retry > 0:
pl = 'y' if retry == 1 else 'ies'
logger.debug(
f'Connection error, retrying. ({retry} retr{pl} left)'
)
time.sleep(exp_backoff[retry])
else:
logger.error('Retried too many times, stopped retrying.')
finally:
if response:
response.close()
if retry <= 0:
raise RuntimeError('Connection broken too many times. Stopped retrying.')
if download and retry > 0:
pbar.update(done - pbar.n)
if done < total_size:
raise RuntimeError(
f'Received less data than specified in Content-Length header for '
f'{url}. There may be a download problem.'
)
move(resume_file, outfile)
pbar.close()
return outfile
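# Illustrative call (added note; the URL and paths are hypothetical):
#   download('https://files.pushshift.io/reddit/submissions/RS_2019-01.zst', '/tmp/reddit', 'RS_2019-01.zst')
# resumes from an existing '.part' file when possible and returns the final output path.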
def move(path1, path2):
"""
Rename the given file.
"""
shutil.move(path1, path2)
class DownloadableFile:
"""
A class used to abstract any file that has to be downloaded online.
Any task that needs to download a file needs to have a list RESOURCES
that have objects of this class as elements.
This class provides the following functionality:
- Download a file from a URL / Google Drive
- Untar the file if zipped
- Checksum for the downloaded file
- Send HEAD request to validate URL or Google Drive link
An object of this class needs to be created with:
- url <string> : URL or Google Drive id to download from
- file_name <string> : File name that the file should be named
- hashcode <string> : SHA256 hashcode of the downloaded file
- zipped <boolean> : False if the file is not compressed
- from_google <boolean> : True if the file is from Google Drive
"""
def __init__(self, url, file_name, hashcode, zipped=True, from_google=False):
self.url = url
self.file_name = file_name
self.hashcode = hashcode
self.compressed = zipped
self.from_google = from_google
def checksum(self, dpath):
"""
Checksum on a given file.
:param dpath: path to the downloaded file.
"""
sha256_hash = hashlib.sha256()
with open(os.path.join(dpath, self.file_name), "rb") as f:
for byte_block in iter(lambda: f.read(65536), b""):
sha256_hash.update(byte_block)
if sha256_hash.hexdigest() != self.hashcode:
# remove_dir(dpath)
raise AssertionError(
f"[ Checksum for {self.file_name} from \n{self.url}\n"
"does not match the expected checksum. Please try again. ]"
)
else:
logger.debug("Checksum Successful")
def download_file(self, dpath):
out_file = download(self.url, dpath, self.file_name)
if self.hashcode:
self.checksum(dpath)
# if self.compressed:
# extract(dpath, self.file_name)
return out_file
def collect_hash():
res = requests.get(hash_link)
hashes = res.content.decode("utf-8").strip()
for hash_to_file in hashes.split('\n'):
hash_to_file = hash_to_file.strip().split()
datasets_link[hash_to_file[1]]['hash'] = hash_to_file[0]
def is_recommended_link(link):
for ext in data_ext:
if link.endswith(ext):
return link
return False
def get_all_downloadable_links():
res = requests.get(reddit_link)
content = BeautifulSoup(res.content, 'html5lib')
for link in content.find_all('a'):
_link = link.get('href')
_link = is_recommended_link(_link)
if _link:
_link = os.path.split(_link)[-1]
datasets_link[_link]['link'] = os.path.join(reddit_link, _link)
if __name__ == "__main__":
collect_hash()
download_path = args.dpath
get_all_downloadable_links()
for k,v in datasets_link.items():
if v.get('link', False):
fd = DownloadableFile(
v['link'], k, None
)
outfile = fd.download_file(args.dpath)
preprocess_handler(outfile)
"""
data preprocessing
* convert markup to plain text: checked
* remove comments/posts from bots: checked
* remove comments/posts that are not in English: checked
* remove comments/posts marked as deleted or removed: checked
* remove comments/posts longer than 128 BPE tokens: will be done while loading data into the model using the tokenizer
* remove texts longer than 2040 characters that contain no spaces: checked
* remove texts shorter than 5 characters: checked
* remove comments/posts that contain a URL: checked
* remove comments/posts that start with a non-ASCII character: checked
* remove comments deeper than depth 7 in the thread: since we are not pretraining we might not need this
* remove child-unsafe posts and comments: checked
"""
``` |
{
"source": "aadityachapagain/DotaCalc",
"score": 3
} |
#### File: DotaCalc/dotaCrawler/server.py
```python
from flask import render_template
import connexion
from flask_cors import CORS
# create the application instance
app = connexion.FlaskApp(__name__,specification_dir='./')
# Read the swagger.yml file to configure the endpoints
app.add_api('swagger.yml')
# add CORS support
# CORS(app.app)
cors = CORS(app.app, resources=r"/api/*")
# Create a URL route in our application
@app.route('/')
def home():
"""
    This function just responds to the browser URL
localhost:5000/
:return: the rendered template 'home.html'
"""
return render_template('home.html')
def runServer():
app.run(host='localhost', port=3001, debug=True)
# if we are running in the standalone mode, run the application
if __name__ == '__main__':
runServer()
``` |
{
"source": "aadityachapagain/Mephisto",
"score": 2
} |
#### File: mephisto/abstractions/blueprint.py
```python
from abc import ABC, abstractmethod
from mephisto.operations.utils import find_or_create_qualification
from typing import (
ClassVar,
Optional,
List,
Dict,
Any,
Type,
ClassVar,
Union,
Iterable,
Callable,
Tuple,
TYPE_CHECKING,
)
from dataclasses import dataclass, field
from omegaconf import MISSING, DictConfig
from mephisto.data_model.exceptions import (
AgentReturnedError,
AgentDisconnectedError,
AgentTimeoutError,
AgentShutdownError,
)
from mephisto.data_model.constants.assignment_state import AssignmentState
if TYPE_CHECKING:
from mephisto.data_model.agent import Agent, OnboardingAgent
from mephisto.data_model.task_run import TaskRun
from mephisto.data_model.assignment import Assignment, InitializationData
from mephisto.data_model.unit import Unit
from mephisto.data_model.packet import Packet
from mephisto.data_model.worker import Worker
from argparse import _ArgumentGroup as ArgumentGroup
from mephisto.operations.logger_core import get_logger
logger = get_logger(name=__name__)
@dataclass
class BlueprintArgs:
_blueprint_type: str = MISSING
onboarding_qualification: str = field(
default=MISSING,
metadata={
"help": (
"Specify the name of a qualification used to block workers who fail onboarding, "
"Empty will skip onboarding."
)
},
)
block_qualification: str = field(
default=MISSING,
metadata={
"help": ("Specify the name of a qualification used to soft block workers.")
},
)
@dataclass
class SharedTaskState:
"""
Base class for specifying additional state that can't just
be passed as Hydra args, like functions and objects
"""
onboarding_data: Dict[str, Any] = field(default_factory=dict)
task_config: Dict[str, Any] = field(default_factory=dict)
validate_onboarding: Callable[[Any], bool] = field(
default_factory=lambda: (lambda x: True)
)
qualifications: List[Any] = field(default_factory=list)
worker_can_do_unit: Callable[["Worker", "Unit"], bool] = field(
default_factory=lambda: (lambda worker, unit: True)
)
on_unit_submitted: Callable[["Unit"], None] = field(
default_factory=lambda: (lambda unit: None)
)
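# Illustrative sketch (added note, not part of the original module): a task script might supply
# shared state such as
#   shared_state = SharedTaskState(
#       onboarding_data={"questions": ["..."]},
#       validate_onboarding=lambda onboarding_data: onboarding_data.get("passed", False),
#   )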
class TaskBuilder(ABC):
"""
Class to manage building a task of a specific type in a directory
that will be used to deploy that task.
"""
def __init__(self, task_run: "TaskRun", args: "DictConfig"):
self.args = args
self.task_run = task_run
def __new__(cls, task_run: "TaskRun", args: "DictConfig") -> "TaskBuilder":
"""Get the correct TaskBuilder for this task run"""
from mephisto.operations.registry import get_blueprint_from_type
if cls == TaskBuilder:
            # We are trying to construct a TaskBuilder, find what type to use and
# create that instead
correct_class = get_blueprint_from_type(task_run.task_type).TaskBuilderClass
return super().__new__(correct_class)
else:
# We are constructing another instance directly
return super().__new__(cls)
@abstractmethod
def build_in_dir(self, build_dir: str) -> None:
"""
Build the server for the given task run into the provided directory
"""
raise NotImplementedError()
class TaskRunner(ABC):
"""
Class to manage running a task of a specific type. Includes
building the dependencies to a directory to be deployed to
the server, and spawning threads that manage the process of
passing agents through a task.
"""
def __init__(
self, task_run: "TaskRun", args: "DictConfig", shared_state: "SharedTaskState"
):
self.args = args
self.shared_state = shared_state
self.task_run = task_run
self.running_assignments: Dict[str, Tuple["Assignment", List["Agent"]]] = {}
self.running_units: Dict[str, Tuple["Unit", "Agent"]] = {}
self.running_onboardings: Dict[str, "OnboardingAgent"] = {}
self.is_concurrent = False
# TODO(102) populate some kind of local state for tasks that are being run
# by this runner from the database.
self.block_qualification = args.blueprint.get("block_qualification", None)
if self.block_qualification is not None:
find_or_create_qualification(task_run.db, self.block_qualification)
def __new__(
cls, task_run: "TaskRun", args: "DictConfig", shared_state: "SharedTaskState"
) -> "TaskRunner":
"""Get the correct TaskRunner for this task run"""
if cls == TaskRunner:
from mephisto.operations.registry import get_blueprint_from_type
            # We are trying to construct a TaskRunner, find what type to use and
# create that instead
correct_class = get_blueprint_from_type(task_run.task_type).TaskRunnerClass
return super().__new__(correct_class)
else:
# We are constructing another instance directly
return super().__new__(cls)
def launch_onboarding(self, onboarding_agent: "OnboardingAgent") -> None:
"""
Validate that onboarding is ready, then launch. Catch disconnect conditions
"""
onboarding_id = onboarding_agent.get_agent_id()
if onboarding_id in self.running_onboardings:
logger.debug(f"Onboarding {onboarding_id} is already running")
return
logger.debug(f"Onboarding {onboarding_id} is launching with {onboarding_agent}")
# At this point we're sure we want to run Onboarding
self.running_onboardings[onboarding_id] = onboarding_agent
try:
self.run_onboarding(onboarding_agent)
onboarding_agent.mark_done()
except (
AgentReturnedError,
AgentTimeoutError,
AgentDisconnectedError,
AgentShutdownError,
):
self.cleanup_onboarding(onboarding_agent)
except Exception as e:
print(f"Unhandled exception in onboarding {onboarding_agent}: {repr(e)}")
import traceback
traceback.print_exc()
self.cleanup_onboarding(onboarding_agent)
del self.running_onboardings[onboarding_id]
return
def launch_unit(self, unit: "Unit", agent: "Agent") -> None:
"""
Validate the unit is prepared to launch, then run it
"""
if unit.db_id in self.running_units:
logger.debug(f"{unit} is already running")
return
logger.debug(f"{unit} is launching with {agent}")
# At this point we're sure we want to run the unit
self.running_units[unit.db_id] = (unit, agent)
try:
self.run_unit(unit, agent)
except (
AgentReturnedError,
AgentTimeoutError,
AgentDisconnectedError,
AgentShutdownError,
) as e:
# A returned Unit can be worked on again by someone else.
if (
unit.get_status() != AssignmentState.EXPIRED
and unit.get_assigned_agent().db_id == agent.db_id
):
logger.debug(f"Clearing {agent} from {unit} due to {e}")
unit.clear_assigned_agent()
self.cleanup_unit(unit)
except Exception as e:
logger.exception(f"Unhandled exception in unit {unit}: {repr(e)}")
import traceback
traceback.print_exc()
self.cleanup_unit(unit)
self.shared_state.on_unit_submitted(unit)
del self.running_units[unit.db_id]
return
def launch_assignment(
self, assignment: "Assignment", agents: List["Agent"]
) -> None:
"""
Validate the assignment is prepared to launch, then run it
"""
if assignment.db_id in self.running_assignments:
logger.debug(f"Assignment {assignment} is already running")
return
logger.debug(f"Assignment {assignment} is launching with {agents}")
# At this point we're sure we want to run the assignment
self.running_assignments[assignment.db_id] = (assignment, agents)
try:
self.run_assignment(assignment, agents)
except (
AgentReturnedError,
AgentTimeoutError,
AgentDisconnectedError,
AgentShutdownError,
) as e:
# TODO(#99) if some operator flag is set for counting complete tasks, launch a
# new assignment copied from the parameters of this one
disconnected_agent_id = e.agent_id
for agent in agents:
if agent.db_id != e.agent_id:
agent.update_status(AgentState.STATUS_PARTNER_DISCONNECT)
else:
# Must expire the disconnected unit so that
# new workers aren't shown it
agent.get_unit().expire()
self.cleanup_assignment(assignment)
except Exception as e:
logger.exception(
f"Unhandled exception in assignment {assignment}: {repr(e)}"
)
import traceback
traceback.print_exc()
self.cleanup_assignment(assignment)
for unit in assignment.get_units():
self.shared_state.on_unit_submitted(unit)
del self.running_assignments[assignment.db_id]
return
@staticmethod
def get_data_for_assignment(assignment: "Assignment") -> "InitializationData":
"""
Finds the right data to get for the given assignment.
"""
return assignment.get_assignment_data()
@abstractmethod
def get_init_data_for_agent(self, agent: "Agent"):
"""
Return the data that an agent will need for their task.
"""
raise NotImplementedError()
def filter_units_for_worker(self, units: List["Unit"], worker: "Worker"):
"""
Returns the list of Units that the given worker is eligible to work on.
Some tasks may want more direct control of what units a worker is
allowed to work on, so this method should be overridden by children
classes.
"""
return units
def shutdown(self):
"""
Updates the status of all agents tracked by this runner to throw a ShutdownException,
ensuring that all the threads exit correctly and we can cleanup properly.
"""
for _unit, agent in self.running_units.values():
agent.shutdown()
for _assignment, agents in self.running_assignments.values():
for agent in agents:
agent.shutdown()
for onboarding_agent in self.running_onboardings.values():
onboarding_agent.shutdown()
# TaskRunners must implement either the unit or assignment versions of the
    # run and cleanup functions, depending on whether the task is run at the assignment
    # level rather than at the unit level.
def run_onboarding(self, agent: "OnboardingAgent"):
"""
Handle setup for any resources to run an onboarding task. This
will be run in a background thread, and should be tolerant to being
interrupted by cleanup_onboarding.
Only required by tasks that want to implement onboarding
"""
raise NotImplementedError()
def cleanup_onboarding(self, agent: "OnboardingAgent"):
"""
Handle cleaning up the resources that were being used to onboard
the given agent.
"""
raise NotImplementedError()
def run_unit(self, unit: "Unit", agent: "Agent"):
"""
Handle setup for any resources required to get this unit running.
This will be run in a background thread, and should be tolerant to
being interrupted by cleanup_unit.
Only needs to be implemented by non-concurrent tasks
"""
raise NotImplementedError()
def cleanup_unit(self, unit: "Unit"):
"""
        Handle ensuring resources for a given unit are cleaned up following
a disconnect or other crash event
Does not need to be implemented if the run_unit method is
already error catching and handles its own cleanup
"""
raise NotImplementedError()
def run_assignment(self, assignment: "Assignment", agents: List["Agent"]):
"""
Handle setup for any resources required to get this assignment running.
This will be run in a background thread, and should be tolerant to
being interrupted by cleanup_assignment.
Only needs to be implemented by concurrent tasks
"""
raise NotImplementedError()
def cleanup_assignment(self, assignment: "Assignment"):
"""
Handle ensuring resources for a given assignment are cleaned up following
a disconnect or other crash event
Does not need to be implemented if the run_assignment method is
already error catching and handles its own cleanup
"""
raise NotImplementedError()
# TODO(#101) what is the best method for creating new ones of these for different task types
# in ways that are supported by different backends? Perhaps abstract additional
# methods into the required db interface? Move any file manipulations into a
# extra_data_handler subcomponent of the MephistoDB class?
class AgentState(ABC):
"""
Class for holding state information about work by an Agent on a Unit, currently
stored as current task work into a json file.
Specific state implementations will need to be created for different Task Types,
as different tasks store and load differing data.
"""
# Possible Agent Status Values
STATUS_NONE = "none"
STATUS_ACCEPTED = "accepted"
STATUS_ONBOARDING = "onboarding"
STATUS_WAITING = "waiting"
STATUS_IN_TASK = "in task"
STATUS_COMPLETED = "completed"
STATUS_DISCONNECT = "disconnect"
STATUS_TIMEOUT = "timeout"
STATUS_PARTNER_DISCONNECT = "partner disconnect"
STATUS_EXPIRED = "expired"
STATUS_RETURNED = "returned"
STATUS_APPROVED = "approved"
STATUS_SOFT_REJECTED = "soft_rejected"
STATUS_REJECTED = "rejected"
def __new__(cls, agent: Union["Agent", "OnboardingAgent"]) -> "AgentState":
"""Return the correct agent state for the given agent"""
if cls == AgentState:
from mephisto.data_model.agent import Agent
from mephisto.operations.registry import get_blueprint_from_type
# We are trying to construct an AgentState, find what type to use and
# create that instead
if isinstance(agent, Agent):
correct_class = get_blueprint_from_type(agent.task_type).AgentStateClass
else:
correct_class = get_blueprint_from_type(
agent.task_type
).OnboardingAgentStateClass
return super().__new__(correct_class)
else:
# We are constructing another instance directly
return super().__new__(cls)
@staticmethod
def complete() -> List[str]:
"""Return all final Agent statuses which will not be updated by the supervisor"""
return [
AgentState.STATUS_COMPLETED,
AgentState.STATUS_DISCONNECT,
AgentState.STATUS_TIMEOUT,
AgentState.STATUS_EXPIRED,
AgentState.STATUS_RETURNED,
AgentState.STATUS_SOFT_REJECTED,
AgentState.STATUS_APPROVED,
AgentState.STATUS_REJECTED,
]
@staticmethod
def valid() -> List[str]:
"""Return all valid Agent statuses"""
# TODO(#97) write a test that ensures all AgentState statuses are here
return [
AgentState.STATUS_NONE,
AgentState.STATUS_ACCEPTED,
AgentState.STATUS_ONBOARDING,
AgentState.STATUS_WAITING,
AgentState.STATUS_IN_TASK,
AgentState.STATUS_COMPLETED,
AgentState.STATUS_DISCONNECT,
AgentState.STATUS_TIMEOUT,
AgentState.STATUS_PARTNER_DISCONNECT,
AgentState.STATUS_EXPIRED,
AgentState.STATUS_RETURNED,
AgentState.STATUS_SOFT_REJECTED,
AgentState.STATUS_APPROVED,
AgentState.STATUS_REJECTED,
]
# Implementations of an AgentState must implement the following:
@abstractmethod
def __init__(self, agent: "Agent"):
"""
Create an AgentState to track the state of an agent's work on a Unit
Implementations should initialize any required files for saving and
loading state data somewhere.
If said file already exists based on the given agent, load that data
instead.
"""
raise NotImplementedError()
@abstractmethod
def set_init_state(self, data: Any) -> bool:
"""Set the initial state for this agent"""
raise NotImplementedError()
@abstractmethod
def get_init_state(self) -> Optional[Any]:
"""
Return the initial state for this agent,
None if no such state exists
"""
raise NotImplementedError()
@abstractmethod
def load_data(self) -> None:
"""
Load stored data from a file to this object
"""
raise NotImplementedError()
@abstractmethod
def get_data(self) -> Dict[str, Any]:
"""
Return the currently stored data for this task in the format
expected by any frontend displays
"""
raise NotImplementedError()
def get_parsed_data(self) -> Any:
"""
Return the portion of the data that is relevant to a human
who wants to parse or analyze the data
Utility function to handle stripping the data of any
context that is only important for reproducing the task
exactly. By default is just `get_data`
"""
return self.get_data()
@abstractmethod
def save_data(self) -> None:
"""
Save the relevant data from this Unit to a file in the expected location
"""
raise NotImplementedError()
@abstractmethod
def update_data(self, packet: "Packet") -> None:
"""
Put new current Unit data into this AgentState
"""
# TODO(#100) maybe refine the signature for this function once use cases
# are fully scoped
# Some use cases might just be appending new data, some
# might instead prefer to maintain a final state.
# Maybe the correct storage is of a series of actions taken
# on this Unit? Static tasks only have 2 turns max, dynamic
# ones may have multiple turns or steps.
raise NotImplementedError()
def get_task_start(self) -> Optional[float]:
"""
Return the start time for this task, if it is available
"""
return 0.0
def get_task_end(self) -> Optional[float]:
"""
Return the end time for this task, if it is available
"""
return 0.0
class OnboardingRequired(object):
"""
Compositional class for blueprints that may have an onboarding step
"""
@staticmethod
def get_failed_qual(qual_name: str) -> str:
"""Returns the wrapper for a qualification to represent failing an onboarding"""
return qual_name + "-failed"
def init_onboarding_config(
self, task_run: "TaskRun", args: "DictConfig", shared_state: "SharedTaskState"
):
self.onboarding_qualification_name: Optional[str] = args.blueprint.get(
"onboarding_qualification", None
)
self.onboarding_data = shared_state.onboarding_data
self.use_onboarding = self.onboarding_qualification_name is not None
self.onboarding_qualification_id = None
if self.onboarding_qualification_name is not None:
db = task_run.db
found_qualifications = db.find_qualifications(
self.onboarding_qualification_name
)
if len(found_qualifications) == 0:
self.onboarding_qualification_id = db.make_qualification(
self.onboarding_qualification_name
)
else:
self.onboarding_qualification_id = found_qualifications[0].db_id
# We need to keep a separate qualification for failed onboarding
# to push to a crowd provider in order to prevent workers
# who have failed from being shown our task
self.onboarding_failed_name = self.get_failed_qual(
self.onboarding_qualification_name
)
found_qualifications = db.find_qualifications(self.onboarding_failed_name)
if len(found_qualifications) == 0:
self.onboarding_failed_id = db.make_qualification(
self.onboarding_failed_name
)
else:
self.onboarding_failed_id = found_qualifications[0].db_id
def get_onboarding_data(self, worker_id: str) -> Dict[str, Any]:
"""
If the onboarding task on the frontend requires any specialized data, the blueprint
should provide it for the user.
As onboarding qualifies a worker for all tasks from this blueprint, this should
generally be static data that can later be evaluated against.
"""
return self.onboarding_data
def validate_onboarding(
self, worker: "Worker", onboarding_agent: "OnboardingAgent"
) -> bool:
"""
Check the incoming onboarding data and evaluate if the worker
has passed the qualification or not. Return True if the worker
has qualified.
"""
return True
class Blueprint(ABC):
"""
Configuration class for the various parts of building, launching,
    and running a task of a specific type. Provides utility functions
for managing between the three main components, which are separated
into separate classes in acknowledgement that some tasks may have
particularly complicated processes for them
"""
AgentStateClass: ClassVar[Type["AgentState"]]
OnboardingAgentStateClass: ClassVar[Type["AgentState"]] = AgentState # type: ignore
TaskRunnerClass: ClassVar[Type["TaskRunner"]]
TaskBuilderClass: ClassVar[Type["TaskBuilder"]]
ArgsClass: ClassVar[Type["BlueprintArgs"]] = BlueprintArgs
SharedStateClass: ClassVar[Type["SharedTaskState"]] = SharedTaskState
supported_architects: ClassVar[List[str]]
BLUEPRINT_TYPE: str
def __init__(
self, task_run: "TaskRun", args: "DictConfig", shared_state: "SharedTaskState"
):
self.args = args
self.shared_state = shared_state
self.frontend_task_config = shared_state.task_config
@classmethod
def assert_task_args(cls, args: DictConfig, shared_state: "SharedTaskState"):
"""
Assert that the provided arguments are valid. Should
fail if a task launched with these arguments would
not work
"""
return
def get_frontend_args(self) -> Dict[str, Any]:
"""
        Specifies what options should be forwarded
to the client for use by the task's frontend
"""
return self.frontend_task_config
@abstractmethod
def get_initialization_data(
self,
) -> Iterable["InitializationData"]:
"""
Get all of the data used to initialize tasks from this blueprint.
Can either be a simple iterable if all the assignments can
be processed at once, or a Generator if the number
of tasks is unknown or changes based on something running
concurrently with the job.
"""
raise NotImplementedError
``` |
{
"source": "AadityaDeshpande/ExamSurveillance",
"score": 3
} |
#### File: AadityaDeshpande/ExamSurveillance/graph.py
```python
import matplotlib.pyplot as plt
import numpy as np
import time
def bargraph(a,b,c):
glob = [a,b,c]
objects = ('fps', 'away', 'screen')
y_pos = np.arange(len(objects))
performance = [glob[0],glob[1],glob[2]]
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('total')
plt.title('student analysis')
plt.show()
#time.sleep(5)
#plt.close()
bargraph(0,0,0)
#if __name__ == '__main__':
# bargraph(0,0,0)
```
#### File: AadityaDeshpande/ExamSurveillance/Remote_runexam.py
```python
import cv2
import multiprocessing
from multiprocessing import Process
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
import time
import csv
#from graph import bargraph
import sys
from logger import log,initlogger
from line import liner
from analysis import analysisCSV
import requests
def exam(ip):
cap = cv2.VideoCapture(0)
faces,eyes = [],[]
frame_no = 0
look_screen = 0
look_away = 0
# Create the haar cascade
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
eyesCascade = cv2.CascadeClassifier("haarcascade_eye_tree_eyeglasses.xml")
#ip = input("Please Enter IP Address to connect with \n")
while(True):
# Capture frame-by-frame
#ret, frame = cap.read()
img_res = requests.get("http://"+ip+":8080/shot.jpg")
img_arr = np.array(bytearray(img_res.content), dtype = np.uint8)
frame = cv2.imdecode(img_arr,-1)
frame_no = frame_no + 1
glob[0] = frame_no
#print("Current Frame number is: ",frame_no)
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.25, #1.25 1.1 perfect-> 1.25
minNeighbors=9, #5 9-> only single face
minSize=(30, 30) #30 30
#flags = cv2.CV_HAAR_SCALE_IMAGE
)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 2)
            # cropping the face and finding eyes only in that region
roi_gray = gray[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
eyes = eyesCascade.detectMultiScale(
gray,
scaleFactor=1.22, # perfect -> 1.2 |1.3 1.1
minNeighbors=7, #perfect->4 5
minSize=(30, 30) #30 30
#flags = cv2.CV_HAAR_SCALE_IMAGE)
)
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(frame, (ex, ey), (ex+ew, ey+eh), (138,43,226), 2)
#if len(faces) >= 1:
if len(eyes) == 2:
print("STATUS ",frame_no," : person LOOKING at SCREEN")
look_screen = look_screen+1
glob[2] = look_screen
elif len(eyes) == 0:
print("STATUS ",frame_no," : NO EYES DETECTED")
elif len(eyes) == 1:
print("STATUS ",frame_no," : person LOOKING AWAY")
look_away = look_away+1
glob[1] = look_away
#frame = cv2.resize(frame, (600,600))
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'): #if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
#cap.release()
cv2.destroyAllWindows()
def p2(): #Creating a Log files about student analysis
'''with open('logfile1.csv','w',newline='') as csvfile:
fieldnames = ['frame_number', 'look_away', 'look_screen']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
while True:
writer.writerow({'frame_number': int(glob[0]), 'look_away': int(glob[1]), 'look_screen': int(glob[2])})
time.sleep()
'''
while True:
log(glob[0],glob[1],glob[2])
time.sleep(0.05)
if __name__ == '__main__':
#exam()
glob = multiprocessing.Array('i', 4)
# 0th location contains frame_no 1st-> look_away 2nd -> look_screen
ip = input("Please Enter IP Address to connect with \n")
exam = Process(target = exam, args=(ip,))
p2 = Process(target = p2)
print("prepairing....")
sys.stdout.flush()
time.sleep(1)
exam.start()
time.sleep(2)
initlogger()
#print("started") 2nd process goes here
p2.start()
exam.join()
if not(exam.is_alive()):
#print("Time to terminate process 2")
p2.terminate()
#bargraph(glob[0],glob[1],glob[2])
#p2.join()
print("\nBlue line = frame number\nGreen line = number of time screen look")
print("Orange line = number of time away look")
analysisCSV()
liner('stdlog.csv')
#for drawing bar graph
#print("Analysing Data....")
#sys.stdout.flush()
#time.sleep(1)
#bargraph(glob[0],glob[1],glob[2])
``` |
{
"source": "AadityaDeshpande/Project-centric-mail",
"score": 3
} |
#### File: Project-centric-mail/ProjectCentricMail/auto.py
```python
import csv
import re
import time
import collections
import os
def is_spam(mail):
module_dir = os.path.dirname(__file__)
pattern = r"^http:"
spam_words = ['free','paid','discount','price','off','cheap','trade','.inc','limited','exchange','xchange','flat','latest','new','999','available','lose','win','winning','loss','sale','sponser','income','dob','loan','earn','money','login','gold','silver','100000','spin','hurry','advertisement','smartpicks',]
sensitive_words = ['password','credit','loan','debit','username','e-mail','g-mail','click','address','phone','limited','privacy','policy','delivery','free','discount','99','sponser','loan','bank','details','pin','otp','subscribe','www.','enter','voucher','vouchers','gmail','email','$','antivirus','+','years','interested','chatting','profile','100','coupon']
    # obfuscated words for the profanity filter (alphabet indexing starts from zero)
alp = "abcdefghijklmnopqrstuvwxyz*"
w1 = alp[18]+alp[4]+alp[23]
w2 = alp[15]+alp[14]+alp[17]+alp[13]
w3 = alp[23]+alp[23]+alp[23]
w4 = alp[13]+alp[20]+alp[3]+alp[4]
w5 = alp[1]+alp[14]+alp[14]+alp[1]
w6 = alp[21]+alp[0]+alp[6]+alp[4]+alp[13]+alp[0]
w7 = alp[21]+alp[0]+alp[6]+alp[8]+alp[13]+alp[0]
w8 = alp[1]+alp[20]+alp[19]+alp[19]
w9 = alp[15]+alp[4]+alp[13]+alp[8]+alp[18]
w10 = alp[7]+alp[14]+alp[13]+alp[4]+alp[24]+alp[12]+alp[14]+alp[14]+alp[13]
w11 = alp[26]
w12 = alp[7]+alp[14]+alp[19]+alp[8]+alp[19]+alp[18]
prof = [w1,w2,w3,w4,w5,w6,w7,w8,w9,w10,w11,w12]
#sure_shot spam words that occure only in spam mails
sure_shot = ['spam']
spam_score = 0
# spam score ,if it increases higher than the normal then mail will be marked as spam
#mail = str(input("Enter mail here : "))
#mail = mail.lower()
con = 0 #counter to change condition
# empty message detection
if mail == "" :
con = 1
return(True)
# For cyrillic characters -
c = "ёяшертыуиопющэъасдфгчйкльжзхцвбнмЁЯШЕРТЫУИОПЮЩЭЪАСДФГЧЙКЛЬЖЗХЦВБНМ"
if con == 0:
for char in c:
if char in mail:
return(True)
    # profanity filter
if con == 0:
for word in prof:
if word in mail:
return(True)
# for sureshot words in email
if con== 0:
for word in sure_shot:
if word in mail:
return(True)
    # for sensitive words
if con == 0:
for word in sensitive_words:
if word in mail:
spam_score +=1 #1 #0.5
break
# for spam score
for word in spam_words:
if word in mail:
spam_score +=2 #2 #1
    # pattern matching for non-secure links
if con== 0:
for word in mail:
if re.match(pattern,word):
spam_score +=5
#Calculations for spam
spam_level = 100*(spam_score/len(mail))
# for final decision
if con == 0:
if spam_level >= 10: #10
return(True)
else:
return(False)
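# Illustrative usage (added note; the messages are hypothetical): is_spam expects a list of
# lower-cased tokens, e.g.
#   is_spam("win a free voucher now http://example.com".split(' '))
# scores well above the threshold and returns True, while a short plain message such as
#   is_spam("hello there".split(' '))
# returns False.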
def run(username):
final={}
final_list=[]
csv.field_size_limit(100000000)
cnt=0
scn=0
module_dir = os.path.dirname(__file__)
f = open(os.path.join(module_dir,f'csvfile/{username}.csv'))
reader = csv.reader(f)
for m_id,Sender,Subject,Date,Snippet,Message_body in reader:
if scn==0:
scn+=1
continue
mail = Subject+Snippet+Message_body
mail = mail.lower()
mail = mail.replace('\n',' ')
mail = mail.replace('\t',' ')
mail = mail.replace('?',' ')
mail = mail.replace('!',' ')
mail = mail.replace('%',' ')
mail = mail.replace('$',' $ ')
mail = mail.split(' ')
final['m_id']=m_id
#time.sleep(.500)
#print(mail)
if (is_spam(mail)):
#print("--------------------------------------------------------------")
#print(m_id," YES it is a spam")
final['is_spam']=True
cnt+=1
scn+=1
else:
#print(m_id," NO it is not a spam")
final['is_spam']=False
scn+=1
#print(mail)
#if(is_spam(mail)):
#print(mail)
#print("--------------------------------------------------------------")
final_list.append(final)
final={}
    # returns False for a normal mail
    # returns True for a spam mail
#print("total no of spams are: ",cnt)
#print("Scanned number of mails",scn-1)
module_dir = os.path.dirname(__file__)
#final_list.append(final)
with open(os.path.join(module_dir,f'csvfile/{username}_output.csv'), 'w', encoding='utf-8', newline = '') as csvfile:
fieldnames = ['m_id','is_spam']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter = ',')
writer.writeheader()
for val in final_list:
writer.writerow(val)
return f'{username}_output.csv'
```
#### File: Project-centric-mail/ProjectCentricMail/tests.py
```python
from django.test import TestCase,Client
from django.db.models import Max
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.urls import reverse
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from ProjectCentricMail.forms import SignUpForm
from ProjectCentricMail.caller import gmail_read
from ProjectCentricMail.auto import run
from ProjectCentricMail.merge import mer
from ProjectCentricMail.models import MessageInfo,ProjectClassify,ProjectNames
from ProjectCentricMail.sort import read
from ProjectCentricMail.formalinformal import PCM_formalinformal
import csv
import os
# Create your tests here.
class ProjectCentricMailCase(TestCase):
def setUp(self):
user = User.objects.create_user(username='ayushbansal323',
email='<EMAIL>',
password='<PASSWORD>')
user.save()
user = User.objects.create_user(username='ayushbansal321',
email='<EMAIL>',
password='<PASSWORD>')
user.save()
project=ProjectNames.objects.create(username='ayushbansal323',projectname = "cn")
project.save()
project=ProjectNames.objects.create(username='ayushbansal323',projectname = "dbms")
project.save()
def test_login(self):
c = Client()
response = c.post('/login', {'username': 'ayushbansal323', 'password': '<PASSWORD>'})
self.assertEqual(response.status_code, 302)
def test_invalid_login(self):
c = Client()
response2 = c.post('/login', {'username': 'ayushbansal323', 'password': '<PASSWORD>'})
self.assertEqual(response2.status_code, 200)
self.assertEqual(response2.context['message'], "Invalid credentials.")
def test_Spam(self):
c = Client()
response0=c.post('/login', {'username': 'ayushbansal323', 'password': '<PASSWORD>'})
self.assertEqual(response0.status_code, 302)
response = c.get('/spam')
self.assertEqual(response.status_code, 200)
def test_FormalInformal(self):
c = Client()
response0=c.post('/login', {'username': 'ayushbansal323', 'password': '<PASSWORD>'})
self.assertEqual(response0.status_code, 302)
response = c.get('/formalinformal')
self.assertEqual(response.status_code, 200)
def test_Project(self):
c = Client()
response0=c.post('/login', {'username': 'ayushbansal323', 'password': '<PASSWORD>'})
self.assertEqual(response0.status_code, 302)
response = c.post('/project', {'projectname':'toc'})
self.assertEqual(response.status_code, 200)
response2 = c.get('/project/cn')
self.assertEqual(response2.status_code, 200)
def test_logout(self):
c = Client()
response0=c.post('/login', {'username': 'ayushbansal323', 'password': '<PASSWORD>'})
self.assertEqual(response0.status_code, 302)
response = c.get('/logout')
self.assertEqual(response.status_code, 200)
def test_delete(self):
c = Client()
response0=c.post('/login', {'username': 'ayushbansal323', 'password': '<PASSWORD>'})
self.assertEqual(response0.status_code, 302)
response = c.get('/delete')
self.assertEqual(response.status_code, 200)
```
#### File: Project-centric-mail/ProjectCentricMail/tp.py
```python
from ProjectCentricMail.models import MessageInfo,ProjectClassify
import csv
import os
#m_id="1664eedf6a81dfe6"
#b="<NAME> <<EMAIL>>"
#c="formal_informal machine learning"
#d="2018-10-06"
#e="<NAME> New device signed in to <EMAIL> Your Google Account was just signed in to from a new Linux device. You're getting this email to make sure it was you. Check"
#f=""
#g="ayushbansal323"
#z = MessageInfo(m_id=a,Sender=b,Subject=c,Date=d,Snippet=e,Message_body=f,Username=g)
#c = MessageInfo.objects.filter(m_id=m_id)
#module_dir = os.path.dirname(__file__)
username="ayushbansal323"
f = open(f'{username}_final.csv')
reader = csv.reader(f)
for m_id,Sender,Subject,Date,Snippet,Message_body,is_spam in reader:
if m_id!="m_id":
query = MessageInfo.objects.filter(m_id=m_id)
if not query:
print(Date)
tuplem = MessageInfo(m_id=m_id,Sender=Sender,Subject=Subject,Date=Date,Snippet=Snippet,Message_body=Message_body,Username=username)
tuplem.save()
mid = MessageInfo.objects.get(m_id=m_id)
tuplep=ProjectClassify(m_id=mid,Spam=is_spam)
tuplep.save()
#query = MessageInfo.objects.filter(Username="ayushbansal323")
#c = ProjectClassify.objects.filter(Spam=True).values()
for i in query:
c = ProjectClassify.objects.filter(m_id=i)
c.Spam
#https://simpleisbetterthancomplex.com/tutorial/2017/02/18/how-to-create-user-sign-up-view.html
def insert_spam(reader):
for m_id,Sender,Subject,Date,Snippet,Message_body,is_spam in reader:
if m_id!="m_id":
query = MessageInfo.objects.filter(m_id=m_id)
if not query:
print(Date)
tuplem = MessageInfo(m_id=m_id,Sender=Sender,Subject=Subject,Date=Date,Snippet=Snippet,Message_body=Message_body,Username=username)
tuplem.save()
mid = MessageInfo.objects.get(m_id=m_id)
tuplep=ProjectClassify(m_id=mid,Spam=is_spam)
tuplep.save()
```
#### File: Project-centric-mail/ProjectCentricMail/views.py
```python
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.urls import reverse
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from ProjectCentricMail.forms import SignUpForm
from ProjectCentricMail.caller import gmail_read
from ProjectCentricMail.auto import run
from ProjectCentricMail.merge import mer
from ProjectCentricMail.models import MessageInfo,ProjectClassify,ProjectNames
from ProjectCentricMail.sort import read
from ProjectCentricMail.formalinformal import PCM_formalinformal
import csv
import os
import threading
from django.contrib import messages
# Create your views here.
def index(request):
if not request.user.is_authenticated:
return render(request, "ProjectCentricMail/login.html", {"message": None})
context = {
"user": request.user
}
r = User.objects.get(username=request.user)
print(r.email)
processThread = threading.Thread(target=gmail_read, args=[request.user]) # <- 1 element list
processThread.start()
print(request.user)
return render(request, "ProjectCentricMail/user.html", context)
def signup_view(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('<PASSWORD>')
user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect('index')
else:
form = SignUpForm()
return render(request, 'ProjectCentricMail/signup.html', {'form': form})
def login_view(request):
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
processThread = threading.Thread(target=gmail_read, args=[request.user]) # <- 1 element list
processThread.start()
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "ProjectCentricMail/login.html", {"message": "Invalid credentials." , "notetype":"danger"})
def logout_view(request):
logout(request)
return render(request, "ProjectCentricMail/login.html", {"message": "Logged out." , "notetype":"info"})
def spam_view(request):
if not request.user.is_authenticated:
return render(request, "ProjectCentricMail/login.html", {"message": None})
username=request.user
processThread = threading.Thread(target=gmail_read, args=[request.user]) # <- 1 element list
processThread.start()
run(username)
mer(username)
print(request.user)
module_dir = os.path.dirname(__file__)
f = open(os.path.join(module_dir,f'csvfile/{username}_final.csv'))
reader = csv.reader(f)
r = User.objects.get(username=request.user)
context = {
"user": request.user ,
"reader":reader ,
"email":r.email
}
return render(request, "ProjectCentricMail/mails.html", context)
def project_view(request):
if not request.user.is_authenticated:
return render(request, "ProjectCentricMail/login.html", {"message": None})
if request.method == 'POST':
projectname = request.POST["projectname"]
count = ProjectNames.objects.filter(username=request.user,projectname=projectname).count()
if count == 0 :
tuplep = ProjectNames(username=request.user,projectname=projectname)
tuplep.save()
projectname = ProjectNames.objects.filter(username=request.user)
context = {
"user": request.user ,
"projectname":projectname ,
}
return render(request, "ProjectCentricMail/projectnames.html", context)
def projectsort_view(request,projectname):
if not request.user.is_authenticated:
return render(request, "ProjectCentricMail/login.html", {"message": None})
projects = ProjectNames.objects.filter(username=request.user)
processThread = threading.Thread(target=gmail_read, args=[request.user]) # <- 1 element list
processThread.start()
listp=[]
n=ProjectNames.objects.filter(username=request.user).count()
for i in projects:
listp.append(i.projectname)
read(request.user,listp,n)
module_dir = os.path.dirname(__file__)
f = open(os.path.join(module_dir,f'csvfile/{request.user}_finalproject.csv'))
reader = csv.reader(f)
r = User.objects.get(username=request.user)
context = {
"user": request.user,
"reader":reader,
"projectname":projectname,
"email":r.email
}
return render(request, "ProjectCentricMail/project.html", context)
def formalinformal_view(request):
if not request.user.is_authenticated:
return render(request, "ProjectCentricMail/login.html", {"message": None})
processThread = threading.Thread(target=gmail_read, args=[request.user]) # <- 1 element list
processThread.start()
PCM_formalinformal(request.user)
module_dir = os.path.dirname(__file__)
f = open(os.path.join(module_dir,f'csvfile/{request.user}_finalformal.csv'))
reader = csv.reader(f)
r = User.objects.get(username=request.user)
context = {
"user": request.user,
"reader":reader,
"email":r.email
}
return render(request, "ProjectCentricMail/formalinformal.html", context)
def about_view(request):
if not request.user.is_authenticated:
return render(request, "ProjectCentricMail/login.html", {"message": None})
context = {
"user": request.user
}
r = User.objects.get(username=request.user)
print(r.email)
processThread = threading.Thread(target=gmail_read, args=[request.user]) # <- 1 element list
processThread.start()
print(request.user)
return render(request, "ProjectCentricMail/about.html", context)
def del_user(request):
try:
username = request.user
u = User.objects.get(username = username)
u.delete()
render(request, 'ProjectCentricMail/login.html',{"message":"Account Deleted","notetype":"info"})
except User.DoesNotExist:
        messages.error(request, "User does not exist")
return render(request, 'ProjectCentricMail/login.html')
except Exception as e:
return render(request, 'ProjectCentricMail/login.html',{"message":e.message,"notetype":"info"})
return render(request, 'ProjectCentricMail/login.html',{"message":"Account Deleted","notetype":"info"})
``` |
{
"source": "aadityaganapathy/uReplicator",
"score": 2
} |
#### File: bin/MyKafka/UController.py
```python
import json, requests, glob, os
import time
from subprocess import call, Popen, PIPE
from MyKafka.ConfigCursor import ConfigCursor
from MyKafka.ULogger import ULogger
class UController():
def __init__(self, controller_configs, config_cursor):
self.controller_configs = controller_configs
self.config_cursor = config_cursor
self.config_cursor.generate_controller_config(controller_configs)
self.logger = ULogger()
def connect_controllers(self, controllerPorts=None):
"""Establish connection to all Helix controllers"""
if controllerPorts == None:
controllerPorts = self.__get_all_controller_ports()
# We only want the controller ports that are not currently running
controllerPorts = self.get_offline_controllers(controllerPorts=controllerPorts)
if len(controllerPorts) > 0:
controller_configs = self.__get_controller_configs(controllerPorts=controllerPorts)
self.__run_controllers(controller_configs)
self.__poll_controllers(controllerPorts)
def get_controllers(self, controllerPorts=[]):
controllers = []
if len(controllerPorts) == 0:
controllerPorts = self.__get_all_controller_ports()
for port in controllerPorts:
controller = {}
controller_json = self.__get_controller_configs(controllerPorts=[port])[0]
controller['srcZKPort'] = controller_json['srcZKPort']
controller['destZKPort'] = controller_json['destZKPort']
controller['controllerName'] = controller_json['controllerName']
controller['controllerPort'] = port
controller['controllerActive'] = self.controller_running(port)
controller['activeWorkers'] = self.get_worker_count(port)
controller['topics'] = self.get_topics(port)
controllers.append(controller)
return controllers
def get_topics(self, controllerPort):
"""Returns all whitelisted topics"""
if self.controller_running(controllerPort):
response = requests.get(f"http://localhost:{controllerPort}/topics")
# uReplicator get topics response is not straightforward, so parse it
return self.__parse_get_topics_response(response)
else:
self.logger.log_inactive_controller(controllerPort)
def whitelist_topics(self, topics, controllerPort):
"""Whitelist topics to be mirrored"""
if self.controller_running(controllerPort):
for topic in topics:
self.__whitelist_topic(topic, controllerPort, partitions=8)
else:
self.logger.log_inactive_controller(controllerPort)
def blacklist_topics(self, topics, controllerPort):
"""Blacklist topics from being mirrored"""
if self.controller_running(controllerPort):
for topic in topics:
self.__blacklist_topic(topic, controllerPort)
else:
self.logger.log_inactive_controller(controllerPort)
def run_workers(self, controllerPort, worker_count):
"""Run worker_count number of workers"""
self.remove_workers(controllerPort)
self.__delete_worker_configs(controllerPort)
self.__generate_worker_configs(controllerPort, worker_count, offset=0)
self.__run_worker_instances(controllerPort)
def add_workers(self, workers_count):
"""Add additional workers"""
print("")
def shutdown_controller(self, controllerPort):
"""Shuts down the specified controller"""
if self.controller_running(controllerPort):
controller_pid = self.__get_controller_pid(controllerPort)
call(f"kill -9 {controller_pid}", shell=True)
print(f"[INFO]: Killed controller on port {controllerPort}")
self.remove_workers(controllerPort)
else:
self.logger.log_inactive_controller(controllerPort, status="INFO")
def remove_workers(self, controllerPort, workers_count=-1):
"""Removes specified number of workers, or all workers if workers_count is omitted"""
worker_pids = self.__get_worker_pids(controllerPort)
        if workers_count > len(worker_pids):
            self.logger.log_invalid_worker_input_count(len(worker_pids), workers_count)
            return
        elif workers_count > 0:
            worker_pids = worker_pids[:workers_count]
for pid in worker_pids:
call(f"kill -9 {pid}", shell=True)
print(f"[INFO]: Killed {len(worker_pids)} workers from controller on port {controllerPort}")
def get_worker_count(self, controllerPort):
"""Returns the number of workers currently running"""
return len(self.__get_worker_pids(controllerPort))
def controller_running(self, port):
"""Returns true if the specified controller is running, false otherwise"""
try:
requests.get(f"http://localhost:{port}/topics")
return True
except requests.exceptions.RequestException:
return False
    def get_offline_controllers(self, controllerPorts=None):
        """Returns a list of inactive controllers"""
        if controllerPorts is None:
            controllerPorts = self.__get_all_controller_ports()
        offline_ports = []
        for port in controllerPorts:
            if self.controller_running(port):
                self.logger.log_active_controller(port)
            else:
                offline_ports.append(port)
        return offline_ports
def __parse_get_topics_response(self, response):
"""Parse response for get topics REST call"""
# Example response: "currently serving topics [topic1, topic2, ...]" -> so do some manipulation to extract the list
content = response.content.decode("utf-8")
try:
list_string_form = content[content.index('['):content.index(']')+1]
csv = list_string_form.strip('[]')
# stripping '[]' from [topic1, topic2, ...] will lead to whitespaces in topics e.g (['topic1',' topic2', ' topic3' ) -> so remove whitespaces
return [topic.strip(' ') for topic in csv.split(',')]
except ValueError:
return list()
def __get_worker_pids(self, controllerPort):
"""Returns the pids of all workers"""
proc1 = Popen(f"pgrep -f 'Dapp_name=uReplicator-Worker_{controllerPort}'", shell=True, stdout=PIPE)
out = proc1.communicate()[0]
out = out.decode("utf-8").split("\n")[:-2] # [:-2] because empty string and some non PID number is included in list
return out
def __get_controller_pid(self, controllerPort):
"""Returns the pids of all workers"""
proc1 = Popen(f"pgrep -f 'Dapp_name=uReplicator-Controller_{controllerPort}'", shell=True, stdout=PIPE)
out = proc1.communicate()[0]
out = out.decode("utf-8").split('\n')[0]
return out
def __run_worker_instances(self, controllerPort):
"""Runs the workers attatched to a controller"""
helix_configs = self.__get_worker_configs(controllerPort)
output_path = self.__get_controller_path(controllerPort)
for helix_config in helix_configs:
print(f"[EXECUTING]: nohup ./bin/pkg/start-worker-example1.sh {output_path} {helix_config} uReplicator-Worker_{controllerPort} > /dev/null 2>&1 &")
call(f"nohup ./bin/pkg/start-worker-example1.sh {output_path} {helix_config} uReplicator-Worker_{controllerPort} > /dev/null 2>&1 &", shell=True)
def __get_worker_configs(self, controllerPort):
"""Returns all helix_*.properties paths for a specified controller """
controller_path = self.__get_controller_path(controllerPort)
return glob.glob(f"{controller_path}/helix*")
def __generate_worker_configs(self, controllerPort, worker_count, offset=0):
"""Generates the helix_*.properties files"""
controller_json = self.__get_controller_configs(controllerPorts=[controllerPort])[0]
output_path = self.__get_controller_path(controllerPort)
for count in range(offset, int(worker_count) + offset):
with open(os.path.join(output_path, f"helix_{count}.properties"), 'w') as config_file:
config_file.write(f"zkServer={controller_json['srcZKPort']}\n")
config_file.write(f"instanceId=helixWorker_{count}\n")
config_file.write(f"helixClusterName={controller_json['controllerName']}\n")
def __delete_worker_configs(self, controllerPort):
"""Deletes all worker configs (helix.properties) for the specified controller"""
controller_path = self.__get_controller_path(controllerPort)
for filename in glob.glob(f"{controller_path}/helix*"):
os.remove(filename)
def __get_all_controller_ports(self):
"""Returns a list of all controller ports"""
controllers_json_list = self.__get_controller_configs()
ports = list()
for controller in controllers_json_list:
ports.append(controller['controllerPort'])
return ports
def __whitelist_topic(self, topic, port, partitions=8):
topic_data = {"topic": topic, "numPartitions": partitions}
print(f"EXECUTING: curl -X POST -d '{json.dumps(topic_data)}' http://localhost:{port}/topics")
try:
response = requests.post(f"http://localhost:{port}/topics", data=json.dumps(topic_data))
except requests.exceptions.RequestException:
self.logger.log_failed_controller_connection(port)
            return "failed"
        res = "failed"
        if response.status_code < 300:
self.logger.log_whitelist_topic(topic)
res = "success"
else:
# topic is already whitelisted
            if self.__topic_whitelisted(topic, port):
self.logger.log_repeat_whitelist_topic(topic)
# something else went wrong
else:
self.logger.log_failed_whitelist_topic(topic)
res = "failed"
return res
def __topic_whitelisted(self, topic, port):
topics = self.get_topics(port)
if topic in topics:
return True
return False
def __blacklist_topic(self, topic, port):
print(f"DELETE http://localhost:{port}/topics/{topic}")
try:
res = requests.delete(f"http://localhost:{port}/topics/{topic}")
except requests.exceptions.RequestException:
self.logger.log_failed_controller_connection(port)
res = None
return res
def __run_controllers(self, controllers):
"""Function to run all specified controllers"""
for controller in controllers:
path = f"{self.__get_controller_path(controller['controllerPort'])}/controllerConfig.json"
call(f"nohup ./bin/pkg/start-controller-example1.sh {path} {controller['controllerPort']} > /dev/null 2>&1 &", shell=True)
def __get_controller_configs(self, controllerPorts=[]):
"""Returns list of controller configs as JSON"""
controllers_json = list()
with open(self.controller_configs) as f:
data = json.load(f)
for controller in data['controllers']:
if len(controllerPorts) == 0 or int(controller['controllerPort']) in controllerPorts or str(controller['controllerPort']) in controllerPorts:
controllers_json.append(controller)
return controllers_json
def __get_controller_path(self, controllerPort):
controller_json = self.__get_controller_configs(controllerPorts=[controllerPort])[0]
src_cluster_port = controller_json['srcZKPort'].split(":")[-1]
path = f"{self.config_cursor.get_output_config_path(src_cluster_port)}/controller"
return path
def __poll_controllers(self, controllerPorts):
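        # Briefly poll each newly started controller until it responds or the retry budget runs out.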
retries = 3
for controllerPort in controllerPorts:
while self.controller_running(controllerPort) == False and retries >= 0:
retries -= 1
time.sleep(0.5)
``` |
{
"source": "Aadityajoshi151/discord-pekofy-bot",
"score": 3
} |
#### File: discord-pekofy-bot/extensions/pekofy.py
```python
from discord.ext import commands
from discord import Embed
import random
from modules import pekofication
import replies
class Pekofy(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.cooldown(1, 5.0, commands.BucketType.user)
async def pekofy(self, ctx):
if ctx.message.reference: # reference is a reply to a message
message = ctx.message.reference.resolved
else:
return await ctx.send(random.choice(replies.handling.no_reference))
if message.embeds:
response = await pekofication.pekofy_embed(message.embeds[-1])
else:
response = await pekofication.pekofy(message.clean_content)
if response == "TOO_MANY_PEKOS":
return await ctx.send(replies.handling.limit_reached)
if response == message.clean_content:
return await ctx.send(replies.handling.no_change)
if isinstance(response, Embed):
await ctx.send(embed=response)
else:
await ctx.send(response)
@commands.command()
@commands.cooldown(1, 10.0, commands.BucketType.guild)
async def pekopasta(self, ctx):
if ctx.channel.nsfw:
await ctx.send(replies.copypasta.nsfw)
else:
await ctx.send(replies.copypasta.sfw)
def setup(bot):
bot.add_cog(Pekofy(bot))
``` |
{
"source": "Aadityajoshi151/pyffmpeg",
"score": 2
} |
#### File: Aadityajoshi151/pyffmpeg/make_releases.py
```python
import sys
import os
from time import sleep
version = os.environ['GITHUB_REF'].split('/')[-1]
print(f'version: {version}')
_, token = sys.argv
cwd = os.path.realpath('.')
# Login to GH
with open('token.txt', 'w') as tok:
tok.write(token)
print('Finished writing token file')
cmd = 'gh auth login --with-token < token.txt'
os.system(cmd)
print('Authenticated')
def main():
BRANCHES = ('build-darwin', 'build-linux', 'build-windows')
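    # Create a prerelease from each platform build branch (tagged '<version>-<platform>'), wait ~10 minutes, then delete those releases.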
for branch in BRANCHES:
tag = version + branch.replace('build', '')
cmd1 = f'gh release create {tag} --target {branch} -p'
os.system(cmd1)
    print('sleeping for 10 minutes')
sleep(600)
print('Should delete tags')
for branch in BRANCHES:
tag = version + branch.replace('build', '')
cmd1 = f'gh release delete {tag} --yes'
os.system(cmd1)
print('done deleting tags')
main()
print('All Done')
```
#### File: pyffmpeg/pyffmpeg/__init__.py
```python
import os
import threading
from time import sleep
from typing import Optional
from subprocess import Popen, PIPE, STDOUT
from platform import system
from lzma import decompress
from base64 import b64decode, b64encode
from .pseudo_ffprobe import FFprobe
from .misc import Paths, fix_splashes, SHELL
class FFmpeg():
"""
Provide methods for working with FFmpeg
"""
def __init__(self, directory="."):
"""
Init function
"""
self.save_dir = directory
self.overwrite = True
self.loglevels = (
'quiet', 'panic', 'fatal', 'error', 'warning',
'info', 'verbose', 'debug', 'trace')
self.loglevel = 'fatal'
self._log_level_stmt = '-loglevel'
if self.overwrite:
self._over_write = '-y'
else:
self._over_write = '-n'
# Progress
self.report_progress = False
self._in_duration: float = 0.0
self._progress: int = 0
self.onProgressChanged = self.progressChangeMock
        # instances are stored according to function names
self._ffmpeg_instances = {}
self._ffmpeg_file = Paths().load_ffmpeg_bin()
self.error = ''
def convert(self, input_file, output_file):
"""
Converts and input file to the output file
"""
if os.path.isabs(output_file):
# absolute file
out = output_file
else:
# not an absolute file
out = os.path.join(self.save_dir, output_file)
inf = input_file.replace("\\", "/")
if self.loglevel not in self.loglevels:
msg = 'Warning: "{}" not an ffmpeg loglevel flag.' +\
' Using fatal instead'
print(msg.format(self.loglevel))
self.loglevel = 'fatal'
options = "{} -loglevel {} "
options = options.format(self._ffmpeg_file, self.loglevel)
options += "{} -i {} {}"
options = options.format(self._over_write, inf, out)
if self.report_progress:
f = FFprobe(inf)
d = f.duration.replace(':', '')
self._in_duration = float(d)
self.monitor(out)
outP = Popen(
options, shell=SHELL, stdin=PIPE, stdout=PIPE, stderr=PIPE)
self._ffmpeg_instances['convert'] = outP
self.error = str(outP.stderr.read(), 'utf-8')
return out
def get_ffmpeg_bin(self):
"""
Get the ffmpeg executable file. This is the fullpath to the
binary distributed with pyffmpeg. There is only one at a time.
"""
return self._ffmpeg_file
def get_fps(self, input_file):
"""
Returns the frame per second rate of an input file
"""
fprobe = FFprobe(input_file)
fps = fprobe.fps
return fps
def monitor(self, fn: str):
m_thread = threading.Thread(target=self._monitor, args=[fn])
m_thread.daemon = True
m_thread.start()
def _monitor(self, fn: str):
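        # Track conversion progress by repeatedly probing the partially written output file and comparing its duration to the input's.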
print('Monitoring Spirit started')
sleep(1)
dura = 0.0
while dura < self._in_duration:
try:
f = FFprobe(fn)
d = f.duration.replace(':', '')
dura = float(d)
except:
dura = 0.0
self.progress = dura / self._in_duration * 100
sleep(0.1)
def options(self, opts):
"""
Allows user to pass any other command line options to the FFmpeg executable
eg.: command line options of 'ffmpeg -i a.mp4 b.mp3'
will be passed by user as: opts: '-i a.mp4 b.mp3'
"""
if isinstance(opts, list):
options = fix_splashes(opts)
# Add ffmpeg and overwrite variable
options.insert(0, self._over_write)
if self.loglevel not in self.loglevels:
msg = 'Warning: "{}" not an ffmpeg loglevel flag.' +\
' Using fatal instead'
print(msg.format(self.loglevel))
self.loglevel = 'fatal'
options = ' '.join(options)
options = ' '.join(['-loglevel', self.loglevel, options])
else:
options = opts
# Add ffmpeg and overwrite variable
# handle overwrite
if self._over_write not in options:
options = " ".join([self._over_write, options])
# handle loglevel
if self._log_level_stmt not in options:
if self.loglevel not in self.loglevels:
msg = 'Warning: "{}" not an ffmpeg loglevel flag.' +\
' Using fatal instead'
print(msg.format(self.loglevel))
self.loglevel = 'fatal'
            if self.loglevel != 'fatal':
                options = " ".join(
                    [self._log_level_stmt, self.loglevel, options])
# add ffmpeg
options = " ".join([self._ffmpeg_file, options])
out = Popen(options, shell=SHELL, stdin=PIPE, stdout=PIPE, stderr=PIPE)
self._ffmpeg_instances['options'] = out
self.error = str(out.stderr.read(), 'utf-8')
return True
@property
def progress(self):
return self._progress
@progress.setter
def progress(self, percent):
self._progress = int(percent)
self.onProgressChanged(self._progress)
def progressChangeMock(self, progress):
pass
def quit(self, function: Optional[str] = ''):
"""
Allows for any running process of ffmpeg started by pyffmpeg
to be terminated
"""
if function:
inst = self._ffmpeg_instances[function]
output = inst.communicate(b'q')
# Quit all instances
else:
for inst in self._ffmpeg_instances.values():
output = inst.communicate(b'q')
print('out: ', output)
``` |
{
"source": "Aadityajoshi151/pyinit",
"score": 4
} |
#### File: pyinit/src/pyinit.py
```python
import os
print(os.getcwd())
path = input("Please enter the path\n")
project_name = input("Please enter project name\n")
project_name = project_name.replace(" ","-") # Replaces spaces with dashes
os.chdir(path) #Changes current working directory to the path provided
os.mkdir(project_name) #Creates folder with project name
path = path+"/"+project_name
os.chdir(path) #Changes current working directory to the folder created
os.system("git init") #Initializes git repository
os.mkdir("assets")
print("Assets folder created") #Creates assets folder
os.mkdir("src")
print("src folder created") #Creates src folder
myfile = open("README.md","w") #Creates README file
myfile.write(f"## {project_name.replace('-',' ')}") #Writes project name in h2 in README file. Spaces are allowed.
myfile.close()
print("README file created")
myfile = open("requirements.txt","w") #Creates requirements file
myfile.close()
print("requirements file created")
myfile = open(".gitignore","w") #Creates gitignore file
myfile.close()
print("gitignore file created")
path = path+"/src"
os.chdir(path) #Moves into the src folder
content = """#imports
def main():
print("Main Begins")
if __name__ == "__main__":
main()
"""
myfile = open(f"{project_name}.py","w") #Creates empty python file with project name
myfile.write(content)
myfile.close()
print(f"{project_name}.py file created")
print("All Done!")
``` |
{
"source": "AadityaJ/_schwarzenegger-chan_",
"score": 3
} |
#### File: AadityaJ/_schwarzenegger-chan_/replacer.py
```python
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
from random import randint
import nltk.data
class Replacer(object):
def __init__(self, text):
self.text = text
self.output = ""
def tokenize(self):
# Load the pretrained neural net
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# Get the list of words from the entire text
words = word_tokenize(self.text)
# Identify the parts of speech
tagged = nltk.pos_tag(words)
#print tagged
for i in range(0,len(words)):
replacements = []
            # Only replace nouns with nouns, verbs with verbs etc.
for syn in wordnet.synsets(words[i]):
# Do not attempt to replace proper nouns or determiners
if tagged[i][1] == 'NNP' or tagged[i][1] == 'DT':
break
# The tokenizer returns strings like NNP, VBP etc
# but the wordnet synonyms has tags like .n.
# So we extract the first character from NNP ie n
# then we check if the dictionary word has a .n. or not
word_type = tagged[i][1][0].lower()
                if syn.name().find("."+word_type+".") != -1:
# extract the word only
r = syn.name()[0:syn.name().find(".")]
replacements.append(r)
if len(replacements) > 0:
# Choose a random replacement
replacement = replacements[randint(0,len(replacements)-1)]
self.output = self.output + " " + replacement
else:
# If no replacement could be found, then just use the
# original word
self.output = self.output + " " + words[i]
def get_output(self):
#print the result
return (self.output)
'''
r = Replacer("Your desire is inside spiritual fulfillment")
r.tokenize()
print r.get_output()
'''
``` |
{
"source": "AadityaMunjal/image-processing-algorithms",
"score": 3
} |
#### File: image-processing-algorithms/algorithms/Grayscale.py
```python
def grayscale(image):
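    # Average the R, G and B channels of each pixel and write the result back in place.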
for row in range(image.shape[0]):
for col in range(image.shape[1]):
avg = sum(image[row][col][i] for i in range(3)) // 3
image[row][col] = [avg for _ in range(3)]
``` |
{
"source": "aaditya-panik/rule-engine",
"score": 2
} |
#### File: lib/rule_engine/parser.py
```python
import ast as ast_
import collections
import threading
from . import ast
from . import errors
import ply.lex as lex
import ply.yacc as yacc
literal_eval = ast_.literal_eval
class ParserBase(object):
"""
A base class for parser objects to inherit from. This does not provide any
grammar related definitions.
"""
precedence = ()
"""The precedence for operators."""
tokens = ()
reserved_words = {}
"""
A mapping of literal words which are reserved to their corresponding grammar
names.
"""
__mutex = threading.Lock()
def __init__(self, debug=False):
"""
:param bool debug: Whether or not to enable debugging features when
using the ply API.
"""
self.debug = debug
self.context = None
# Build the lexer and parser
self._lexer = lex.lex(module=self, debug=self.debug)
self._parser = yacc.yacc(module=self, debug=self.debug, write_tables=self.debug)
def parse(self, text, context, **kwargs):
"""
Parse the specified text in an abstract syntax tree of nodes that can
later be evaluated.
:param str text: The grammar text to parse into an AST.
:param context: A context for specifying parsing and evaluation options.
:type context: :py:class:`~rule_engine.engine.Context`
:return: The parsed AST statement.
:rtype: :py:class:`~rule_engine.ast.Statement`
"""
kwargs['lexer'] = kwargs.pop('lexer', self._lexer)
with self.__mutex:
self.context = context
result = self._parser.parse(text, **kwargs)
self.context = None
return result
class Parser(ParserBase):
"""
The parser class for the rule grammar. This class contains many ply specific
members to define the various components of the grammar allowing it to be
parsed and reduced into an abstract syntax tree (AST). Once the AST has been
constructed it can then be evaluated multiple times. To make the evaluation
more efficient, nodes within the AST that are able to be reduced are while
the parsing is taking place. This reduction phase involves evaluation,
causing :py:exc:`~rule_engine.errors.EvaluationError` exceptions to be
raised during parsing.
"""
op_names = {
# arithmetic operators
'+': 'ADD', '-': 'SUB',
'**': 'POW', '*': 'MUL',
'/': 'TDIV', '//': 'FDIV', '%': 'MOD',
# bitwise operators
'&': 'BWAND', '|': 'BWOR', '^': 'BWXOR',
'<<': 'BWLSH', '>>': 'BWRSH',
# comparison operators
'==': 'EQ', '=~': 'EQ_FZM', '=~~': 'EQ_FZS',
'!=': 'NE', '!~': 'NE_FZM', '!~~': 'NE_FZS',
'>': 'GT', '>=': 'GE',
'<': 'LT', '<=': 'LE',
# logical operators
'and': 'AND', 'or': 'OR', 'not': 'NOT',
# other operators
'.': 'ATTR',
'&.': 'ATTR_SAFE',
'in': 'IN',
}
reserved_words = {
# booleans
'true': 'TRUE',
'false': 'FALSE',
# float constants
'inf': 'FLOAT_INF',
'nan': 'FLOAT_NAN',
# null
'null': 'NULL',
# operators
'and': 'AND',
'in': 'IN',
'or': 'OR',
'not': 'NOT',
}
tokens = (
'DATETIME', 'FLOAT', 'STRING', 'SYMBOL',
'LPAREN', 'RPAREN', 'QMARK', 'COLON', 'COMMA',
'LBRACKET', 'RBRACKET', 'LBRACE', 'RBRACE'
) + tuple(set(list(reserved_words.values()) + list(op_names.values())))
t_ignore = ' \t'
# Tokens
t_BWAND = r'\&'
t_BWOR = r'\|'
t_BWXOR = r'\^'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_EQ = r'=='
t_NE = r'!='
t_QMARK = r'\?'
t_COLON = r'\:'
t_ADD = r'\+'
t_SUB = r'\-'
t_MOD = r'\%'
t_COMMA = r'\,'
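    # '[' optionally preceded by '&' for the safe item/slice access operator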
t_LBRACKET = r'((?<=\S)&)?\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_FLOAT = r'0(b[01]+|o[0-7]+|x[0-9a-fA-F]+)|[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?|\.[0-9]+([eE][+-]?[0-9]+)?'
# attributes must be valid symbol names so the right side is more specific
t_ATTR = r'(?<=\S)\.(?=[a-zA-Z_][a-zA-Z0-9_]*)'
t_ATTR_SAFE = r'(?<=\S)&\.(?=[a-zA-Z_][a-zA-Z0-9_]*)'
# tokens are listed from lowest to highest precedence, ones that appear
# later are effectively evaluated first
# see: https://en.wikipedia.org/wiki/Order_of_operations#Programming_languages
precedence = (
('left', 'OR'),
('left', 'AND'),
('right', 'NOT'),
('left', 'BWOR'),
('left', 'BWXOR'),
('left', 'BWAND'),
('right', 'QMARK', 'COLON'),
('nonassoc', 'EQ', 'NE', 'EQ_FZM', 'EQ_FZS', 'NE_FZM', 'NE_FZS', 'GE', 'GT', 'LE', 'LT', 'IN'), # Nonassociative operators
('left', 'ADD', 'SUB'),
('left', 'BWLSH', 'BWRSH'),
('left', 'MUL', 'TDIV', 'FDIV', 'MOD'),
('left', 'POW'),
('right', 'UMINUS'),
('left', 'ATTR', 'ATTR_SAFE'),
)
def t_POW(self, t):
r'\*\*?'
if t.value == '*':
t.type = 'MUL'
return t
def t_FDIV(self, t):
r'\/\/?'
if t.value == '/':
t.type = 'TDIV'
return t
def t_LT(self, t):
r'<([=<])?'
t.type = {'<': 'LT', '<=': 'LE', '<<': 'BWLSH'}[t.value]
return t
def t_GT(self, t):
r'>([=>])?'
t.type = {'>': 'GT', '>=': 'GE', '>>': 'BWRSH'}[t.value]
return t
def t_EQ_FZS(self, t):
r'=~~?'
if t.value == '=~':
t.type = 'EQ_FZM'
return t
def t_NE_FZS(self, t):
r'!~~?'
if t.value == '!~':
t.type = 'NE_FZM'
return t
def t_DATETIME(self, t):
r'd(?P<quote>["\'])([^\\\n]|(\\.))*?(?P=quote)'
t.value = t.value[1:]
return t
def t_STRING(self, t):
r's?(?P<quote>["\'])([^\\\n]|(\\.))*?(?P=quote)'
if t.value[0] == 's':
t.value = t.value[1:]
return t
def t_SYMBOL(self, t):
r'\$?[a-zA-Z_][a-zA-Z0-9_]*'
if t.value in ('if', 'elif', 'else', 'for', 'while'):
raise errors.RuleSyntaxError("syntax error (the {} keyword is reserved for future use)".format(t.value))
t.type = self.reserved_words.get(t.value, 'SYMBOL')
return t
def t_newline(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(self, t):
raise errors.RuleSyntaxError("syntax error (illegal character {0!r})".format(t.value[0]), t)
# Parsing Rules
def p_error(self, token):
raise errors.RuleSyntaxError('syntax error', token)
def p_statement_expr(self, p):
'statement : expression'
p[0] = ast.Statement(self.context, p[1])
def p_expression_getattr(self, p):
"""
object : object ATTR SYMBOL
| object ATTR_SAFE SYMBOL
"""
op_name = self.op_names.get(p[2])
p[0] = ast.GetAttributeExpression(self.context, p[1], p[3], safe=op_name == 'ATTR_SAFE').reduce()
def p_expression_object(self, p):
"""
expression : object
"""
p[0] = p[1]
def p_expression_ternary(self, p):
"""
expression : expression QMARK expression COLON expression
"""
condition, _, case_true, _, case_false = p[1:6]
p[0] = ast.TernaryExpression(self.context, condition, case_true, case_false).reduce()
def p_expression_arithmetic(self, p):
"""
expression : expression ADD expression
| expression SUB expression
| expression MOD expression
| expression MUL expression
| expression FDIV expression
| expression TDIV expression
| expression POW expression
"""
left, op, right = p[1:4]
op_name = self.op_names[op]
p[0] = ast.ArithmeticExpression(self.context, op_name, left, right).reduce()
def p_expression_bitwise(self, p):
"""
expression : expression BWAND expression
| expression BWOR expression
| expression BWXOR expression
"""
left, op, right = p[1:4]
op_name = self.op_names[op]
p[0] = ast.BitwiseExpression(self.context, op_name, left, right).reduce()
def p_expression_bitwise_shift(self, p):
"""
expression : expression BWLSH expression
| expression BWRSH expression
"""
left, op, right = p[1:4]
op_name = self.op_names[op]
p[0] = ast.BitwiseShiftExpression(self.context, op_name, left, right).reduce()
def p_expression_contains(self, p):
"""
expression : expression IN expression
| expression NOT IN expression
"""
if len(p) == 4:
member, _, container = p[1:4]
p[0] = ast.ContainsExpression(self.context, container, member).reduce()
else:
member, _, _, container = p[1:5]
p[0] = ast.ContainsExpression(self.context, container, member).reduce()
p[0] = ast.UnaryExpression(self.context, 'NOT', p[0]).reduce()
def p_expression_comparison(self, p):
"""
expression : expression EQ expression
| expression NE expression
"""
left, op, right = p[1:4]
op_name = self.op_names[op]
p[0] = ast.ComparisonExpression(self.context, op_name, left, right).reduce()
def p_expression_arithmetic_comparison(self, p):
"""
expression : expression GT expression
| expression GE expression
| expression LT expression
| expression LE expression
"""
left, op, right = p[1:4]
op_name = self.op_names[op]
p[0] = ast.ArithmeticComparisonExpression(self.context, op_name, left, right).reduce()
def p_expression_fuzzy_comparison(self, p):
"""
expression : expression EQ_FZM expression
| expression EQ_FZS expression
| expression NE_FZM expression
| expression NE_FZS expression
"""
left, op, right = p[1:4]
op_name = self.op_names[op]
p[0] = ast.FuzzyComparisonExpression(self.context, op_name, left, right).reduce()
def p_expression_logic(self, p):
"""
expression : expression AND expression
| expression OR expression
"""
left, op, right = p[1:4]
op_name = self.op_names[op]
p[0] = ast.LogicExpression(self.context, op_name, left, right).reduce()
def p_expression_group(self, p):
'object : LPAREN expression RPAREN'
p[0] = p[2]
def p_expression_negate(self, p):
'expression : NOT expression'
p[0] = ast.UnaryExpression(self.context, 'NOT', p[2]).reduce()
def p_expression_symbol(self, p):
'object : SYMBOL'
name = p[1]
scope = None
if name[0] == '$':
scope = 'built-in'
name = name[1:]
p[0] = ast.SymbolExpression(self.context, name, scope=scope).reduce()
def p_expression_uminus(self, p):
'expression : SUB expression %prec UMINUS'
names = {'-': 'UMINUS'}
p[0] = ast.UnaryExpression(self.context, names[p[1]], p[2]).reduce()
# Literal expressions
def p_expression_boolean(self, p):
"""
expression : TRUE
| FALSE
"""
p[0] = ast.BooleanExpression(self.context, p[1] == 'true')
def p_expression_datetime(self, p):
'object : DATETIME'
p[0] = ast.DatetimeExpression.from_string(self.context, literal_eval(p[1]))
def p_expression_float(self, p):
'expression : FLOAT'
p[0] = ast.FloatExpression(self.context, float(literal_eval(p[1])))
def p_expression_float_nan(self, p):
'expression : FLOAT_NAN'
p[0] = ast.FloatExpression(self.context, float('nan'))
def p_expression_float_inf(self, p):
'expression : FLOAT_INF'
p[0] = ast.FloatExpression(self.context, float('inf'))
def p_expression_null(self, p):
'object : NULL'
# null is an object because of the safe operator
p[0] = ast.NullExpression(self.context)
def p_expression_set(self, p):
"""
object : LBRACE ary_members RBRACE
| LBRACE ary_members COMMA RBRACE
"""
p[0] = ast.SetExpression(self.context, tuple(p[2])).reduce()
def p_expression_string(self, p):
'object : STRING'
p[0] = ast.StringExpression(self.context, literal_eval(p[1]))
def p_expression_array(self, p):
"""
object : LBRACKET RBRACKET
| LBRACKET ary_members RBRACKET
| LBRACKET ary_members COMMA RBRACKET
"""
if len(p) < 4:
p[0] = ast.ArrayExpression(self.context, tuple()).reduce()
else:
p[0] = ast.ArrayExpression(self.context, tuple(p[2])).reduce()
def p_expression_array_members(self, p):
"""
ary_members : expression
| ary_members COMMA expression
"""
if len(p) == 2:
deque = collections.deque()
deque.append(p[1])
else:
deque = p[1]
deque.append(p[3])
p[0] = deque
def p_expression_mapping(self, p):
"""
object : LBRACE RBRACE
| LBRACE map_members RBRACE
| LBRACE map_members COMMA RBRACE
"""
if len(p) < 4:
p[0] = ast.MappingExpression(self.context, tuple()).reduce()
else:
p[0] = ast.MappingExpression(self.context, tuple(p[2])).reduce()
def p_expression_mapping_member(self, p):
"""
map_member : expression COLON expression
"""
p[0] = (p[1], p[3])
def p_expression_mapping_members(self, p):
"""
map_members : map_member
| map_members COMMA map_member
"""
return self.p_expression_array_members(p)
def p_expression_getitem(self, p):
"""
object : object LBRACKET expression RBRACKET
"""
container, lbracket, item = p[1:4]
p[0] = ast.GetItemExpression(self.context, container, item, safe=lbracket == '&[').reduce()
def p_expression_getslice(self, p):
"""
object : object LBRACKET COLON RBRACKET
| object LBRACKET COLON expression RBRACKET
| object LBRACKET expression COLON RBRACKET
| object LBRACKET expression COLON expression RBRACKET
"""
container = p[1]
safe = p[2] == '&['
colon_index = p[1:].index(':')
if colon_index == 2 and len(p) == 5:
start, stop = None, None
elif colon_index == 2 and len(p) == 6:
start, stop = None, p[4]
elif colon_index == 3 and len(p) == 6:
start, stop = p[3], None
elif colon_index == 3 and len(p) == 7:
start, _, stop = p[3:6]
else:
raise errors.RuleSyntaxError('invalid get slice expression')
p[0] = ast.GetSliceExpression(self.context, container, start, stop, safe=safe).reduce()
```
#### File: tests/ast/value_is.py
```python
import collections
import unittest
import rule_engine.ast as ast
__all__ = ('ValueIsTests',)
inf = float('inf')
nan = float('nan')
class ValueIsTests(unittest.TestCase):
_Case = collections.namedtuple('_Case', ('value', 'numeric', 'real', 'integer', 'natural'))
cases = (
# value numeric real integer natural
_Case(-inf, True, False, False, False),
_Case(-1.5, True, True, False, False),
_Case(-1.0, True, True, True, False),
_Case(-1, True, True, True, False),
_Case(0, True, True, True, True ),
_Case(1, True, True, True, True ),
_Case(1.0, True, True, True, True ),
_Case(1.5, True, True, False, False),
_Case(inf, True, False, False, False),
_Case(nan, True, False, False, False),
_Case(True, False, False, False, False),
_Case(False, False, False, False, False),
_Case('', False, False, False, False),
_Case(None, False, False, False, False),
)
def test_value_is_integer_number(self):
for case in self.cases:
self.assertEqual(ast.is_integer_number(case.value), case.integer)
def test_value_is_natural_number(self):
for case in self.cases:
self.assertEqual(ast.is_natural_number(case.value), case.natural)
def test_value_is_numeric(self):
for case in self.cases:
self.assertEqual(ast.is_numeric(case.value), case.numeric)
def test_value_is_real_number(self):
for case in self.cases:
self.assertEqual(ast.is_real_number(case.value), case.real)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "aadityaparashar/led-iot",
"score": 3
} |
#### File: aadityaparashar/led-iot/main.py
```python
import socket
from machine import Pin
led_pin = Pin(5, Pin.OUT)
CONTENT = """\
HTTP/1.0 200 OK
Content-Type: text/html
<html>
<head>
</head>
<body>
<p>Hello #%d from MicroPython!</p>
<a href="/toggle">Click here to toggle LED hooked to pin 5</a>
</body>
</html>
"""
def main():
s = socket.socket()
ai = socket.getaddrinfo("0.0.0.0", 8080)
print("Bind address info:", ai)
addr = ai[0][-1]
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(addr)
s.listen(5)
print("Listening, connect your browser to http://<this_host>:8080/")
counter = 0
while True:
sock, addr = s.accept()
print("Client address:", addr)
stream = sock.makefile("rwb")
req = stream.readline().decode("ascii")
method, path, protocol = req.split(" ")
print("Got", method, "request for", path)
if path == "/toggle":
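            # value() with no argument reads the pin, so 1 - value() inverts the LED state.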
led_pin.value(1-led_pin.value())
while True:
h = stream.readline().decode("ascii").strip()
if h == "":
break
print("Got HTTP header:", h)
stream.write((CONTENT % counter).encode("ascii"))
stream.close()
sock.close()
counter += 1
print()
main() # Press Ctrl-C to stop web server
``` |
{
"source": "Aadityaprabu002/GoogleMeet_Attendance_Bot",
"score": 3
} |
#### File: GoogleMeet_Attendance_Bot/AttendanceBot/attendancebot.py
```python
import GoogleBot
from GoogleBot import time
from TimeTable import datetime
def WaitToStart(subject):
s_time = subject['start'].split('-')
TimeNow = datetime.datetime.now()
while int(s_time[0]) >= int(TimeNow.strftime('%H')) and int(s_time[1]) > int(TimeNow.strftime('%M')):
time.sleep(5)
TimeNow = datetime.datetime.now()
print('REACHED START TIME')
def WaitToEnd(subject):
TimeNow = datetime.datetime.now()
e_time = subject['end'].split('-')
while int(e_time[0]) >= int(TimeNow.strftime('%H')) and int(e_time[1]) > int(TimeNow.strftime('%M')) :
time.sleep(5)
TimeNow = datetime.datetime.now()
print('REACHED END TIME')
def main():
B = GoogleBot.GB()
B.set_day_schedule()
B.gmail_Login()
for sub in B.subject_order_list:
subject = B.schedule[sub]
print('Subject:',sub)
WaitToStart(subject)
B.gm_Toggle_and_JoinMeeting(subject['URL'],subject,sub)
WaitToEnd(subject)
B.gm_LeaveMeeting(subject,sub)
if __name__ == "__main__":
main()
``` |
{
"source": "aadityapritam00/Library_Management_project",
"score": 4
} |
#### File: aadityapritam00/Library_Management_project/book.py
```python
import time
class Book:
def __init__(self):
self.bname=input("Book Name\t:")
self.isbn=input("ISBN No \t:")
self.no_copies=int(input("No of Books:"))
def view_all_book(self):
print("------------------------------📚 Book Records 📖---------------------------------")
print("Loading.....................")
time.sleep(3)
        print(f"Book Name: {self.bname}\nISBN No: {self.isbn}\nNo of Books: {self.no_copies}")
print("______________________________________***____________________________________________")
```
#### File: aadityapritam00/Library_Management_project/facualty.py
```python
import time
class Facualty:
def __init__(self):
self.fname=input("Enter Your Name \t\t:")
self.fid=input("Enter your Id \t\t:")
self.issued_book_facualty=[]
def req_book(self):
book_name=input("Enter the book name to put request")
def view_profile_facualty(self):
print("\n--------------------------------FACUALTY DASHBOARD----------------------------------------")
time.sleep(3)
        print(f"\n\tName: {self.fname}\tFacualty Id: {self.fid}\tBook Issued: {self.issued_book_facualty}")
print("____________________________________________x___________________________________________")
```
#### File: aadityapritam00/Library_Management_project/functionality.py
```python
from book import *
from student import *
import facualty as fac
import pickle
import time
import os,sys
import os
# ############################################# Book #################################################################
def request_book():
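    # Decrement the available copy count of the requested book (dropping it once the last copy is issued) and record it on the student's or faculty's profile.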
flag=0
flag1=0
lb=[]
ls=[]
utype=input(" press: 'S' IF You Are Student\t\tF: IF You Are Facualty\t\t'E' for Exit\n")
if utype=='S':
bn=input("enter book name or ISBN No to put the Request: ")
with open("book.pkl",'rb') as f:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if bn==i.bname or bn==i.isbn:
if i.no_copies==1:
flag=1
else:
flag1=1
lb.append(i)
except EOFError:
break
if(flag==1 or flag1==1):
for i in lb:
if(bn==i.bname or bn==i.isbn):
if flag==1:
lb.remove(i)
else:
i.no_copies-=1
break
with open("book.pkl",'wb') as f:
pass
with open("book.pkl",'ab') as f:
pickle.dump(lb,f)
sn=input("Enter your Name or Enrollment No:")
with open("student_record.pkl",'rb') as fs:
ss=pickle.load(fs)
for i in ss:
if i.name==sn or i.enroll==sn:
i.issued_book.append(bn)
with open("student_record.pkl",'wb') as fs:
pickle.dump(ss,fs)
print("Requested Successfully !")
break
else:
print("No such Books found")
elif utype=='F':
bn=input("enter book name or ISBN No to put the Request: ")
with open("book.pkl",'rb') as f:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if bn==i.bname or bn==i.isbn:
if i.no_copies==1:
flag=1
else:
flag1=1
lb.append(i)
except EOFError:
break
if(flag==1 or flag1==1):
for i in lb:
if(bn==i.bname or bn==i.isbn):
if flag==1:
lb.remove(i)
else:
i.no_copies-=1
break
with open("book.pkl",'wb') as f:
pass
with open("book.pkl",'ab') as f:
pickle.dump(lb,f)
sn=input("Enter Facualty Name or Id :")
with open("facualty_record.pkl",'rb') as fs:
ss=pickle.load(fs)
for i in ss:
if i.fname==sn or i.fid==sn:
i.issued_book_facualty.append(bn)
with open("facualty_record.pkl",'wb') as fs:
pickle.dump(ss,fs)
print("Requested Successfully !")
break
elif utype=='E':
sys.exit()
else:
print("wrong key entered ❌")
def search_book():
print("1>Enter Book Name or ISBN No to search: ")
with open("book.pkl",'rb') as f:
sb=input("Enter the Book Name or ISBN No to search")
while(True):
try:
obj=pickle.load(f)
for i in obj:
if i.bname==sb or i.isbn==sb:
print("Book found")
print("Name\t\tISBN No\t\tAvailable copies")
print(i.bname,"\t\t",i.isbn,"\t\t",i.no_copies)
print("Select the key:\t1>Request Book\t2>Back\t3>Exit")
inp4=int(input())
if inp4==1:
request_book()
elif inp4==2:
break
elif inp4==3:
sys.exit()
else:
print("wrong key entered,try again")
return
except EOFError:
print("record search completed")
break
def show_all_book():
print("****************************Book Records***************************")
with open ("book.pkl",'rb') as f:
if os.path.isfile("book.pkl")==True:
print("Book Name\t\tISBN No\t\t\tNo of Books")
while(True):
try:
objs=pickle.load(f)
for i in objs:
print(i.bname,"\t\t\t",i.isbn,"\t\t\t",i.no_copies)
except EOFError:
print("\nAll Book Displayed\n")
break
else:
print("No file found")
return
def remove_book():
obj=None
k=input("Enter Book ISBN or Book Name to Remove")
l=[]
f=0
g=0
with open("book.pkl",'rb') as f:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if k==i.bname or k==i.isbn:
if i.no_copies==1:
f=1
else:
g=1
l.append(i)
except EOFError:
break
if(f==1 or g==1):
for i in l:
if(k==i.bname or k==i.isbn):
if f==1:
l.remove(i)
else:
i.no_copies-=1
break
with open("book.pkl",'wb') as f:
pass
with open("book.pkl",'ab') as f:
pickle.dump(l,f)
print("removed successfully")
else:
print("No such Books found")
def add_book():
print("Please, Specify the Book detail to add")
lst=[]
obj2=Book()
lst.append(obj2)
with open("book.pkl",'ab') as f:
pickle.dump(lst,f)
print("Book Added🙍♂️\n")
def modify_book():
obj=None
while(True):
print("1>Modify Book Name\n2>Modify Isbn no:\n3>Back\n4>Exit")
inp=int(input())
if inp==1:
with open("book.pkl",'rb') as f:
flag=0
l=[]
bn=input("Enter book Name:")
while(True):
try:
obj=pickle.load(f)
for i in obj:
if bn==i.bname:
flag=1
# i.bname=input("ENTERN NEW NAME")
# l.append(i)
# else:
l.append(i)
# i.bname=input("Enter the new name you want to modify")
# print("Name modified successfully")
# else:
# print("No such book found")
# break
except EOFError:
# print("Updated ucessfully")
break
if(flag==1):
for i in l:
if bn==i.bname:
i.bname=input("enter new name")
break
with open("book.pkl",'wb') as f:
pass
with open("book.pkl",'ab') as f:
pickle.dump(l,f)
                print("Updated successfully")
elif inp==2:
print("🚫Access denied,you can't change ISBN No of a book")
elif inp==3:
break
elif inp==4:
sys.exit()
else:
print("Wrong key entered, enter again")
break
# ############################################# Student ####################################################
def add_student():
lst1=[]
obj1=Student()
lst1.append(obj1)
with open("student_record.pkl",'ab') as f:
pickle.dump(lst1,f)
print("Student Added🙍♂️\n")
def eliminate_student():
flag=0
lst=[]
while(True):
print("1>Eliminate by Student Name\n2>Eliminate By Enrollment No:\n3>Exit")
inp=int(input())
if inp==1:
sm=input("Enter the name of student to eliminate: ")
with open("student_record.pkl",'rb') as f:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if i.name==sm:
flag=1
lst.append(i)
except EOFError:
print("File end")
break
if flag==1:
for i in lst:
if i.name==sm:
lst.remove(i)
with open("student_record.pkl",'wb') as f:
pass
with open("student_record.pkl",'ab') as f:
pickle.dump(lst,f)
print("student is eliminated from record")
break
if inp==2:
en=input("Enter the enrollment No to eliminate student:")
with open("student_record.pkl",'rb') as f:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if i.enroll==en:
flag=1
else:
                                print("student with this enrollment no not found")
break
lst.append(i)
except EOFError:
print("File end")
break
if flag==1:
for i in lst:
if i.enroll==en:
lst.remove(i)
with open("student_record.pkl",'wb') as f:
pass
with open("student_record.pkl",'ab') as f:
pickle.dump(lst,f)
break
if inp==3:
break
def update_student():
lss=[]
fg=0
fg1=0
print("1>Update Student Name\n2>Update Semester\n3>Update Branch\n4>Issue Book\n5>Exit")
inp=int(input())
if inp==1:
bn=input("Enter the student name to update")
with open("student_record.pkl",'rb') as f:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if i.name==bn:
fg=1
# i.name=input("Enter Student's New Name:")
lss.append(i)
except EOFError:
print("End")
break
if fg==1:
for i in lss:
if i.name==bn:
i.name=input("Enter New Name of Student: ")
break
with open("student_record.pkl",'wb') as f:
pass
with open("student_record.pkl",'ab') as f:
pickle.dump(lss,f)
elif inp==2:
bn=input("Enter the student's semester to update")
with open("student_record.pkl",'rb') as f:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if i.sem==bn:
fg=1
# i.name=input("Enter Student's New Name:")
lss.append(i)
except EOFError:
print("End")
break
if fg==1:
for i in lss:
if i.sem==bn:
                    i.sem=int(input("Enter New Semester of Student (in integer only): "))
with open("student_record.pkl",'wb') as f:
pass
with open("student_record.pkl",'ab') as f:
pickle.dump(lss,f)
# bn=input("Enter the Name of student whose Enrollment No is update:")
# with open("student_record.pkl",'rb') as f:
# while(True):
# try:
# obj=pickle.load(f)
# for i in obj:
# if i.name==bn:
# i.enroll=input("New Enroll No:")
# else:
# print("student not exist,plz try again")
# break
# except EOFError:
# print("student record End")
# break
# with open("student_record.pkl",'ab') as f:
# pickle.dump(obj,f)
elif inp==3:
bn=input("Enter the student Branch to change: ")
with open("student_record.pkl",'rb') as f:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if i.branch==bn:
fg=1
# i.name=input("Enter Student's New Name:")
lss.append(i)
except EOFError:
print("End")
break
if fg==1:
for i in lss:
if i.branch==bn:
                    i.branch=input("Enter New Branch of Student: ")
with open("student_record.pkl",'wb') as f:
pass
with open("student_record.pkl",'ab') as f:
pickle.dump(lss,f)
elif inp==4:
bn=input("Enter Student name to issue book:")
with open("student_record.pkl",'rb') as f:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if i.name==bn:
fg=1
else:
print("Student not found ,plz try again")
break
except EOFError:
print("students records End")
break
if fg==1:
for i in lss:
if i.name==bn:
ib=input("Enter the book name to issue")
with open("book.pkl",'rb') as fb:
b=pickle.load(fb)
for j in b:
if j.bname==ib:
                                i.issued_book.append(ib)
with open("student_record.pkl",'wb') as f:
pass
with open("student_record.pkl",'ab') as f:
pickle.dump(lss,f)
elif inp==5:
sys.exit()
else:
print("Wrong key entered, please try correct key")
return
def view_all_student():
print("\n****************************Students Records***************************")
with open ("student_record.pkl",'rb') as f:
if os.path.isfile("student_record.pkl")==True:
print("\nStudent Name\t\tEnrollment No\t\tSemester\t\tBranch\t\t\tIssued Books")
while(True):
try:
objk=pickle.load(f)
for i in objk:
print(i.name,"\t",i.enroll,"\t",i.sem,"\t\t\t",i.branch,"\t\t\t\t\t",i.issued_book)
except EOFError:
print("\nAll Students Displayed\n")
return
else:
print("No file found")
def view_search_student():
f1=0
with open("student_record.pkl",'rb') as f:
if os.path.isfile("student_record.pkl")==True:
sname=input("Enter Name or Enrollment No to view profile:")
while(True):
try:
objk=pickle.load(f)
# sname=input("Enter Name or Enrollment No to view profile:")
for i in objk:
if i.name==sname or i.enroll==sname:
print("\nName\tEnrollment No\tSemester\tBranch\t\tIssued Books")
print(i.name,"\t",i.enroll,"\t",i.sem,"\t",i.branch,"\t",i.issued_book)
f1=1
break
if(f1==1):
break
except EOFError:
print("File is searched, nothing found")
break
else:
print("There is no such file to search")
return
# ##########################################-Facualty-##################################################################3
def add_facualty():
lst2=[]
obj1=fac.Facualty()
lst2.append(obj1)
with open("facualty_record.pkl",'ab') as f:
pickle.dump(lst2,f)
print("Facualty Added successfully")
def remove_facualty():
obj=None
fl=0
lt=[]
while(True):
print("1>Remove by Facualty Name\n2>Remove By Facualty Id:\n3>Exit")
inp=int(input())
if inp==1:
fm=input("Enter the name of Facualty to eliminate: ")
with open("facualty_record.pkl",'rb') as f:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if i.fname==fm:
fl=1
lt.append(i)
except EOFError:
print("File end")
break
if fl==1:
for i in lt:
if i.fname==fm:
lt.remove(i)
with open("facualty_record.pkl",'wb') as f:
pass
with open("facualty_record.pkl",'ab') as f:
pickle.dump(lt,f)
print("facualty is eliminated from record")
break
if inp==2:
ide=input("Enter the Facualty Idt No to eliminate facualty:")
with open("facualty_record.pkl",'rb') as f:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if i.fid==ide:
fl=1
else:
print("facualty not found")
break
lt.append(i)
except EOFError:
print("File end")
break
if fl==1:
for i in lt:
if i.fid==ide:
lt.remove(i)
with open("facualty_record.pkl",'wb') as f:
pass
with open("facualty_record.pkl",'ab') as f:
pickle.dump(lt,f)
break
if inp==3:
break
def view_all_facualty():
print("\n****************************Facualty Records***************************")
with open ("facualty_record.pkl",'rb') as ff:
if os.path.isfile("facualty_record.pkl")==True:
print("Facualty Name\tFacualty Id\tIssued Books")
while(True):
try:
obj=pickle.load(ff)
for i in obj:
print(i.fname,"\t",i.fid,"\t\t",i.issued_book_facualty)
except EOFError:
print("\nAll Facualty Displayed\n")
return
else:
print("No file found")
def view_search_facualty():
fac=0
with open("facualty_record.pkl",'rb') as f:
fkname=input("Enter Name or Facualty Id No to view profile:")
if os.path.isfile("facualty_record.pkl")==True:
while(True):
try:
obj=pickle.load(f)
for i in obj:
if i.fname==fkname or i.fid==fkname:
print("Name\tFacualty Id\t\tIssued Books")
print(i.fname,"\t",i.fid,"\t\t",i.issued_book_facualty)
fac=1
break
if fac==1:
break
except EOFError:
print("File is searched, nothing found")
return
else:
print("There is no such file to search")
return
def update_facualty():
obj=None
while(True):
print("1>Update Facualty Name\n2>Update Facualty Id :\n3>Exit")
inp=int(input())
if inp==1:
with open("facualty_record.pkl",'rb') as f:
flag=0
l=[]
bn=input("Enter facualty Name:")
while(True):
try:
obj=pickle.load(f)
for i in obj:
if bn==i.fname:
flag=1
# i.bname=input("ENTERN NEW NAME")
# l.append(i)
# else:
l.append(i)
# i.bname=input("Enter the new name you want to modify")
# print("Name modified successfully")
# else:
# print("No such book found")
# break
except EOFError:
# print("Updated ucessfully")
break
if(flag==1):
for i in l:
if bn==i.fname:
i.fname=input("Enter New name")
break
with open("facualty_record.pkl",'wb') as f:
pass
with open("facualty_record.pkl",'ab') as f:
pickle.dump(l,f)
                print("Updated successfully")
elif inp==2:
print("🚫Access denied,you can't change Facualty Id")
elif inp==3:
break
else:
print("Wrong key entered, enter again")
break
``` |
{
"source": "Aaditya-Raj7/PyWisher",
"score": 3
} |
#### File: PyWisher/uploader/models.py
```python
from django.db import models
# Create your models here.
class Person(models.Model):
sender = models.CharField(max_length=100)
receiver = models.CharField(max_length=100)
email = models.EmailField()
bday = models.DateField()
year = models.IntegerField()
def __str__(self):
return self.receiver
``` |
{
"source": "aadityarautela/nand2tetris",
"score": 3
} |
#### File: 06/hackasm/parser.py
```python
import tokenize
import sys
A_COMMAND = 0
C_COMMAND = 1
L_COMMAND = 2
NOT_A_COMMAND = 3
class Parse(object):
A_COMMAND = 0
C_COMMAND = 1
L_COMMAND = 2
NOT_A_COMMAND = 3
def __init__(self,filename):
self.commands = []
self.curr_command = ""
self.linum = 0
self.tokens = []
self.curr_token = 1
with open(filename, 'r') as f:
self.commands = f.readlines()
self.commands = [str.strip(x) for x in self.commands]
self.commands = [x for x in self.commands if x!='']
self.commands = [x for x in self.commands if x[0]!='/']
tmp_commands = []
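        # Keep only the part of each line before the first space, tab or '/', stripping inline comments.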
for cmd in self.commands:
tmp_cmd = ""
for letter in cmd:
if letter == ' ' or letter == '/' or letter == '\t':
break
else:
tmp_cmd+=letter
tmp_commands.append(tmp_cmd)
self.commands = tmp_commands
with open(filename+'.bak', 'w') as f:
f.writelines(["%s\n" %command for command in self.commands])
with open(filename+'.bak', 'r') as f:
self.commands = f.readlines()
with open(filename+'.bak', 'rb') as f:
tokens = tokenize.tokenize(f.readline)
for token in tokens:
self.tokens.append(token)
self.tokens = [x for x in self.tokens if x.type != tokenize.INDENT]
self.curr_command = self.commands[0]
def hasMoreCommands(self):
if self.linum == len(self.commands):
return False
return True
def advance(self):
hasmorecom = self.hasMoreCommands()
if hasmorecom:
self.linum = self.linum + 1
try:
self.curr_command = self.commands[self.linum]
i = self.curr_token
while self.tokens[i].type != tokenize.NEWLINE:
i = i+1
i = i+1 #To move to next token
self.curr_token = i
except:
pass
def commandType(self):
if self.curr_command[0] == "@":
return A_COMMAND
elif self.curr_command[0] == "(":
return L_COMMAND
elif self.curr_command[0] == "/":
return NOT_A_COMMAND
else:
return C_COMMAND
def symbol(self):
com_type = self.commandType()
if com_type == A_COMMAND or com_type == L_COMMAND:
return self.tokens[self.curr_token+1].string
def dest(self):
com_type = self.commandType()
if com_type == C_COMMAND:
if self.tokens[self.curr_token+1].string == '=':
return self.tokens[self.curr_token].string
else:
return "null"
def jump(self):
com_type = self.commandType()
if com_type == C_COMMAND:
semicolon_token = -1
i = self.curr_token
while self.tokens[i].type != tokenize.NEWLINE:
if self.tokens[i].string == ';':
semicolon_token = i
break
else:
i=i+1
if semicolon_token != -1:
return self.tokens[semicolon_token+1].string
else:
return "null"
def comp(self):
com_type = self.commandType()
if com_type == C_COMMAND:
tmp_comp = self.curr_command
tmp_comp = tmp_comp.replace("\n","")
dest = self.dest()
jump = self.jump()
if dest != "null" and jump != "null":
tmp_comp = tmp_comp.replace("=","",1)
tmp_comp = tmp_comp.replace(dest, "",1)
tmp_comp = tmp_comp.replace(jump, "",1)
tmp_comp = tmp_comp.replace(";", "",1)
elif dest != "null" and jump == "null":
tmp_comp = tmp_comp.replace("=","",1)
tmp_comp = tmp_comp.replace(dest, "",1)
elif dest == "null" and jump != "null":
tmp_comp = self.tokens[self.curr_token].string
return tmp_comp
def getSymbol(self):
sym = self.curr_command.replace("(","")
sym = sym.replace(")","")
sym = sym.replace("\n","")
sym = sym.replace("@", "")
return sym
#x = Parse("Add1.asm")
#x.advance()
#x.advance()
#print(x.comp())
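# Illustrative driver (a sketch, not part of the original file): roughly how
# an assembler front-end would walk a .asm file with the Parse API above.
# 'Prog.asm' is a made-up filename and the binary encoding of each field is
# omitted.
def _walk(filename='Prog.asm'):
    p = Parse(filename)
    while p.hasMoreCommands():
        kind = p.commandType()
        if kind == A_COMMAND or kind == L_COMMAND:
            print(kind, p.getSymbol())
        elif kind == C_COMMAND:
            print(kind, p.dest(), p.comp(), p.jump())
        p.advance()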
```
#### File: VMT/VMT/VMTranslator.py
```python
import CodeWriter, Const, Parser, os, sys
class VMT(object):
def __init__(self, infile, outfile):
self.P = Parser.Parser(infile)
self.C = CodeWriter.CodeWriter(outfile)
def translate(self):
while self.P.hasMoreCommands():
if self.P.commandType() == Const.C_ARITHMETIC:
self.C.writeArithmetic(self.P.curr_command)
elif self.P.commandType() == Const.C_PUSH:
self.C.writePushPop(Const.C_PUSH,self.P.arg1(), int(self.P.arg2()))
elif self.P.commandType() == Const.C_POP:
self.C.writePushPop(Const.C_POP,self.P.arg1(), int(self.P.arg2()))
self.P.advance()
if len(sys.argv) < 2:
print("File not specified!")
sys.exit(-1)
fname = sys.argv[1]
oname = fname.replace(".vm", ".asm")
V = VMT(fname,oname)
V.translate()
```
#### File: nand2tetris/11/VMWriter.py
```python
import os
class VMWriter(object):
def __init__(self,fname):
self.outfile = open(fname, 'w')
def close(self):
self.outfile.close()
def write_cmd(self, cmd, arg1 = "", arg2 = ""):
self.outfile.write(cmd + " " + str(arg1) + " " + str(arg2) + "\n")
def write_push(self,seg,index):
self.write_cmd("push",seg,index)
def write_pop(self,seg,index):
self.write_cmd("pop",seg,index)
def write_arithmetic(self,cmd):
self.write_cmd(cmd)
def write_label(self,label):
self.write_cmd("label",label)
def write_goto(self,label):
self.write_cmd("goto", label)
def write_if(self,label):
self.write_cmd("if-goto", label)
def write_call(self,name,nargs):
self.write_cmd("call",name,nargs)
def write_function(self,name,nlocals):
self.write_cmd("function",name,nlocals)
def write_return(self):
self.write_cmd("return")
#Non Standard i.e. Helper
def push_const(self,val):
self.write_push('constant',val)
def push_arg(self, argnum):
self.write_push('argument', argnum)
def push_this_ptr(self):
self.write_push('pointer', 0)
def pop_this_ptr(self):
self.write_pop('pointer', 0)
def pop_that_ptr(self):
self.write_pop('pointer', 1)
def push_that(self):
self.write_push('that', 0)
def pop_that(self):
self.write_pop('that', 0)
def push_temp(self, temp_num):
self.write_push('temp', temp_num)
def pop_temp(self, temp_num):
self.write_pop('temp', temp_num)
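# Illustrative usage (a sketch, not part of the original file): each call
# emits one Hack VM command per line. 'Out.vm' and the commands below are
# made-up examples.
if __name__ == '__main__':
    w = VMWriter('Out.vm')
    w.write_function('Main.main', 0)
    w.push_const(7)
    w.push_const(8)
    w.write_arithmetic('add')
    w.write_return()
    w.close()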
``` |
{
"source": "aadityasinha-dotcom/airbyte",
"score": 2
} |
#### File: source-facebook-marketing/unit_tests/test_source.py
```python
import pydantic
import pytest
from airbyte_cdk.models import ConnectorSpecification
from source_facebook_marketing import SourceFacebookMarketing
@pytest.fixture(name="config")
def config_fixture():
config = {"account_id": 123, "access_token": "TOKEN", "start_date": "2019-10-10T00:00:00"}
return config
@pytest.fixture(name="api")
def api_fixture(mocker):
api_mock = mocker.patch("source_facebook_marketing.source.API")
api_mock.return_value = mocker.Mock(account=123)
return api_mock
@pytest.fixture(name="logger_mock")
def logger_mock_fixture(mocker):
return mocker.patch("source_facebook_marketing.source.logger")
class TestSourceFacebookMarketing:
def test_check_connection_ok(self, api, config, logger_mock):
ok, error_msg = SourceFacebookMarketing().check_connection(logger_mock, config=config)
assert ok
assert not error_msg
api.assert_called_once_with(account_id="123", access_token="TOKEN")
logger_mock.info.assert_called_once_with(f"Select account {api.return_value.account}")
def test_check_connection_end_date_before_start_date(self, api, config, logger_mock):
config["start_date"] = "2019-10-10T00:00:00"
config["end_date"] = "2019-10-09T00:00:00"
with pytest.raises(ValueError, match="end_date must be equal or after start_date."):
SourceFacebookMarketing().check_connection(logger_mock, config=config)
def test_check_connection_invalid_config(self, api, config, logger_mock):
config.pop("start_date")
with pytest.raises(pydantic.ValidationError):
SourceFacebookMarketing().check_connection(logger_mock, config=config)
assert not api.called
def test_check_connection_exception(self, api, config, logger_mock):
api.side_effect = RuntimeError("Something went wrong!")
with pytest.raises(RuntimeError, match="Something went wrong!"):
SourceFacebookMarketing().check_connection(logger_mock, config=config)
def test_streams(self, config, api):
streams = SourceFacebookMarketing().streams(config)
assert len(streams) == 14
def test_spec(self):
spec = SourceFacebookMarketing().spec()
assert isinstance(spec, ConnectorSpecification)
``` |
{
"source": "aadityasinha-dotcom/ploomber",
"score": 2
} |
#### File: ploomber/src/conftest.py
```python
from pathlib import Path
import sys
path_to_src = str(Path(__file__, '..', '..', 'testutils').resolve())
sys.path.insert(0, path_to_src)
from testutils import fixture_tmp_dir, _path_to_tests # noqa: E402
@fixture_tmp_dir(_path_to_tests() / 'assets' / 'doctests', autouse=True)
def doctests():
pass
```
#### File: ploomber/cli/nb.py
```python
import argparse
import json
import shutil
from pathlib import Path
import stat
import click
from ploomber.cli.parsers import CustomParser
from ploomber.cli.io import command_endpoint
from ploomber.telemetry import telemetry
from ploomber.sources.notebooksource import recursive_update
from ploomber.exceptions import BaseException
def _call_in_source(dag, method_name, message, kwargs=None):
"""
Execute method on each task.source in dag, passing kwargs
"""
kwargs = kwargs or {}
files = []
results = []
for task in dag.values():
try:
method = getattr(task.source, method_name)
except AttributeError:
pass
else:
results.append(method(**kwargs))
files.append(str(task.source._path))
files_ = '\n'.join((f' {f}' for f in files))
click.echo(f'{message}:\n{files_}')
return results
def _install_hook(path_to_hook, content, entry_point):
"""
Install a git hook script at the given path
"""
if path_to_hook.exists():
raise RuntimeError(
'hook already exists '
f'at {path_to_hook}. Run: "ploomber nb -u" to uninstall the '
'existing hook and try again')
path_to_hook.write_text(content.format(entry_point=entry_point))
# make the file executable
path_to_hook.chmod(path_to_hook.stat().st_mode | stat.S_IEXEC)
def _delete_hook(path):
"""Delete a git hook at the given path
"""
if path.exists():
if path.is_file():
path.unlink()
else:
# in the remote case that it's a directory
shutil.rmtree(path)
click.echo(f'Deleted hook located at {path}')
pre_commit_hook = """
# !/usr/bin/env bash
# Automatically generated pre-commit hook to remove the injected cell in
# scripts and notebook tasks
# remove injected cells
ploomber nb --entry-point {entry_point} --remove
# re-add files
git add $(git diff --name-only --cached)
"""
post_commit_hook = """
# !/usr/bin/env bash
# Automatically generated post-commit hook to add the injected cell in
# scripts and notebook tasks
# inject cells
ploomber nb --entry-point {entry_point} --inject
"""
# taken from https://github.com/mwouts/jupytext/blob/main/README.md#install
_jupyterlab_default_settings_overrides = """
{
"@jupyterlab/docmanager-extension:plugin": {
"defaultViewers": {
"markdown": "Jupytext Notebook",
"myst": "Jupytext Notebook",
"r-markdown": "Jupytext Notebook",
"quarto": "Jupytext Notebook",
"julia": "Jupytext Notebook",
"python": "Jupytext Notebook",
"r": "Jupytext Notebook"
}
}
}
"""
def _py_with_single_click_enable():
"""
    Writes ~/.jupyter/labconfig/default_setting_overrides.json to enable
    opening .py files as notebooks with a single click. If the section already
    exists, it overrides its value
"""
parent = Path('~/.jupyter', 'labconfig').expanduser()
path = parent / 'default_setting_overrides.json'
if path.exists():
target = json.loads(path.read_text())
else:
target = {}
recursive_update(target,
json.loads(_jupyterlab_default_settings_overrides))
click.echo(f'Overriding JupyterLab defaults at: {str(path)}')
parent.mkdir(exist_ok=True, parents=True)
path.write_text(json.dumps(target))
click.secho(
'Done. You can now open .py and other formats in JupyterLab '
'with a single click. You may need to reload JupyterLab',
fg='green')
def _py_with_single_click_disable():
"""
    Opens ~/.jupyter/labconfig/default_setting_overrides.json and deletes
    the value in
    ['@jupyterlab/docmanager-extension:plugin']['defaultViewers'], if any
"""
parent = Path('~/.jupyter', 'labconfig')
target = (parent / 'default_setting_overrides.json').expanduser()
if target.exists():
content = json.loads(target.read_text())
key1 = '@jupyterlab/docmanager-extension:plugin'
key2 = 'defaultViewers'
if content.get(key1, {}).get(key2):
del content[key1][key2]
if key1 in content and not content.get(key1):
del content[key1]
Path(target).write_text(json.dumps(content))
click.secho(
'Done. Disabled opening .py files and other formats in JupyterLab '
'with a single click. You may need to reload JupyterLab',
fg='yellow')
_description = """Manage scripts and notebooks
Inject cell in all scripts and notebooks:
$ ploomber nb -i
Enable one-click to open .py as notebooks in JupyterLab:
$ ploomber nb -S
Re-format .ipynb notebooks as .py files with the percent format:
$ ploomber nb -f py:percent
Re-format .py files as .ipynb notebooks:
$ ploomber nb -f ipynb
"""
# TODO: --log, --log-file should not appear as options
@command_endpoint
@telemetry.log_call('nb')
def main():
parser = CustomParser(
description=_description,
prog='ploomber nb',
# required for the --help text to keep line breaks
formatter_class=argparse.RawTextHelpFormatter)
with parser:
# The next options do not require a valid entry point
# opening .py files as notebooks in JupyterLab with a single click
single_click = parser.add_mutually_exclusive_group()
single_click.add_argument(
'--single-click',
'-S',
action='store_true',
help=('Override JupyterLab defaults to open '
'scripts as notebook with a single click'))
single_click.add_argument(
'--single-click-disable',
'-d',
action='store_true',
help=('Disables opening scripts as notebook with a single '
'click in JupyterLab'))
# install/uninstall hook
hook = parser.add_mutually_exclusive_group()
hook.add_argument('--install-hook',
'-I',
action='store_true',
help='Install git pre-commit hook')
hook.add_argument('--uninstall-hook',
'-u',
action='store_true',
help='Uninstall git pre-commit hook')
# The next options require a valid entry point
# inject/remove cell
cell = parser.add_mutually_exclusive_group()
cell.add_argument('--inject',
'-i',
action='store_true',
help='Inject cell to all script/notebook tasks')
cell.add_argument(
'--remove',
'-r',
action='store_true',
help='Remove injected cell in all script/notebook tasks')
# re-format
parser.add_argument('--format',
'-f',
help='Re-format script/notebook tasks '
'(values: "py:percent" and "ipynb")')
# pair scripts and nbs
parser.add_argument('--pair',
'-p',
help='Pair scripts with ipynb files')
# sync scripts and nbs
parser.add_argument('--sync',
'-s',
action='store_true',
help='Sync scripts with ipynb files')
loading_error = None
# commands that need an entry point to work
needs_entry_point = {'format', 'inject', 'remove', 'sync', 'pair'}
args_ = parser.parse_args()
if any(getattr(args_, arg) for arg in needs_entry_point):
try:
dag, args = parser.load_from_entry_point_arg()
except Exception as e:
loading_error = e
else:
dag.render(show_progress=False)
if loading_error:
raise BaseException('Could not run nb command: the DAG '
'failed to load') from loading_error
else:
dag = None
args = args_
# options that do not need a DAG
if args.single_click:
_py_with_single_click_enable()
if args.single_click_disable:
_py_with_single_click_disable()
if args.install_hook:
if not Path('.git').is_dir():
raise NotADirectoryError(
'Expected a .git/ directory in the current working '
'directory. Run this from the repository root directory.')
parent = Path('.git', 'hooks')
parent.mkdir(exist_ok=True)
# pre-commit: remove injected cells
_install_hook(parent / 'pre-commit', pre_commit_hook, args.entry_point)
click.echo('Successfully installed pre-commit git hook')
# post-commit: inject cells
_install_hook(parent / 'post-commit', post_commit_hook,
args.entry_point)
click.echo('Successfully installed post-commit git hook')
if args.uninstall_hook:
_delete_hook(Path('.git', 'hooks', 'pre-commit'))
_delete_hook(Path('.git', 'hooks', 'post-commit'))
# options that need a valid DAG
if args.format:
new_paths = [
str(p) for p in _call_in_source(
dag,
'format',
'Formatted notebooks',
dict(fmt=args.format),
) if p is not None
]
if len(new_paths):
click.echo('Extension changed for the following '
f'tasks: {", ".join(new_paths)}. Update your '
'pipeline declaration.')
if args.inject:
_call_in_source(
dag,
'save_injected_cell',
'Injected cell',
dict(),
)
click.secho(
'Finished cell injection. Re-run this command if your '
'pipeline.yaml changes.',
fg='green')
if args.remove:
_call_in_source(
dag,
'remove_injected_cell',
'Removed injected cell',
dict(),
)
if args.sync:
# maybe its more efficient to pass all notebook paths at once?
_call_in_source(dag, 'sync', 'Synced notebooks')
# can pair give trouble if we're reformatting?
if args.pair:
_call_in_source(
dag,
'pair',
'Paired notebooks',
dict(base_path=args.pair),
)
click.echo(f'Finished pairing notebooks. Tip: add {args.pair!r} to '
'your .gitignore to keep your repository clean')
```
#### File: src/ploomber/config.py
```python
import warnings
import abc
from collections.abc import Mapping
import yaml
class Config(abc.ABC):
"""An abstract class to create configuration files (stored as YAML)
Notes
-----
For examples, see test_config.py or the concrete classes
(UserSettings, Internal)
"""
def __init__(self):
self._init_values()
# resolve home directory
path = self.path()
if not path.exists():
defaults = self._get_data()
path.write_text(yaml.dump(defaults))
self._set_data(defaults)
else:
try:
content = self._load_from_file()
loaded = True
except Exception as e:
warnings.warn(f'Error loading {str(path)!r}: {e}\n\n'
'reverting to default values')
loaded = False
content = self._get_data()
if loaded and not isinstance(content, Mapping):
warnings.warn(
f'Error loading {str(path)!r}. Expected a dictionary '
f'but got {type(content).__name__}, '
'reverting to default values')
content = self._get_data()
self._set_data(content)
def _load_from_file(self):
path = self.path()
text = path.read_text()
content = yaml.safe_load(text)
for key, type_ in self.__annotations__.items():
value = content.get(key, None)
if value is not None and not isinstance(value, type_):
default = getattr(self, key)
warnings.warn(f'Corrupted config file {str(path)!r}: '
f'expected {key!r} to contain an object '
f'with type {type_.__name__}, but got '
f'{type(value).__name__}. Reverting to '
f'default value {default}')
content[key] = default
return content
def _get_data(self):
"""Extract values from the annotations and return a dictionary
"""
return {key: getattr(self, key) for key in self.__annotations__}
def _set_data(self, data):
"""Take a dictionary and store it in the annotations
"""
for key in self.__annotations__:
if key in data:
setattr(self, key, data[key])
def _init_values(self):
"""
Iterate over annotations to initialize values. This is only relevant
when any of the annotations has a factory method to initialize the
        values. If the value is a literal, no changes happen.
"""
for key in self.__annotations__:
name = f'{key}_default'
# if there is a method with such name, call it and store the output
if hasattr(self, name):
value = getattr(self, name)()
# call __setattr__ on the superclass so we skip the part
# where we overwrite the YAML file, here we only want to
# set the default values
super().__setattr__(key, value)
def _write(self):
"""Writes data to the YAML file
"""
data = self._get_data()
self.path().write_text(yaml.dump(data))
def __setattr__(self, name, value):
if name not in self.__annotations__:
raise ValueError(f'{name} not a valid field')
else:
super().__setattr__(name, value)
self._write()
@abc.abstractclassmethod
def path(cls):
"""Returns the path to the YAML file
"""
pass
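# Minimal illustrative subclass (a sketch, not part of the original module):
# the annotated attributes define the stored fields and their defaults, and
# path() points at the backing YAML file. The class and file names below are
# made up; instantiating it creates/reads example-settings.yaml in the cwd.
class _ExampleSettings(Config):
    verbose: bool = True
    retries: int = 3
    @classmethod
    def path(cls):
        from pathlib import Path
        return Path('example-settings.yaml')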
```
#### File: src/ploomber/exceptions.py
```python
import typing as t
from click.exceptions import ClickException
from click._compat import get_text_stderr
from click.utils import echo
from gettext import gettext as _
def _format_message(exception):
if hasattr(exception, 'format_message'):
return exception.format_message()
else:
return str(exception)
def _build_message(exception):
msg = _format_message(exception)
while exception.__cause__:
msg += f'\n{_format_message(exception.__cause__)}'
exception = exception.__cause__
return msg
class BaseException(ClickException):
"""
A subclass of ClickException that adds support for printing error messages
from chained exceptions
"""
def __init__(self, message, type_=None):
super().__init__(message)
self.type_ = type_
def get_message(self):
return f'Error: {_build_message(self)}'
def show(self, file: t.Optional[t.IO] = None) -> None:
if file is None:
file = get_text_stderr()
echo(_(self.get_message()), file=file)
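# Illustrative example (a sketch, not part of the original module): with a
# chained error such as
#     raise BaseException('outer summary') from ValueError('inner detail')
# get_message() on the caught exception returns
#     'Error: outer summary\ninner detail'
# i.e. one line per exception in the __cause__ chain.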
class DAGRenderError(Exception):
"""Raise when a dag fails to render
Notes
-----
This is a special exception that should only be raised under specific
circumstances in the DAG implementation. Review carefully since this
    exception signals special output formatting in the CLI (via the
@cli_endpoint decorator)
"""
def __init__(self, message):
message = message + '\nNeed help? https://ploomber.io/community'
super().__init__(message)
class DAGBuildError(Exception):
"""Raise when a dag fails to build
Notes
-----
This is a special exception that should only be raised under specific
circumstances in the DAG implementation. Review carefully since this
    exception signals special output formatting in the CLI (via the
@cli_endpoint decorator)
"""
def __init__(self, message):
message = message + '\nNeed help? https://ploomber.io/community'
super().__init__(message)
class DAGWithDuplicatedProducts(BaseException):
"""Raised when more than one task has the same product
"""
pass
class DAGBuildEarlyStop(Exception):
"""
This is raised on purpose to signal that the DAG should not continue
executing but is not considered a build error
"""
pass
class TaskInitializationError(BaseException):
"""Raised when a task fails to initialize
"""
pass
class TaskBuildError(Exception):
"""Raise when a task fails to build
"""
pass
class TaskRenderError(Exception):
"""Raise when a task fails to render
"""
pass
class RenderError(Exception):
"""Raise when a template fails to render
"""
pass
class SourceInitializationError(BaseException):
"""Raise when a source fails to initialize due to wrong parameters
"""
pass
class MissingParametersCellError(SourceInitializationError):
"""Raise when a script or notebook is missing the parameters cell
"""
pass
class CallbackSignatureError(Exception):
"""When a callback function does not have the right signature
"""
pass
class CallbackCheckAborted(Exception):
"""
Used by callback_check to signal that signature check is unfeasible because
the user passed a DottedPath whose underlying function hasn't been imported
"""
pass
class UpstreamKeyError(Exception):
"""
Raised when trying to get an upstream dependency that does not exist,
we have to implement a custom exception otherwise jinja is going
to ignore our error messages (if we raise the usual KeyError).
See: https://jinja.palletsprojects.com/en/2.11.x/templates/#variables
"""
pass
class DAGSpecInitializationError(BaseException):
"""
Raised when failing to initialize a DAGSpec object
"""
pass
class DAGSpecInvalidError(Exception):
"""
Raised when trying to find dagspec automatically but the file doesn't exist
or there is an invalid configuration
"""
pass
class DAGCycle(Exception):
"""
Raised when a DAG is defined with cycles.
"""
def __init__(self):
error_message = """
Failed to process DAG because it contains cycles.
"""
super().__init__(error_message)
class SpecValidationError(Exception):
"""
Raised when failing to validate a spec
"""
def __init__(self, errors, model, kwargs):
self.errors = errors
self.model = model
self.kwargs = kwargs
def __str__(self):
n_errors = len(self.errors)
msg = (f'{n_errors} error{"" if n_errors == 1 else "s"} found '
f'when validating {self.model.__name__} with values '
f'{self.kwargs}\n\n'
f'{display_errors(self.errors)}')
return msg
class SQLTaskBuildError(TaskBuildError):
"""
Raised by SQLScript and SQLDump when the .execute method fails
"""
def __init__(self, type_, source_code, original):
self.type_ = type_
self.source_code = source_code
self.original = original
error_message = ('An error occurred when executing '
f'{type_.__name__} task with '
f'source code:\n\n{source_code!r}\n')
super().__init__(error_message)
class RemoteFileNotFound(Exception):
"""
    Raised by File clients when attempting to download a file that doesn't exist
"""
pass
class MissingClientError(Exception):
"""
Raised when failing to get a valid task-level or dag-level client
for a Task or Product
"""
pass
class ValidationError(BaseException):
"""Raised when failed to validate input data
"""
pass
class NetworkException(BaseException):
"""Raised when failin to call remote APIs
"""
pass
def display_errors(errors):
return '\n'.join(f'{_display_error_loc(e)} ({e["msg"]})' for e in errors)
def _display_error_loc(error):
return ' -> '.join(str(e) for e in error['loc'])
```
#### File: ploomber/executors/_format.py
```python
import traceback
from ploomber.exceptions import TaskBuildError, RenderError, TaskRenderError
from papermill.exceptions import PapermillExecutionError
def exception(exc):
"""Formats an exception into a more concise traceback
Parameters
----------
"""
exceptions = [exc]
while exc.__cause__:
exceptions.append(exc.__cause__)
exc = exc.__cause__
exceptions.reverse()
breakpoint = None
for i, exc in enumerate(exceptions):
if isinstance(exc, (TaskBuildError)):
breakpoint = i
break
exc_hide = exceptions[breakpoint:]
if breakpoint is not None:
tr = _format_exception(exceptions[breakpoint - 1])
tr = tr + '\n' + '\n'.join(f'{_get_exc_name(exc)}: {str(exc)}'
for exc in exc_hide)
else:
# if not breakpoint, take the outermost exception and show it.
# this ensures we show the full traceback in case there are chained
# exceptions
tr = _format_exception(exceptions[-1])
return tr
def _get_exc_name(exc):
return f'{exc.__module__}.{type(exc).__name__}'
def _format_exception(exc):
if isinstance(exc,
(PapermillExecutionError, RenderError, TaskRenderError)):
tr = str(exc)
else:
tr = ''.join(
traceback.format_exception(type(exc),
exc,
exc.__traceback__,
limit=None))
return tr
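# Illustrative note (a sketch, not part of the original module): for a chain
# like SomeLibraryError -> TaskBuildError -> TaskBuildError (innermost to
# outermost), exception() returns the full traceback of SomeLibraryError
# followed by one '<module>.<Name>: <message>' line per remaining exception,
# instead of printing every traceback in the chain.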
```
#### File: src/ploomber/repo.py
```python
import json
import subprocess
import shlex
import sys
from pathlib import Path
import shutil
def _run_command(path, command):
"""Safely run command in certain path
"""
if not Path(path).is_dir():
raise ValueError('{} is not a directory'.format(path))
out = subprocess.check_output(shlex.split(command), cwd=str(path))
s = out.decode('utf-8')
# remove trailing \n
if s[-1:] == '\n':
s = s[:-1]
return s
def is_repo(path):
"""Check if the path is in a git repo"""
if path is None:
return False
if not shutil.which('git'):
return False
out = subprocess.run(['git', '-C', str(path), 'rev-parse'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
repo_exists = out.returncode == 0
if repo_exists:
try:
# edge case: if the repo doesn't have any commits, the following
# will fail. we require a repo with at least one commit for git
# to work
git_hash(path)
except subprocess.CalledProcessError:
return False
else:
return True
def get_git_summary(path):
"""Get one line git summary: {hash} {commit-message}
"""
return _run_command(path, 'git show --oneline -s')
def git_hash(path):
"""Get git hash
If tag: {tag-name}
If clean commit: {hash}
If dirty: {hash}-dirty
dirty: "A working tree is said to be "dirty" if it contains modifications
which have not been committed to the current branch."
https://mirrors.edge.kernel.org/pub/software/scm/git/docs/gitglossary.html#def_dirty
"""
return _run_command(path, 'git describe --tags --always --dirty=-dirty')
def git_location(path):
"""
Returns branch name if at the latest commit, otherwise the hash
"""
hash_ = git_hash(path)
git_branch = current_branch(path)
return git_branch or hash_
def get_git_timestamp(path):
"""Timestamp for last commit
"""
return int(_run_command(path, 'git log -1 --format=%ct'))
def current_branch(path):
# seems like the most reliable way is to do:
# git branch --show-current, but that was added in a recent git
# version 2.22, for older versions, the one below works
try:
return _run_command(path, 'git symbolic-ref --short HEAD')
except subprocess.CalledProcessError:
# if detached head, the command above does not work, since there is
# no current branch
return None
def get_version(package_name):
"""Get package version
"""
installation_path = sys.modules[package_name].__file__
NON_EDITABLE = True if 'site-packages/' in installation_path else False
if NON_EDITABLE:
        return getattr(sys.modules[package_name], '__version__')
else:
parent = str(Path(installation_path).parent)
return get_git_summary(parent)
def get_diff(path):
return _run_command(path, "git diff -- . ':(exclude)*.ipynb'")
def get_git_info(path):
return dict(git_summary=get_git_summary(path),
git_hash=git_hash(path),
git_diff=get_diff(path),
git_timestamp=get_git_timestamp(path),
git_branch=current_branch(path),
git_location=git_location(path))
def save_env_metadata(env, path_to_output):
summary = get_git_summary(env.path.home)
hash_ = git_hash(env.path.home)
diff = get_diff(env.path.home)
metadata = dict(summary=summary, hash=hash_)
path_to_patch_file = Path(path_to_output).with_suffix('.patch')
with open(path_to_output, 'w') as f:
json.dump(metadata, f)
with open(path_to_patch_file, 'w') as f:
f.writelines(diff)
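# Illustrative usage (a sketch, not part of the original module): it assumes
# the current working directory is a git repository with at least one commit.
if __name__ == '__main__':
    if is_repo('.'):
        print(get_git_summary('.'))
        print(git_location('.'))
    else:
        print('not inside a git repository (or git is unavailable)')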
```
#### File: src/ploomber/table.py
```python
from functools import reduce
from warnings import warn
from textwrap import TextWrapper
from copy import deepcopy
import shutil
from collections.abc import Mapping, Iterable
from tabulate import tabulate
from ploomber.util.util import isiterable_not_str
_BETWEEN_COLUMN_WIDTH = 2
class Row:
"""A class to represent a dictionary as a table row
Parameters
----------
mapping
Maps column names to a single value
Examples
--------
>>> from ploomber.table import Row
>>> row = Row({'a': 'some value', 'b': 'another value'})
"""
def __init__(self, mapping):
if not isinstance(mapping, Mapping):
raise TypeError('Rows must be initialized with mappings')
self._set_mapping(mapping)
def __str__(self):
return self._str
def __repr__(self):
return str(self)
def _repr_html_(self):
return self._html
def __getitem__(self, key):
if isiterable_not_str(key):
return Row({k: self._mapping[k] for k in key})
else:
return self._mapping[key]
def __setitem__(self, key, value):
self._mapping[key] = value
def __eq__(self, other):
return self._mapping == other
@property
def columns(self):
return tuple(self._mapping.keys())
def _set_mapping(self, mapping):
self._mapping = mapping
self._str = tabulate([self._mapping],
headers='keys',
tablefmt='simple')
self._html = tabulate([self._mapping], headers='keys', tablefmt='html')
class Table:
"""A collection of rows
Parameters
----------
rows
List of Row objects
column_width
Maximum column width, if None, no trimming happens, otherwise values
are converted to string and trimmed. If 'auto' width is determined
    based on the terminal size (using ``shutil.get_terminal_size()``)
"""
# Columns to exclude from wrapping
EXCLUDE_WRAP = None
def __init__(self, rows, column_width='auto'):
self.column_width = column_width
if isinstance(rows, list):
self._values = rows2columns(rows)
else:
self._values = rows
self._values = self.data_preprocessing(self._values)
self._str = None
self._html = None
def __str__(self):
if self._str is None or self.column_width == 'auto':
values = wrap_table_dict(self._values, self.column_width,
self.EXCLUDE_WRAP)
self._str = tabulate(values, headers='keys', tablefmt='simple')
return self._str
def __repr__(self):
return str(self)
def _repr_html_(self):
if self._html is None or self.column_width == 'auto':
values = wrap_table_dict(self._values, self.column_width,
self.EXCLUDE_WRAP)
self._html = tabulate(values, headers='keys', tablefmt='html')
return self._html
def __getitem__(self, key):
if isiterable_not_str(key):
return Table({k: v
for k, v in self.values.items() if k in key},
column_width=self.column_width)
else:
return self.values[key]
def __iter__(self):
for col in self.values:
yield col
def __len__(self):
return len(self._values.keys())
def __eq__(self, other):
return self.values == other
def data_preprocessing(self, values):
return values
def to_format(self, fmt):
values = wrap_table_dict(self.values, self.column_width,
self.EXCLUDE_WRAP)
return tabulate(values, headers='keys', tablefmt=fmt)
def to_pandas(self):
import pandas as pd
return pd.DataFrame(self.values)
def to_dict(self):
return deepcopy(self.values)
@property
def values(self):
return self._values
@classmethod
def from_dicts(cls, dicts, complete_keys=False):
if complete_keys:
keys_all = reduce(lambda a, b: set(a) | set(b), dicts)
default = {k: '' for k in keys_all}
return cls([Row({**default, **d}) for d in dicts])
else:
return cls([Row(d) for d in dicts])
class TaskReport(Row):
EXCLUDE_WRAP = ['Ran?', 'Elapsed (s)']
@classmethod
def with_data(cls, name, ran, elapsed):
return cls({'name': name, 'Ran?': ran, 'Elapsed (s)': elapsed})
@classmethod
def empty_with_name(cls, name):
return cls.with_data(name, False, 0)
class BuildReport(Table):
"""A Table that adds a columns for checking task elapsed time
"""
EXCLUDE_WRAP = ['Ran?', 'Elapsed (s)', 'Percentage']
def data_preprocessing(self, values):
"""Create a build report from several tasks
"""
# in case the pipeline has no tasks...
elapsed = values.get('Elapsed (s)', [])
total = sum(elapsed)
def compute_pct(elapsed, total):
if not elapsed:
return 0
else:
return 100 * elapsed / total
values['Percentage'] = [compute_pct(r, total) for r in elapsed]
return values
def rows2columns(rows):
"""Convert [{key: value}, {key: value2}] to [{key: [value, value2]}]
"""
if not len(rows):
return {}
cols_combinations = set(tuple(sorted(row.columns)) for row in rows)
if len(cols_combinations) > 1:
raise KeyError('All rows should have the same columns, got: '
'{}'.format(cols_combinations))
columns = rows[0].columns
return {col: [row[col] for row in rows] for col in columns}
def wrap_table_dict(table_dict, column_width, exclude):
"""Wraps a columns to take at most column_width characters
Parameters
----------
column_width : int, 'auto' or None
Width per column. Splits evenly if 'auto', does not wrap if None
exclude : list
Exclude columns from wrapping (show them in a single line)
"""
exclude = exclude or []
if column_width is None:
return table_dict
if column_width == 'auto':
column_width = calculate_wrapping(
table_dict,
do_not_wrap=exclude,
width_total=shutil.get_terminal_size().columns)
    # NOTE: this algorithm may leave between 0 and {column_width - 1}
    # characters of horizontal space unused. We could always take all the
    # available space if we refactored and did not keep column_width fixed
    # for all columns
wrapper = TextWrapper(width=column_width,
break_long_words=True,
break_on_hyphens=True)
return apply_wrapping(table_dict, wrapper, exclude=exclude)
def separator_width(header_length, max_value_length):
"""
Calculates the width of the '---' line that separates header from content
"""
n_value_extra = header_length - max_value_length
if n_value_extra >= -2:
return header_length + 2
else:
return max_value_length
def width_required_for_column(header, values):
"""
    Space needed to display the column in a single line; accounts for the two
extra characters that the tabulate package adds to the header when the
content is too short
"""
values_max = -1 if not values else max(len(str(v)) for v in values)
return max(values_max, separator_width(len(header), values_max))
def calculate_wrapping(table_dict, do_not_wrap, width_total):
"""
Determines the column width by keeping some columns unwrapped (show all
rows, including the header in a single line) and distributing the
    remaining space evenly. Accounts for the between-column spacing.
"""
# space required to display a given column on a single column
width_required = {
header: width_required_for_column(header, values)
for header, values in table_dict.items()
}
# TODO: pass set(table_dict) instead of table_dict
column_width = _calculate_wrapping(table_dict, do_not_wrap, width_total,
width_required)
return column_width
def _calculate_wrapping(table_dict, do_not_wrap, width_total, width_required):
width_offset = 0
# how much space we are already taking by not wrapping columns in
# do_not_wrap
for col_name in do_not_wrap:
        # edge case: the key might not exist if the dag is empty and col_name
        # is added during Table.data_preprocessing (e.g., the "Ran?" column
        # in a build report)
if col_name in width_required:
width_offset += width_required[col_name]
    # how much we have left - takes into account the between-column spacing
# of two characters
width_remaining = (width_total - width_offset -
len(do_not_wrap) * _BETWEEN_COLUMN_WIDTH)
cols_to_wrap = set(table_dict) - set(do_not_wrap)
# there should be at least one column to wrap to continue
if not cols_to_wrap:
return width_total
# split remaining space evenly in the rest of the cols
column_width = equal_column_width(n_cols=len(cols_to_wrap),
width_total=width_remaining)
# check if we have short columns. this means we can give more space
# to the others
short = {
col
for col in cols_to_wrap if width_required[col] <= column_width
}
    # if there is a (strict) subset of columns that can display in a single line
# with less than column_width, call again. When short has all columns
# already, just return whatever number we have, there are no more columns
# to distribute space
if short and short < set(table_dict):
return _calculate_wrapping(table_dict,
do_not_wrap=do_not_wrap + list(short),
width_total=width_total,
width_required=width_required)
else:
return column_width
def equal_column_width(n_cols, width_total):
"""
Max column width if splitting width_total equally among n_cols. Note
    that before computing the column width, a quantity is subtracted to account
    for the spacing required between columns
"""
if not n_cols:
raise ValueError('n_cols must be >0')
offset = (n_cols - 1) * _BETWEEN_COLUMN_WIDTH
width_remaining = width_total - offset
width_column = int(width_remaining / n_cols)
# degenerate case: not even a single space to display. Return width of
# 1 but show a warning, since the table will be illegible
if width_column < 1:
warn(f'Not enough space to display {n_cols} columns with '
f'a width of {width_total}. Using a column width of 1')
return 1
return width_column
def apply_wrapping(table_dict, wrapper, exclude=None):
"""
Wrap text using a wrapper, excluding columns in exclude
"""
exclude = exclude or []
return dict(
apply_wrapping_to_column(header, values, exclude, wrapper)
for header, values in table_dict.items())
def apply_wrapping_to_column(header, values, exclude, wrapper):
# TODO: test this
if header in exclude:
return header, values
# wrapping values is simple, apply the wrapper directly
values_wrapped = wrap_elementwise(values, wrapper)
# wrapping the header has a detail: if there isn't enough space
# for the 2 character offset that tabulate adds to the header, the
# column will end up taking more space than expected, we don't want that.
# wrap the header a bit more if necessary. Clip to 0 since this can be
# negative with large headers
offset = max(wrapper.width - len(header), 0)
if offset >= 2:
header_wrapped = wrap_elementwise(header, wrapper)
else:
_wrapper = TextWrapper(width=wrapper.width - (2 - offset),
break_long_words=True,
break_on_hyphens=True)
header_wrapped = wrap_elementwise(header, _wrapper)
return header_wrapped, values_wrapped
def wrap_elementwise(value, wrapper):
"""Apply wrap if str (elementwise if iterable of str)
"""
if isinstance(value, Iterable) and not isinstance(value, str):
return [wrapper.fill(str(v)) for v in value]
else:
return wrapper.fill(str(value))
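# Illustrative usage (a sketch, not part of the original module): the task
# names and timings below are made up.
if __name__ == '__main__':
    report = Table.from_dicts([{'name': 'load', 'Elapsed (s)': 1.2},
                               {'name': 'clean', 'Elapsed (s)': 0.4}])
    print(report)  # wrapped to the current terminal width
    print(report.to_format('github'))  # any tablefmt supported by tabulate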
```
#### File: ploomber/util/_sys.py
```python
import sys
def _python_bin():
"""
Get the path to the Python executable, return 'python' if unable to get it
"""
executable = sys.executable
return executable if executable else 'python'
```
#### File: ploomber/validators/validators.py
```python
import warnings
from functools import partial, wraps
from collections.abc import Mapping
class Assert:
"""
An object to collect assertions and print results after they've all been
evaluated
Examples
--------
>>> from ploomber.validators import Assert
>>> assert_ = Assert()
>>> assert_(True, 'This wont be displayed')
>>> assert_(False, 'This will be displayed')
>>> assert_(False, 'This will also be displayed')
>>> assert_.check() # doctest: +SKIP
"""
def __init__(self):
self.messages_error = []
self.messages_warning = []
def __call__(self, expression, error_message):
if not expression:
self.messages_error.append(error_message)
def warn(self, expression, warning_message):
if not expression:
warnings.warn(warning_message)
self.messages_warning.append(warning_message)
def __len__(self):
return len(self.messages_error)
def __iter__(self):
for msg in self.messages_error:
yield msg
def __repr__(self):
        return 'Assert object with {} error messages'.format(len(self))
def __str__(self):
if not self.messages_error:
str_ = 'No errors found'
elif len(self.messages_error) == 1:
str_ = '1 error found:\n{}'.format(self.messages_error[0])
else:
str_ = ('{} errors found:\n * {}'.format(
len(self.messages_error), '\n * '.join(self.messages_error)))
if len(self.messages_warning) == 1:
str_ += '\n\n1 warning: {}'.format(self.messages_warning[0])
elif len(self.messages_warning) > 1:
str_ += ('\n\n{} warnings:\n * {}'.format(
len(self.messages_warning),
'\n * '.join(self.messages_warning)))
return str_
def check(self):
"""
        Raise AssertionError with all error messages if there is at least one
error
"""
if len(self):
raise AssertionError(str(self))
def validator(fn):
# TODO: verify fn signature
@wraps(fn)
def wrapped(**kwargs):
if 'assert_' in kwargs:
raise TypeError('Do not include the assert_ parameter in '
'validator functions')
if 'data' in kwargs:
raise TypeError('Do not include the data parameter in '
'validator functions')
return partial(fn, **kwargs)
return wrapped
@validator
def validate_schema(assert_,
data,
schema,
optional=None,
on_unexpected_cols='raise'):
"""Check if a data frame complies with a schema
Parameters
----------
data : pandas.DataFrame
Data frame to test
schema : list or dict
List with column names (will only validate names)
or dict with column names as keys, dtypes as values (will validate
names and dtypes)
optional : list, optional
List of optional column names, no warns nor errors if they appear
on_unexpected_cols : str, optional
One of 'warn', 'raise' or None. If 'warn', it will warn on extra
columns, if 'raise' it will raise an error, if None it will completely
ignore extra columns
"""
if on_unexpected_cols not in {'warn', 'raise', None}:
raise ValueError("'on_unexpected_cols' must be one of 'warn', 'raise' "
"or None")
optional = optional or {}
cols = set(data.columns)
expected = set(schema)
missing = expected - cols
unexpected = cols - expected - set(optional)
msg = '(validate_schema) Missing columns {missing}.'.format(
missing=missing)
assert_(not missing, msg)
if on_unexpected_cols is not None:
msg = ('(validate_schema) Unexpected columns {unexpected}'.format(
unexpected=unexpected))
caller = assert_ if on_unexpected_cols == 'raise' else assert_.warn
caller(not unexpected, msg)
# if passing a mapping, schema is validated (even for optional columns)
for schema_to_validate in [schema, optional]:
if isinstance(schema_to_validate, Mapping):
# validate column types (as many as you can)
dtypes = data.dtypes.astype(str).to_dict()
for name, dtype in dtypes.items():
expected = schema_to_validate.get(name)
if expected is not None:
msg = (
'(validate_schema) Wrong dtype for column "{name}". '
'Expected: "{expected}". Got: "{dtype}"'.format(
name=name, expected=expected, dtype=dtype))
assert_(dtype == expected, msg)
return assert_
@validator
def validate_values(assert_, data, values):
data_cols = data.columns
for column, (kind, params) in values.items():
if column not in data_cols:
assert_.warn(False,
('(validate_values) Declared spec for column "{}" but'
' it does not appear in the data').format(column))
elif kind == 'unique':
expected = set(params)
unique = set(data[column].unique())
msg = ('(validate_values) Expected unique values of "{}" to be a'
' subset of {}, got: {}'.format(column, expected, unique))
assert_(expected >= unique, msg)
elif kind == 'range':
if len(params) != 2:
raise ValueError('If kind is range, you must provide two '
'values, got {}'.format(params))
min_expected, max_expected = params
min_ = data[column].min()
max_ = data[column].max()
msg = ('(validate_values) Expected range of "{}" to be [{}, {}], '
'got [{}, {}]'.format(column, min_expected, max_expected,
min_, max_))
assert_(min_expected <= min_ and max_ <= max_expected, msg)
else:
raise ValueError('Got invalid kind, must be "unique" or "range"')
def data_frame_validator(df, validators):
"""
Examples
--------
>>> from ploomber.validators import data_frame_validator
>>> from ploomber.validators import validate_schema, validate_values
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame({'x': np.random.rand(3), 'y': np.random.rand(3),
... 'z': [0, 1, 2], 'i': ['a', 'b', 'c']})
>>> data_frame_validator(df,
... [validate_schema(schema={'x': 'int', 'z': 'int'}),
... validate_values(values={'z': ('range', (0, 1)),
... 'i': ('unique', {'a'}),
... 'j': ('unique', {'b'})}
... )]) # doctest: +SKIP
"""
assert_ = Assert()
for validator in validators:
validator(assert_=assert_, data=df)
if len(assert_):
raise AssertionError('Data frame validation failed. ' + str(assert_))
return True
```
#### File: tests/cli/test_cloud.py
```python
import uuid
from unittest.mock import Mock
from pathlib import Path
import pytest
import yaml
from click.testing import CliRunner
from ploomber.cli import cloud, examples
from ploomber_cli.cli import get_key, set_key, write_pipeline, get_pipelines,\
delete_pipeline
from ploomber.telemetry import telemetry
from ploomber.telemetry.telemetry import DEFAULT_USER_CONF
from ploomber import table
@pytest.fixture()
def write_sample_conf(tmp_directory, monkeypatch):
monkeypatch.setattr(telemetry, 'DEFAULT_HOME_DIR', '.')
stats = Path('stats')
stats.mkdir()
full_path = (stats / DEFAULT_USER_CONF)
full_path.write_text("stats_enabled: False")
def write_sample_pipeline(pipeline_id=None, status=None):
runner = CliRunner()
result = runner.invoke(write_pipeline,
args=[pipeline_id, status],
catch_exceptions=False)
return result.stdout
def delete_sample_pipeline(pipeline_id=None):
runner = CliRunner()
res = runner.invoke(delete_pipeline, args=[pipeline_id])
return res.stdout
def get_tabular_pipeline(pipeline_id=None, verbose=None):
runner = CliRunner()
if pipeline_id:
args = [pipeline_id]
else:
args = []
if verbose:
args.append(verbose)
res = runner.invoke(get_pipelines, args=args, catch_exceptions=False)
return res.stdout
def test_write_api_key(write_sample_conf):
key_val = "TEST_KEY12345678987654"
key_name = "cloud_key"
full_path = (Path('stats') / DEFAULT_USER_CONF)
# Write cloud key to existing file, assert on key/val
cloud.set_key(key_val)
with full_path.open("r") as file:
conf = yaml.safe_load(file)
assert key_name in conf.keys()
assert key_val in conf[key_name]
def test_write_key_no_conf_file(tmp_directory, monkeypatch):
key_val = "TEST_KEY12345678987654"
key_name = "cloud_key"
monkeypatch.setattr(telemetry, 'DEFAULT_HOME_DIR', '.')
stats = Path('stats')
stats.mkdir()
full_path = (Path('stats') / DEFAULT_USER_CONF)
# Write cloud key to existing file, assert on key/val
cloud._set_key(key_val)
with full_path.open("r") as file:
conf = yaml.safe_load(file)
assert key_name in conf.keys()
assert key_val in conf[key_name]
def test_overwrites_api_key(write_sample_conf):
key_val = "TEST_KEY12345678987654"
key_name = "cloud_key"
full_path = (Path('stats') / DEFAULT_USER_CONF)
cloud.set_key(key_val)
# Write cloud key to existing file, assert on key/val
another_val = "SEC_KEY123456789876543"
cloud.set_key(another_val)
with full_path.open("r") as file:
conf = yaml.safe_load(file)
assert key_name in conf.keys()
assert another_val in conf[key_name]
@pytest.mark.parametrize('arg', [None, '12345'])
def test_api_key_well_formatted(write_sample_conf, arg):
with pytest.raises(BaseException) as excinfo:
cloud.set_key(arg)
assert 'The API key is malformed' in str(excinfo.value)
def test_get_api_key(monkeypatch, write_sample_conf, capsys):
monkeypatch.delenv('PLOOMBER_CLOUD_KEY', raising=True)
key_val = "TEST_KEY12345678987654"
runner = CliRunner()
result = runner.invoke(set_key, args=[key_val], catch_exceptions=False)
assert 'Key was stored\n' in result.stdout
result = runner.invoke(get_key, catch_exceptions=False)
assert key_val in result.stdout
def test_get_api_key_from_env_var(monkeypatch):
key_val = 'TEST_KEY12345678987654'
monkeypatch.setenv('PLOOMBER_CLOUD_KEY', key_val)
runner = CliRunner()
result = runner.invoke(set_key,
args=["XXXX_KEY12345678987654"],
catch_exceptions=False)
assert 'Key was stored\n' in result.stdout
result = runner.invoke(get_key, catch_exceptions=False)
assert key_val in result.stdout
def test_get_no_key(monkeypatch, write_sample_conf, capsys):
monkeypatch.delenv('PLOOMBER_CLOUD_KEY', raising=True)
runner = CliRunner()
result = runner.invoke(get_key, catch_exceptions=False)
assert 'No cloud API key was found.\n' == result.stdout
def test_two_keys_not_supported(monkeypatch, write_sample_conf, capsys):
monkeypatch.delenv('PLOOMBER_CLOUD_KEY', raising=True)
key_val = "TEST_KEY12345678987654"
key2 = 'SEC_KEY12345678987654'
runner = CliRunner()
runner.invoke(set_key, args=[key_val], catch_exceptions=False)
# Write a second key (manual on file by user)
full_path = (Path('stats') / DEFAULT_USER_CONF)
print(full_path)
conf = full_path.read_text()
conf += f'cloud_key: {key2}\n'
full_path.write_text(conf)
res = runner.invoke(get_key, catch_exceptions=False)
# By the yaml default, it'll take the latest key
assert key2 in res.stdout
def test_cloud_user_tracked(write_sample_conf):
key_val = "TEST_KEY12345678987654"
runner = CliRunner()
runner.invoke(set_key, args=[key_val], catch_exceptions=False)
assert key_val == telemetry.is_cloud_user()
@pytest.mark.xfail(reason="timing out")
def test_get_pipeline(monkeypatch):
# Write sample pipeline
pid = str(uuid.uuid4())
status = 'started'
res = write_sample_pipeline(pid, status)
assert pid in res
pipeline = cloud.get_pipeline(pid, status)
assert isinstance(pipeline, list)
assert pid == pipeline[0]['pipeline_id']
res = delete_sample_pipeline(pid)
assert pid in res
def test_get_pipeline_no_key(tmp_directory, monkeypatch):
key = "TEST_KEY"
sample_pipeline_id = str(uuid.uuid4())
cloud_mock = Mock(return_value=key)
monkeypatch.setattr(cloud, 'get_key', cloud_mock)
pipeline = get_tabular_pipeline(sample_pipeline_id)
assert isinstance(pipeline, str)
assert 'API_Key not valid' in pipeline
@pytest.mark.xfail(reason="timing out")
def test_write_pipeline():
pid = str(uuid.uuid4())
status = 'started'
res = write_sample_pipeline(pid, status)
assert pid in res
res = delete_sample_pipeline(pid)
assert pid in res
def test_write_pipeline_no_valid_key(monkeypatch):
key = "<KEY>"
sample_pipeline_id = str(uuid.uuid4())
status = 'started'
cloud_mock = Mock(return_value=key)
monkeypatch.setattr(cloud, 'get_key', cloud_mock)
res = write_sample_pipeline(sample_pipeline_id, status)
assert 'API_Key' in res
def test_write_pipeline_no_status_id():
pipeline_id = ''
status = 'started'
res = write_sample_pipeline(pipeline_id, status)
assert 'No input pipeline_id' in res
pipeline_id = str(uuid.uuid4())
status = ''
res = write_sample_pipeline(pipeline_id=pipeline_id, status=status)
assert 'No input pipeline status' in res
@pytest.mark.xfail(reason="timing out")
def test_write_delete_pipeline():
pid = str(uuid.uuid4())
status = 'started'
res = write_sample_pipeline(pid, status)
assert pid in res
res = delete_sample_pipeline(pid)
assert pid in res
@pytest.mark.xfail(reason="timing out")
def test_delete_non_exist_pipeline():
pid = 'TEST_PIPELINE'
res = get_tabular_pipeline(pid)
assert f'{pid} was not' in res
res = delete_sample_pipeline(pid)
assert 'doesn\'t exist' in res
@pytest.mark.xfail(reason="timing out")
def test_update_existing_pipeline():
pid = str(uuid.uuid4())
end_status = 'finished'
res = write_sample_pipeline(pipeline_id=pid, status='started')
assert pid in res
res = write_sample_pipeline(pipeline_id=pid, status=end_status)
assert pid in res
pipeline = get_tabular_pipeline(pid)
assert isinstance(pipeline, str)
assert end_status in pipeline
res = delete_sample_pipeline(pid)
assert pid in res
@pytest.mark.xfail(reason="timing out")
def test_pipeline_write_error():
pid = str(uuid.uuid4())
end_status = 'error'
log = 'Error: issue building the dag'
runner = CliRunner()
result = runner.invoke(write_pipeline,
args=[pid, end_status, log],
catch_exceptions=False)
assert pid in result.stdout
pipeline = get_tabular_pipeline(pid)
assert isinstance(pipeline, str)
assert end_status in pipeline
res = delete_sample_pipeline(pid)
assert pid in res
# Get all pipelines, minimum of 3 should exist.
@pytest.mark.xfail(reason="timing out")
def test_get_multiple_pipelines(monkeypatch):
class CustomTableWrapper(table.Table):
@classmethod
def from_dicts(cls, dicts, complete_keys):
# call the super class
table = super().from_dicts(dicts, complete_keys)
# store the result in the class
cls.table = table
return table
# monkeypatch CustomParser to use our wrapper
# FIXME: we should be fixing the local module, not the global one
# but when we refactored the CLI to load fast, we moved the import in
# the cli module inside the cli function so we can no longer patch it
monkeypatch.setattr(table, 'Table', CustomTableWrapper)
pid = str(uuid.uuid4())
pid2 = str(uuid.uuid4())
pid3 = str(uuid.uuid4())
status = 'finished'
res = write_sample_pipeline(pipeline_id=pid, status=status)
assert pid in res
res = write_sample_pipeline(pipeline_id=pid2, status=status)
assert pid2 in res
res = write_sample_pipeline(pipeline_id=pid3, status=status)
assert pid3 in res
get_tabular_pipeline()
assert len(CustomTableWrapper.table['pipeline_id']) >= 3
res = delete_sample_pipeline(pid)
assert pid in res
res = delete_sample_pipeline(pid2)
assert pid2 in res
res = delete_sample_pipeline(pid3)
assert pid3 in res
def test_get_latest_pipeline(monkeypatch):
pid = str(uuid.uuid4())
status = 'started'
api_mock = Mock(return_value=[{"pipeline_id": pid}])
monkeypatch.setattr(cloud, 'write_pipeline', api_mock)
monkeypatch.setattr(cloud, 'get_pipeline', api_mock)
res = write_sample_pipeline(pid, status)
assert pid in str(res)
pipeline = get_tabular_pipeline('latest')
assert isinstance(pipeline, str)
assert pid in pipeline
@pytest.mark.xfail(reason="timing out")
def test_get_active_pipeline(monkeypatch):
pid = str(uuid.uuid4())
res = write_sample_pipeline(pipeline_id=pid, status='started')
assert pid in res
# Cutting the pipelineID for the tabular print
pipeline = get_tabular_pipeline('active')
prefix = pid.split("-")[0]
assert prefix in pipeline
res = delete_sample_pipeline(pid)
assert pid in res
@pytest.mark.xfail(reason="timing out")
def test_get_pipeline_with_dag(monkeypatch):
dag_mock = Mock(
return_value={
"dag_size": "2",
"tasks": {
"features": {
"products": "features.parquet",
"status": "Skipped",
"type": "PythonCallable",
"upstream": {
"get": "get.parquet"
}
},
"get": {
"products": "get.parquet",
"status": "Skipped",
"type": "PythonCallable",
"upstream": {}
}
}
})
monkeypatch.setattr(telemetry, 'parse_dag', dag_mock)
pid = str(uuid.uuid4())
status = 'finished'
dag = telemetry.parse_dag("Sample_dag")
res = cloud.write_pipeline(pipeline_id=pid, status=status, dag=dag)
assert pid in str(res)
res = get_tabular_pipeline(pipeline_id=pid, verbose='-v')
assert 'dag' in res
res = get_tabular_pipeline(pipeline_id=pid)
assert 'dag' not in res
res = delete_sample_pipeline(pid)
assert pid in res
# Test empty string/emails without a @
@pytest.mark.parametrize('user_email', ['', 'test', '@', 'a@c'])
def test_malformed_email_signup(monkeypatch, user_email):
mock = Mock()
monkeypatch.setattr(cloud, '_email_registry', mock)
cloud._email_validation(user_email)
mock.assert_not_called()
# Testing valid api calls when the email is correct
def test_correct_email_signup(tmp_directory, monkeypatch):
monkeypatch.setattr(telemetry, 'DEFAULT_HOME_DIR', '.')
registry_mock = Mock()
monkeypatch.setattr(cloud, '_email_registry', registry_mock)
sample_email = '<EMAIL>'
cloud._email_validation(sample_email)
registry_mock.assert_called_once()
# Test valid emails are stored in the user conf
def test_email_conf_file(tmp_directory, monkeypatch):
registry_mock = Mock()
monkeypatch.setattr(cloud, '_email_registry', registry_mock)
monkeypatch.setattr(telemetry, 'DEFAULT_HOME_DIR', '.')
stats = Path('stats')
stats.mkdir()
conf_path = stats / telemetry.DEFAULT_USER_CONF
conf_path.write_text("sample_conf_key: True\n")
sample_email = '<EMAIL>'
cloud._email_validation(sample_email)
conf = conf_path.read_text()
assert sample_email in conf
def test_email_write_only_once(tmp_directory, monkeypatch):
monkeypatch.setattr(telemetry, 'DEFAULT_HOME_DIR', '.')
input_mock = Mock(return_value='<EMAIL>')
monkeypatch.setattr(cloud, '_get_input', input_mock)
monkeypatch.setattr(telemetry.UserSettings, 'user_email', '<EMAIL>')
cloud._email_input()
assert not input_mock.called
def test_email_call_on_examples(tmp_directory, monkeypatch):
email_mock = Mock()
monkeypatch.setattr(examples, '_email_input', email_mock)
examples.main(name=None, force=True)
email_mock.assert_called_once()
@pytest.mark.parametrize('user_email', ['<EMAIL>', ''])
def test_email_called_once(tmp_directory, monkeypatch, user_email):
monkeypatch.setattr(telemetry, 'DEFAULT_HOME_DIR', '.')
email_mock = Mock(return_value=user_email)
api_mock = Mock()
monkeypatch.setattr(cloud, '_get_input', email_mock)
monkeypatch.setattr(cloud, '_email_registry', api_mock)
examples.main(name=None, force=True)
examples.main(name=None, force=True)
email_mock.assert_called_once()
```
#### File: tests/cli/test_install.py
```python
import subprocess
import os
import sys
from pathlib import Path
from unittest.mock import Mock, call, ANY
import shutil
import yaml
import datetime
import pytest
from click.testing import CliRunner
from ploomber.cli import install as install_module
from ploomber_cli.cli import install
from ploomber.cli.install import _pip_install
from ploomber.exceptions import BaseException
from conftest import (_write_sample_conda_env, _prepare_files,
_write_sample_pip_req, _write_sample_conda_files,
_write_sample_pip_files)
setup_py = """
from setuptools import setup, find_packages
setup(
name='sample_package',
version='1.0',
extras_require={'dev': []}
)
"""
@pytest.fixture
def pkg_manager():
mamba = shutil.which('mamba')
conda = shutil.which('conda')
return mamba if mamba else conda
@pytest.fixture
def error_if_calling_locate_pip_inside_conda(monkeypatch):
# to make it fail if it attempts to look for pip, see docstring in the
# '_locate_pip_inside_conda' method for details
mock_locate = Mock(side_effect=ValueError)
monkeypatch.setattr(install_module, '_locate_pip_inside_conda',
mock_locate)
@pytest.fixture
def mock_cmdr_wrapped(monkeypatch):
mock = Mock(wraps=install_module.Commander().run)
monkeypatch.setattr(install_module.Commander, 'run', mock)
return mock
@pytest.fixture
def cleanup_conda_tmp_env():
if os.name == 'nt':
conda = shutil.which('conda')
subprocess.run([conda, 'env', 'remove', '--name', 'my_tmp_env'])
def _get_venv_and_pip():
name = Path().resolve().name
venv = f'venv-{name}'
pip = str(
Path(venv, 'Scripts' if os.name == 'nt' else 'bin',
'pip.exe' if os.name == 'nt' else 'pip'))
return venv, pip
@pytest.mark.parametrize('has_conda, use_lock, env, env_lock, reqs, reqs_lock',
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0],
[1, 0, 0, 0, 0, 1],
[1, 0, 0, 1, 0, 1],
])
def test_missing_both_files(tmp_directory, has_conda, use_lock, env, env_lock,
reqs, reqs_lock, monkeypatch):
_prepare_files(has_conda, use_lock, env, env_lock, reqs, reqs_lock,
monkeypatch)
runner = CliRunner()
result = runner.invoke(
install,
args=['--use-lock'] if use_lock else ['--no-use-lock'],
catch_exceptions=False)
expected = ('Expected an environment.yaml (conda)'
' or requirements.txt (pip) in the current directory.'
' Add one of them and try again.')
assert f'Error: {expected}\n' == result.stdout
@pytest.mark.parametrize('has_conda, use_lock, env, env_lock, reqs, reqs_lock',
[
[0, 1, 1, 0, 1, 0],
[1, 1, 1, 0, 1, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 1, 0],
[0, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
])
def test_missing_both_lock_files(tmp_directory, has_conda, use_lock, env,
env_lock, reqs, reqs_lock, monkeypatch):
_prepare_files(has_conda, use_lock, env, env_lock, reqs, reqs_lock,
monkeypatch)
runner = CliRunner()
result = runner.invoke(install,
args=['--use-lock'] if use_lock else [],
catch_exceptions=False)
expected = (
'Expected an environment.lock.yaml '
'(conda) or requirements.lock.txt (pip) in the current directory. '
'Add one of them and try again.')
assert f'Error: {expected}\n' == result.stdout
@pytest.mark.parametrize('has_conda, use_lock, env, env_lock, reqs, reqs_lock',
[
[0, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 0],
])
def test_missing_env_lock(tmp_directory, has_conda, use_lock, env, env_lock,
reqs, reqs_lock, monkeypatch):
_prepare_files(has_conda, use_lock, env, env_lock, reqs, reqs_lock,
monkeypatch)
runner = CliRunner()
result = runner.invoke(
install,
args=['--use-lock'] if use_lock else ['--no-use-lock'],
catch_exceptions=False)
expected = ('Found env environment.lock.yaml '
'but conda is not installed. Install conda or add a '
'requirements.lock.txt to use pip instead')
assert f'Error: {expected}\n' == result.stdout
@pytest.mark.parametrize('has_conda, use_lock, env, env_lock, reqs, '
'reqs_lock', [
[0, 0, 1, 0, 0, 1],
[0, 0, 1, 1, 0, 1],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
])
def test_missing_env(tmp_directory, has_conda, use_lock, env, env_lock, reqs,
reqs_lock, monkeypatch):
_prepare_files(has_conda, use_lock, env, env_lock, reqs, reqs_lock,
monkeypatch)
runner = CliRunner()
result = runner.invoke(
install,
args=['--use-lock'] if use_lock else ['--no-use-lock'],
catch_exceptions=False)
expected = ('Found environment.yaml but conda '
'is not installed. Install conda or add a '
'requirements.txt to use pip instead')
assert f'Error: {expected}\n' == result.stdout
@pytest.fixture
def mock_main(monkeypatch):
main_pip, main_conda = Mock(), Mock()
monkeypatch.setattr(install_module, 'main_pip', main_pip)
monkeypatch.setattr(install_module, 'main_conda', main_conda)
return main_pip, main_conda
def test_use_lock_none_with_pip_lock_exists(tmp_directory, monkeypatch,
mock_main):
main_pip, main_conda = mock_main
# simulate no conda
monkeypatch.setattr(install_module.shutil, 'which', lambda _: None)
Path('requirements.lock.txt').touch()
runner = CliRunner()
result = runner.invoke(install, catch_exceptions=False)
assert result.exit_code == 0
main_pip.assert_called_once_with(use_lock=True, create_env=ANY)
main_conda.assert_not_called()
def test_use_lock_none_with_pip_regular_exists(tmp_directory, monkeypatch,
mock_main):
main_pip, main_conda = mock_main
# simulate no conda
monkeypatch.setattr(install_module.shutil, 'which', lambda _: None)
Path('requirements.txt').touch()
runner = CliRunner()
result = runner.invoke(install, catch_exceptions=False)
assert result.exit_code == 0
main_pip.assert_called_once_with(use_lock=False, create_env=ANY)
main_conda.assert_not_called()
def test_use_lock_none_with_conda_lock_exists(tmp_directory, mock_main):
main_pip, main_conda = mock_main
Path('environment.lock.yml').touch()
runner = CliRunner()
result = runner.invoke(install, catch_exceptions=False)
assert result.exit_code == 0
main_conda.assert_called_once_with(use_lock=True, create_env=ANY)
main_pip.assert_not_called()
def test_use_lock_none_with_conda_regular_exists(tmp_directory, mock_main):
main_pip, main_conda = mock_main
Path('environment.yml').touch()
runner = CliRunner()
result = runner.invoke(install, catch_exceptions=False)
assert result.exit_code == 0
main_conda.assert_called_once_with(use_lock=False, create_env=ANY)
main_pip.assert_not_called()
def test_use_lock_none_with_conda_wrong_lock_exists(tmp_directory, monkeypatch,
mock_main):
main_pip, main_conda = mock_main
# simulate no conda
monkeypatch.setattr(install_module.shutil, 'which', lambda _: None)
Path('environment.lock.yml').touch()
runner = CliRunner()
result = runner.invoke(install, catch_exceptions=False)
assert result.exit_code == 1
expected = 'Expected an environment.yaml (conda) or requirements.txt (pip)'
assert expected in result.output
def test_use_lock_none_with_pip_wrong_lock_exists(tmp_directory, mock_main):
main_pip, main_conda = mock_main
Path('requirements.lock.txt').touch()
runner = CliRunner()
result = runner.invoke(install, catch_exceptions=False)
assert result.exit_code == 1
expected = 'Expected an environment.yaml (conda) or requirements.txt (pip)'
assert expected in result.output
def test_use_venv_even_if_conda_installed(tmp_directory, mock_main):
main_pip, main_conda = mock_main
Path('requirements.lock.txt').touch()
runner = CliRunner()
result = runner.invoke(install,
args=['--use-venv'],
catch_exceptions=False)
assert result.exit_code == 0
main_pip.assert_called_once()
main_conda.assert_not_called()
def mocked_get_now():
dt = datetime.datetime(2021, 1, 1, 10, 10, 10)
return dt
@pytest.mark.parametrize('has_conda, use_lock, env, env_lock, reqs, reqs_lock',
[
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 1],
[1, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 1, 1],
[1, 1, 0, 1, 0, 0],
[1, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 1, 0],
[1, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 1, 1],
[1, 0, 1, 1, 1, 1],
[1, 0, 1, 0, 0, 0],
[1, 0, 1, 1, 0, 0],
[1, 0, 1, 0, 0, 1],
[1, 0, 1, 1, 0, 1],
])
def test_install_with_conda(tmp_directory, has_conda, use_lock, env, env_lock,
reqs, reqs_lock, monkeypatch):
_prepare_files(has_conda, use_lock, env, env_lock, reqs, reqs_lock,
monkeypatch)
mock = Mock()
mock.patch(install_module, 'datetime', side_effect=mocked_get_now)
monkeypatch.setattr(install_module, 'main_conda', mock)
start = mocked_get_now()
install_module.main_conda(start, True if use_lock else False)
inputs_args, kwargs = mock.call_args
assert inputs_args[0] == start
assert inputs_args[1] == bool(use_lock)
@pytest.mark.parametrize('has_conda, use_lock, env, env_lock, reqs, reqs_lock',
[
[1, 1, 1, 0, 1, 1],
[1, 1, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 1],
[1, 1, 0, 0, 0, 1],
[0, 1, 1, 0, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 1, 1],
[0, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 0, 1],
[0, 1, 0, 0, 0, 1],
[0, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 1, 0],
[1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 1, 1],
[1, 0, 0, 1, 1, 1],
[0, 0, 1, 0, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 1, 1],
])
def test_install_with_pip(tmp_directory, has_conda, use_lock, env, env_lock,
reqs, reqs_lock, monkeypatch):
_prepare_files(has_conda, use_lock, env, env_lock, reqs, reqs_lock,
monkeypatch)
mock = Mock()
monkeypatch.setattr(install_module, 'main_pip', mock)
mock.patch(install_module, 'datetime', side_effect=mocked_get_now)
start = mocked_get_now()
install_module.main_pip(start, True if use_lock else False)
inputs_args, kwargs = mock.call_args
assert inputs_args[0] == start
assert inputs_args[1] == bool(use_lock)
@pytest.mark.parametrize('args, is_conda, env_name, create_env', [
[[], True, 'some-env', False],
[[], True, 'some-env', False],
[[], True, 'base', False],
[[], True, 'base', False],
[[], False, 'some-env', False],
[[], False, 'some-env', False],
[['--create-env'], True, 'some-env', True],
])
def test_installs_conda_inline_if_inside_venv(tmp_directory, monkeypatch, args,
is_conda, env_name, create_env):
_write_sample_conda_files()
main = Mock()
monkeypatch.setattr(install_module.shutil, 'which', Mock())
monkeypatch.setattr(install_module, 'main_conda', main)
monkeypatch.setattr(install_module.telemetry, 'is_conda', lambda: is_conda)
monkeypatch.setattr(install_module, '_current_conda_env_name',
lambda: env_name)
runner = CliRunner()
result = runner.invoke(install, args=args, catch_exceptions=False)
main.assert_called_once_with(use_lock=ANY, create_env=create_env)
assert result.exit_code == 0
@pytest.mark.parametrize('args, in_venv, create_env', [
[[], False, False],
[[], False, False],
[[], True, False],
[[], True, False],
[['--create-env'], True, True],
])
def test_installs_pip_inline_if_inside_venv(tmp_directory, monkeypatch, args,
in_venv, create_env):
_write_sample_pip_files()
main = Mock()
# simulate no conda
monkeypatch.setattr(install_module.shutil, 'which', lambda _: None)
monkeypatch.setattr(install_module, 'main_pip', main)
monkeypatch.setattr(install_module.telemetry, 'in_virtualenv',
lambda: in_venv)
runner = CliRunner()
result = runner.invoke(install, args=args, catch_exceptions=False)
main.assert_called_once_with(use_lock=ANY, create_env=create_env)
assert result.exit_code == 0
@pytest.mark.parametrize('dev_create, use_lock, expected_call', [
[
False, False,
[
call('pip',
'install',
'--requirement',
'requirements.txt',
description=ANY),
call('pip',
'freeze',
'--exclude-editable',
description=ANY,
capture_output=True)
]
],
[
False, True,
[
call('pip',
'install',
'--requirement',
'requirements.lock.txt',
description=ANY)
]
],
[
True, False,
[
call('pip',
'install',
'--requirement',
'requirements.txt',
description=ANY),
call('pip',
'freeze',
'--exclude-editable',
description=ANY,
capture_output=True),
call('pip',
'install',
'--requirement',
'requirements.dev.txt',
description=ANY),
call('pip',
'freeze',
'--exclude-editable',
description=ANY,
capture_output=True)
]
],
[
True, True,
[
call('pip',
'install',
'--requirement',
'requirements.lock.txt',
description=ANY),
call('pip',
'install',
'--requirement',
'requirements.dev.lock.txt',
description=ANY)
]
],
])
def test_main_pip_install_inline(tmp_directory, monkeypatch, capsys,
dev_create, use_lock, expected_call):
_write_sample_pip_files(dev=False)
_write_sample_pip_files(dev=dev_create)
mock = Mock(return_value='something')
monkeypatch.setattr(install_module.Commander, 'run', mock)
install_module.main_pip(use_lock=use_lock, create_env=False)
assert mock.call_args_list == expected_call
captured = capsys.readouterr()
assert "=\n$ ploomber build\n=" in captured.out
@pytest.mark.parametrize('dev_create, use_lock, expected_calls', [
[
False, False,
[
call('conda',
'env',
'update',
'--file',
'environment.yml',
'--name',
'some-env',
description=ANY),
call('conda',
'env',
'export',
'--no-build',
'--name',
'some-env',
description=ANY,
capture_output=True)
]
],
[
False, True,
[
call('conda',
'env',
'update',
'--file',
'environment.lock.yml',
'--name',
'some-env',
description=ANY),
]
],
[
True, True,
[
call('conda',
'env',
'update',
'--file',
'environment.lock.yml',
'--name',
'some-env',
description=ANY),
call('conda',
'env',
'update',
'--file',
'environment.dev.lock.yml',
'--name',
'some-env',
description=ANY)
]
],
[
True, False,
[
call('conda',
'env',
'update',
'--file',
'environment.yml',
'--name',
'some-env',
description=ANY),
call('conda',
'env',
'export',
'--no-build',
'--name',
'some-env',
description=ANY,
capture_output=True),
call('conda',
'env',
'update',
'--file',
'environment.dev.yml',
'--name',
'some-env',
description=ANY),
call('conda',
'env',
'export',
'--no-build',
'--name',
'some-env',
description=ANY,
capture_output=True)
]
],
])
def test_main_conda_install_inline(monkeypatch, capsys, tmp_directory,
dev_create, use_lock, expected_calls):
_write_sample_conda_files()
_write_sample_conda_files(dev=dev_create)
def which(arg):
return arg if arg == 'conda' else None
mock = Mock(return_value='something')
monkeypatch.setattr(install_module.Commander, 'run', mock)
monkeypatch.setattr(install_module.shutil, 'which', which)
monkeypatch.setattr(install_module, '_current_conda_env_name',
lambda: 'some-env')
install_module.main_conda(use_lock=use_lock, create_env=False)
assert mock.call_args_list == expected_calls
captured = capsys.readouterr()
assert "=\n$ ploomber build\n=" in captured.out
@pytest.mark.parametrize('conda_bin, conda_root',
[
[('something', 'Miniconda3', 'conda'),
('something', 'Miniconda3')],
[('something', 'miniconda3', 'conda'),
('something', 'miniconda3')],
[('something', 'Anaconda3', 'conda'),
('something', 'Anaconda3')],
[('something', 'anaconda3', 'conda'),
('something', 'anaconda3')],
[('one', 'miniconda3', 'dir', 'conda'),
('one', 'miniconda3')],
[('one', 'anaconda3', 'dir', 'conda'),
('one', 'anaconda3')],
[('one', 'another', 'Miniconda3', 'conda'),
('one', 'another', 'Miniconda3')],
[('one', 'another', 'Anaconda3', 'conda'),
('one', 'another', 'Anaconda3')],
])
def test_find_conda_root(conda_bin, conda_root):
assert install_module._find_conda_root(
Path(*conda_bin)).parts == conda_root
def test_error_if_unknown_conda_layout():
with pytest.raises(BaseException):
install_module._find_conda_root(Path('a', 'b'))
@pytest.mark.parametrize(
'conda_bin',
[
# old versions of conda may have the conda binary in a different
# location see #319
['Users', 'user', 'Miniconda3', 'Library', 'bin', 'conda.BAT'],
['Users', 'user', 'Miniconda3', 'condabin', 'conda.BAT'],
],
ids=['location-old', 'location-new'])
def test_locate_pip_inside_conda(monkeypatch, tmp_directory, conda_bin):
mock = Mock(return_value=str(Path(*conda_bin)))
path = Path('Users', 'user', 'Miniconda3', 'envs', 'myenv',
'Scripts' if os.name == 'nt' else 'bin',
'pip.exe' if os.name == 'nt' else 'pip')
path.parent.mkdir(parents=True)
path.touch()
monkeypatch.setattr(install_module.shutil, 'which', mock)
assert install_module._locate_pip_inside_conda('myenv') == str(path)
# FIXME: i tested this locally on a windows machine and it works but for some
# reason, the machine running on github actions is unable to locate "conda"
# hence this fails. it's weird because I'm calling conda without issues
# to install dependencies during setup. Same with the next two tests
@pytest.mark.xfail(sys.platform == 'win32',
reason='Test not working on Github Actions on Windows')
def test_install_package_conda(tmp_directory, mock_cmdr_wrapped):
_write_sample_conda_env()
Path('setup.py').write_text(setup_py)
runner = CliRunner()
runner.invoke(install, catch_exceptions=False)
# check it calls "pip install --editable ."
assert mock_cmdr_wrapped.call_args_list[-2][1][
'description'] == 'Installing project'
# check first argument is the path to the conda binary instead of just
# "conda" since we discovered that fails sometimes on Windows
assert all(
[Path(c[0][0]).is_file() for c in mock_cmdr_wrapped.call_args_list])
assert set(os.listdir()) == {
'environment.yml',
'environment.lock.yml',
'sample_package.egg-info',
'setup.py',
}
@pytest.mark.xfail(sys.platform == 'win32',
reason='Test not working on Github Actions on Windows')
def test_install_non_package_with_conda(tmp_directory, monkeypatch,
mock_cmdr_wrapped):
# to make it fail if it attempts to look for pip, see docstring in the
# '_locate_pip_inside_conda' method for details
mock_locate = Mock(side_effect=ValueError)
monkeypatch.setattr(install_module, '_locate_pip_inside_conda',
mock_locate)
_write_sample_conda_env()
runner = CliRunner()
runner.invoke(install, catch_exceptions=False)
# check first argument is the path to the conda binary instead of just
# "conda" since we discovered that fails sometimes on Windows
assert all(
[Path(c[0][0]).is_file() for c in mock_cmdr_wrapped.call_args_list])
assert set(os.listdir()) == {'environment.yml', 'environment.lock.yml'}
@pytest.mark.xfail(sys.platform == 'win32',
reason='Test not working on Github Actions on Windows')
def test_non_package_with_conda_with_dev_deps(tmp_directory):
_write_sample_conda_env()
_write_sample_conda_env('environment.dev.yml')
runner = CliRunner()
runner.invoke(install, catch_exceptions=False)
assert set(os.listdir()) == {
'environment.yml', 'environment.lock.yml', 'environment.dev.yml',
'environment.dev.lock.yml'
}
@pytest.mark.parametrize('create_dev_lock', [True, False])
def test_install_lock_non_package_with_conda(
tmp_directory, monkeypatch, mock_cmdr_wrapped, pkg_manager,
error_if_calling_locate_pip_inside_conda, cleanup_conda_tmp_env,
create_dev_lock):
_write_sample_conda_env('environment.lock.yml')
if create_dev_lock:
_write_sample_conda_env('environment.dev.lock.yml')
runner = CliRunner()
runner.invoke(install,
args=['--use-lock', '--create-env'],
catch_exceptions=False)
expected = [
call(pkg_manager,
'env',
'create',
'--file',
'environment.lock.yml',
'--force',
description='Creating env'),
call(pkg_manager,
'env',
'update',
'--file',
'environment.dev.lock.yml',
'--name',
'my_tmp_env',
description='Installing dev dependencies')
]
# on windows, we expect this call to check if the env exists already
if os.name == 'nt':
expected.insert(
0, call(pkg_manager, 'env', 'list', '--json', capture_output=True))
# pop the last entry if we dont have dev dependencies
if not create_dev_lock:
expected.pop(-1)
assert mock_cmdr_wrapped.call_args_list == expected
assert all(
[Path(c[0][0]).is_file() for c in mock_cmdr_wrapped.call_args_list])
@pytest.mark.parametrize('create_dev_lock', [True, False])
def test_install_lock_package_with_conda(tmp_directory, monkeypatch,
mock_cmdr_wrapped, pkg_manager,
cleanup_conda_tmp_env,
create_dev_lock):
_write_sample_conda_env('environment.lock.yml')
if create_dev_lock:
_write_sample_conda_env('environment.dev.lock.yml')
Path('setup.py').write_text(setup_py)
runner = CliRunner()
runner.invoke(install,
args=['--use-lock', '--create-env'],
catch_exceptions=False)
pip = install_module._path_to_pip_in_env_with_name(shutil.which('conda'),
'my_tmp_env')
expected = [
call(pkg_manager,
'env',
'create',
'--file',
'environment.lock.yml',
'--force',
description='Creating env'),
call(pip,
'install',
'--editable',
'.',
description='Installing project'),
call(pkg_manager,
'env',
'update',
'--file',
'environment.dev.lock.yml',
'--name',
'my_tmp_env',
description='Installing dev dependencies')
]
if os.name == 'nt':
expected.insert(
0, call(pkg_manager, 'env', 'list', '--json', capture_output=True))
if not create_dev_lock:
expected.pop(-1)
assert mock_cmdr_wrapped.call_args_list == expected
assert all(
[Path(c[0][0]).is_file() for c in mock_cmdr_wrapped.call_args_list])
# FIXME: I tested this locally on a windows machine but breaks on Github
# Actions.
# Problem happens when running pip:
# AssertionError: Egg-link c:\users\runner~1\appdata\local\temp\tmp30cvb5ki
# does not match installed location of sample-package-pip
# (at c:\users\runneradmin\appdata\local\temp\tmp30cvb5ki)
# I think it's because of some weird configuration on github actions
# creates symlinks
@pytest.mark.xfail(sys.platform == 'win32',
reason='Test not working on Github Actions on Windows')
def test_install_pip(tmp_directory):
_write_sample_pip_req()
Path('setup.py').write_text(setup_py)
name = f'venv-{Path(tmp_directory).name}'
runner = CliRunner()
result = runner.invoke(install,
args='--create-env',
catch_exceptions=False)
if os.name == 'nt':
expected_command = (
f'\nIf using cmd.exe: {name}\\Scripts\\activate.bat'
f'\nIf using PowerShell: {name}\\Scripts\\Activate.ps1')
else:
expected_command = f'source {name}/bin/activate'
assert Path('.gitignore').read_text() == f'\n{name}\n'
assert expected_command in result.stdout
assert Path('requirements.lock.txt').exists()
assert result.exit_code == 0
@pytest.mark.xfail(sys.platform == 'win32',
reason='Test not working on Github Actions on Windows')
def test_install_pip_does_not_duplicate_gitignore_entry(tmp_directory):
_write_sample_pip_req()
Path('setup.py').write_text(setup_py)
name = f'venv-{Path(tmp_directory).name}'
Path('.gitignore').write_text(f'{name}\n')
runner = CliRunner()
result = runner.invoke(install, catch_exceptions=False)
# the entry was already there, should not duplicate
assert Path('.gitignore').read_text() == f'{name}\n'
assert result.exit_code == 0
def test_non_package_with_pip(tmp_directory):
_write_sample_pip_req()
Path('setup.py').write_text(setup_py)
name = f'venv-{Path(tmp_directory).name}'
runner = CliRunner()
result = runner.invoke(install,
args=['--create-env'],
catch_exceptions=False)
assert Path('.gitignore').read_text() == f'\n{name}\n'
assert Path('requirements.lock.txt').exists()
assert result.exit_code == 0
def test_non_package_with_pip_with_dev_deps(tmp_directory):
_write_sample_pip_req()
_write_sample_pip_req('requirements.dev.txt')
Path('setup.py').write_text(setup_py)
name = f'venv-{Path(tmp_directory).name}'
runner = CliRunner()
result = runner.invoke(install,
args='--create-env',
catch_exceptions=False)
assert Path('.gitignore').read_text() == f'\n{name}\n'
assert Path('requirements.lock.txt').exists()
assert Path('requirements.dev.lock.txt').exists()
assert '# Editable install' not in Path(
'requirements.lock.txt').read_text()
assert '# Editable install' not in Path(
'requirements.dev.lock.txt').read_text()
assert result.exit_code == 0
@pytest.mark.parametrize('create_setup_py', [True, False])
@pytest.mark.parametrize('create_dev_lock', [True, False])
def test_install_lock_pip(tmp_directory, mock_cmdr_wrapped, create_setup_py,
create_dev_lock):
_write_sample_pip_req('requirements.lock.txt')
if create_dev_lock:
_write_sample_pip_req('requirements.dev.lock.txt')
if create_setup_py:
Path('setup.py').write_text(setup_py)
runner = CliRunner()
result = runner.invoke(install,
args=['--use-lock', '--create-env'],
catch_exceptions=False)
venv, pip = _get_venv_and_pip()
expected = [
call(sys.executable, '-m', 'venv', venv, description='Creating venv'),
call(pip, 'install', '--editable', '.', description=ANY),
call(pip,
'install',
'--requirement',
'requirements.lock.txt',
description=ANY),
call(pip,
'install',
'--requirement',
'requirements.dev.lock.txt',
description=ANY)
]
if not create_setup_py:
expected.pop(1)
if not create_dev_lock:
expected.pop(-1)
assert mock_cmdr_wrapped.call_args_list == expected
assert Path('.gitignore').read_text() == f'\n{venv}\n'
assert result.exit_code == 0
@pytest.mark.parametrize('file', ['requirements.lock.txt', 'requirements.txt'])
def test_suggests_use_pip_if_cmd_fails(tmp_directory, monkeypatch, file):
# simulate no conda
monkeypatch.setattr(install_module.shutil, 'which', lambda _: None)
monkeypatch.setattr(install_module, '_run_pip_commands',
Mock(side_effect=Exception('some-error')))
Path(file).touch()
runner = CliRunner()
result = runner.invoke(install, catch_exceptions=False)
assert result.exit_code == 1
assert f'pip install --requirement {file}' in result.output
assert 'some-error' in result.output
@pytest.mark.parametrize('file', ['environment.yml', 'environment.lock.yml'])
def test_suggests_use_conda_if_cmd_fails(tmp_directory, monkeypatch, file):
monkeypatch.setattr(install_module, '_run_conda_commands',
Mock(side_effect=Exception('some-error')))
monkeypatch.setattr(install_module, '_current_conda_env_name',
lambda: 'current-env')
Path(file).write_text('name: some-env')
runner = CliRunner()
result = runner.invoke(install, catch_exceptions=False)
assert result.exit_code == 1
assert (f'conda env update --file {file} --name current-env'
in result.output)
assert 'some-error' in result.output
@pytest.mark.parametrize('file', ['environment.yml', 'environment.lock.yml'])
def test_suggests_use_conda_create_if_cmd_fails(tmp_directory, monkeypatch,
file):
monkeypatch.setattr(install_module, '_run_conda_commands',
Mock(side_effect=Exception('some-error')))
monkeypatch.setattr(install_module, '_current_conda_env_name',
lambda: 'current-env')
Path(file).write_text('name: some-env')
runner = CliRunner()
result = runner.invoke(install,
args=['--create-env'],
catch_exceptions=False)
assert result.exit_code == 1
assert f'conda env create --file {file} --force' in result.output
assert 'some-error' in result.output
empty = """
key: value
"""
no_python = """
dependencies:
- a
- b
- pip:
- c
"""
with_python = """
dependencies:
- a
- python=3.9
"""
@pytest.mark.parametrize('content, has_python, env_fixed', [
[empty, False, {
'key': 'value'
}],
[no_python, False, {
'dependencies': ['a', 'b', {
'pip': ['c']
}]
}],
[with_python, True, {
'dependencies': ['a']
}],
])
def test_conda_install_ignores_python(
tmp_directory,
content,
has_python,
env_fixed,
):
Path('environment.yml').write_text(content)
(has_python_out, env_yml_out
) = install_module._environment_yml_has_python('environment.yml')
assert has_python == has_python_out
assert env_fixed == env_yml_out
@pytest.mark.parametrize('content, filename, d_to_use', [
[empty, 'environment.yml', {
'key': 'value'
}],
[
no_python, 'environment.yml', {
'dependencies': ['a', 'b', {
'pip': ['c']
}]
}
],
[with_python, '.ploomber-conda-tmp.yml', {
'dependencies': ['a']
}],
])
def test_check_environment_yaml(content, filename, d_to_use, tmp_directory):
Path('environment.yml').write_text(content)
with install_module.check_environment_yaml('environment.yml') as path:
assert Path(path).exists()
assert path == filename
with open(path) as f:
d = yaml.safe_load(f)
assert d_to_use == d
assert not Path('.ploomber-conda-tmp.yml').exists()
def test_pip_mixed_versions(monkeypatch):
mock = Mock()
mock.return_value = """pyflakes==2.4.0\nPygments==2.11.2\n
pygraphviz @ file:///Users/runner/miniforge3/pygraphviz_1644545996627"""
monkeypatch.setattr(install_module.Commander, 'run', mock)
with pytest.warns(UserWarning):
_pip_install(install_module.Commander, {}, True)
```
#### File: tests/cli/test_suggest_command.py
```python
import pytest
from ploomber_cli.cli import _suggest_command, cmd_router
import sys
@pytest.mark.parametrize('name, expected', [
[None, None],
['d', 'do'],
['ake', 'make'],
['MAKE', 'make'],
['do', None],
['make', None],
['run', 'build'],
['execute', 'build'],
])
def test_suggest_command(name, expected):
assert _suggest_command(name, ['do', 'make']) == expected
@pytest.mark.parametrize('name, expected', [
['gt-key', 'get-key'],
['gt', None],
])
def test_nested_suggest_command(name, expected):
assert _suggest_command(
name, ['set-key', 'get-key', 'get-pipelines']) == expected
@pytest.mark.parametrize('cmd, nested_cmd, suggestion', [
['cloud', 'gt-key', 'get-key'],
['cloud', 'gt', None],
])
def test_nested_suggestions(monkeypatch, capsys, cmd, nested_cmd, suggestion):
monkeypatch.setattr(sys, 'argv', ['ploomber', cmd, nested_cmd])
with pytest.raises(SystemExit) as excinfo:
cmd_router()
captured = capsys.readouterr()
if suggestion:
assert f"Did you mean '{cmd} {suggestion}'?" in captured.err
else:
assert f"No such command '{nested_cmd}'" in captured.err
assert excinfo.value.code == 2
```
#### File: tests/cloud/test_io.py
```python
from unittest.mock import Mock
from pathlib import Path
import pytest
from ploomber.cloud import io
@pytest.mark.parametrize('file_size, max_size, expected', [
[2, 1, [(0, 1), (1, 2)]],
[7, 2, [(0, 2), (2, 4), (4, 6), (6, 7)]],
])
def test_yield_index(file_size, max_size, expected):
assert list(io.yield_index(file_size, max_size)) == expected
@pytest.mark.parametrize('i, j, expected', [
[4, 6, [4, 5]],
[10, 13, [10, 11, 12]],
])
def test_read_from_index(tmp_directory, i, j, expected):
Path('file').write_bytes(bytearray(range(256)))
out = io.read_from_index('file', i, j)
assert out == bytearray(expected)
def test_yield_parts(tmp_directory):
Path('file').write_bytes(bytearray(range(10)))
assert list(io.yield_parts('file', 3)) == [
bytearray([0, 1, 2]),
bytearray([3, 4, 5]),
bytearray([6, 7, 8]),
bytearray([9])
]
@pytest.mark.parametrize('n_bytes, max_size, expected', [
[10, 1, 10],
[10, 2, 5],
[10, 3, 4],
])
def test_n_parts(tmp_directory, n_bytes, max_size, expected):
Path('file').write_bytes(bytearray(range(n_bytes)))
assert io.n_parts('file', max_size) == expected
def test_generate_links(monkeypatch):
monkeypatch.setattr(io.boto3, 'client', Mock())
links = io.generate_links('bucket', 'file.csv', 'someid', 10)
assert len(links) == 10
```
#### File: tests/spec/test_dagspec_scriptrunner.py
```python
from pathlib import Path
import pytest
from ploomber.spec import DAGSpec
@pytest.fixture
def tmp_spec(tmp_directory):
Path('script.py').write_text("""
from pathlib import Path
# %% tags=["parameters"]
upstream = None
# %%
Path(product).touch()
""")
spec = {
'tasks': [{
'source': 'script.py',
'product': 'file.txt',
'class': 'ScriptRunner',
}]
}
return spec
def test_spec_with_scriptrunner(tmp_spec):
dag = DAGSpec(tmp_spec).to_dag()
dag.build()
```
#### File: ploomber/tests/test_repo.py
```python
import subprocess
from pathlib import Path
from ploomber import repo
from conftest import git_init
def test_is_repo_false(tmp_directory):
assert not repo.is_repo('.')
def test_is_repo_none(tmp_directory):
assert not repo.is_repo(None)
def test_is_repo_no_commits(tmp_directory):
subprocess.run(['git', 'init', '-b', 'mybranch'])
assert not repo.is_repo('.')
def test_is_repo(tmp_directory):
Path('file').touch()
git_init()
assert repo.is_repo('.')
def test_git_summary(tmp_git):
assert (repo.get_git_summary('.') == repo._run_command(
'.', command='git show --oneline -s'))
def test_git_hash_on_clean_commit(tmp_git):
assert repo.git_hash('.') == repo._run_command(
'.', command='git describe --always')
def test_git_hash_on_dirty_commit(tmp_git):
Path('another').touch()
subprocess.run(['git', 'add', '--all'])
hash_ = repo._run_command('.', command='git describe --always')
assert repo.git_hash('.') == f'{hash_}-dirty'
def test_git_hash_on_tag(tmp_git):
subprocess.run(['git', 'tag', 'my-tag'])
assert repo.git_hash('.') == 'my-tag'
def test_git_location_branch_tip(tmp_git):
subprocess.run(['git', 'tag', 'my-tag'])
assert repo.git_location('.') == 'mybranch'
def test_git_location_detached_head(tmp_git):
Path('another').touch()
subprocess.run(['git', 'add', '--all'])
subprocess.run(['git', 'commit', '-m', 'another'])
subprocess.run(['git', 'checkout', 'HEAD~1'])
hash_ = repo._run_command('.', command='git describe --always')
assert repo.git_location('.') == hash_
def test_git_location_detached_head_tag(tmp_git):
Path('another').touch()
subprocess.run(['git', 'add', '--all'])
subprocess.run(['git', 'commit', '-m', 'another'])
subprocess.run(['git', 'checkout', 'HEAD~1'])
subprocess.run(['git', 'tag', 'my-tag'])
assert repo.git_location('.') == 'my-tag'
def test_is_repo_git_not_installed(tmp_directory, monkeypatch):
monkeypatch.setattr(repo.shutil, 'which', lambda _: False)
git_init()
assert not repo.is_repo('')
def test_get_git_info():
git_info = repo.get_git_info('.')
assert set(git_info) == {
'git_summary', 'git_hash', 'git_diff', 'git_timestamp', 'git_branch',
'git_location'
}
``` |
{
"source": "aadityasp/Virtual_Mouse",
"score": 3
} |
#### File: aadityasp/Virtual_Mouse/virtualmouse.py
```python
import cv2
import numpy as np
import HandTracking as htm
import time
import autopy
cap = cv2.VideoCapture(0)
c_width, c_height = 640, 480
cap.set(3, c_width)
cap.set(4, c_height)
detector = htm.handDetector(maxHands=2)
# print(detector.maxHands)
xmin, xmax = int(c_width / 8), int(7 * c_width / 8)
ymin, ymax = int(c_height / 4), int(3 * c_height / 4)
bbox_default = xmin, ymin, xmax, ymax
# print("BBox[0]",bbox_default[0])
screen_width, screen_height = autopy.screen.size()
print(screen_width, screen_height)
click_flag = 0
smooth_factor = 5
# intersection = 0
x_before_smoothing, y_before_smoothing = 0, 0
x_after_smoothing, y_after_smoothing = 0, 0
def calculateIntersection(a0, a1, b0, b1):
if a0 >= b0 and a1 <= b1: # Contained
intersection = a1 - a0
elif a0 < b0 and a1 > b1: # Contains
intersection = b1 - b0
elif a0 < b0 < a1: # Intersects right
intersection = a1 - b0
elif a1 > b1 > a0: # Intersects left
intersection = b1 - a0
else: # No intersection (either side)
intersection = 0
return intersection
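# Quick sanity check (hypothetical values): the overlap of [0, 10] with [5, 20] is 5
# via the "intersects right" branch (a1 - b0), and disjoint intervals return 0.
# calculateIntersection(0, 10, 5, 20)  # -> 5
# calculateIntersection(0, 4, 5, 20)   # -> 0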
while True:
success, img = cap.read()
img = cv2.flip(img, 1)
# detector = htm.handDetector()
img = detector.findHands(img)
lmlst, bbox = detector.findPosition(img)
if bbox:
X0, Y0, X1, Y1, = bbox
AREA = float((X1 - X0) * (Y1 - Y0))
rectangle = [bbox_default]
# intersecting=[]
for x0, y0, x1, y1 in rectangle:
width = calculateIntersection(x0, x1, X0, X1)
height = calculateIntersection(y0, y1, Y0, Y1)
area = width * height
percent = area / AREA
# if percent >= 0.2:
# intersecting.append([x0, y0, x1, y1])
if percent >= 0.2:
# print("IN Active Range")
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)
# else:
# print("NOT IN Active Range")
# print( percent, intersecting)
# if not ((bbox[0] > bbox_default[2] or bbox[2] < bbox_default[0]) and (
# bbox[1] > bbox_default[3] or bbox[3] < bbox_default[1])):
# print("IN Active Range")
# else:
# print("NOT IN Active Range")
# cv2.rectangle(img, (xmin-20 , ymin ), (xmax +20 , ymax ), (0, 0,255), 2) #active region
if len(lmlst) != 0:
x1, y1 = lmlst[8][1:]
x2, y2 = lmlst[12][1:]
fingers_upright = detector.fingersUp(img)
# print(fingers_upright)
if fingers_upright[1] == 1 and fingers_upright[2] == 1:
# print("Wid,height=", c_width, c_height)
new_x = np.interp(x1, (bbox_default[0], c_width - bbox_default[0]), (0, screen_width))
new_y = np.interp(y1, (bbox_default[1], c_height - bbox_default[1]), (0, screen_height))
x_after_smoothing = x_before_smoothing + (new_x - x_before_smoothing) / smooth_factor
y_after_smoothing = y_before_smoothing + (new_y - y_before_smoothing) / smooth_factor
cv2.circle(img, (x1, y1), 10, (123, 123, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), 10, (123, 123, 255), cv2.FILLED)
# print("X!==", x1)
# print(new_x, new_y)
scale = autopy.screen.scale()
try:
# autopy.mouse.smooth_move(new_x/scale, new_y/scale)
autopy.mouse.move(x_after_smoothing, y_after_smoothing) # (new_x, new_y)
                x_before_smoothing, y_before_smoothing = x_after_smoothing, y_after_smoothing
except:
print("OUT of Bounds")
joints = [[8, 6, 5]]
img, angle = detector.findAnglebetween(joints, detector.results, img)
# print("ANGLE==", angle)
try:
if angle < 160:
click_flag = 1
if click_flag == 1:
print("Clicking")
autopy.mouse.click()
click_flag = 0
except:
print("Unable to click")
cv2.imshow("Hand Tracking", img)
cv2.waitKey(1)
``` |
{
"source": "aadm/ASAP",
"score": 2
} |
#### File: aadm/ASAP/asap_library.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import bruges as b
from scipy.interpolate import interp1d
import os
topbox = dict(boxstyle='round', ec='none', fc='w', alpha=0.6)
format_tops={'fontsize':10, 'color':'blue', 'ha':'right', 'bbox':topbox}
format_title={'fontsize':14, 'weight':'bold'}
# styling for the top markers drawn in welltime(); values chosen to match quicklook()
color_top, alpha_top = 'blue', 0.8
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def contactcement(K0, G0, phi, phi_c=0.4, Cn=8.6, Kc=37, Gc=45, scheme=2):
PR0=(3*K0-2*G0)/(6*K0+2*G0)
PRc = (3*Kc-2*Gc)/(6*Kc+2*Gc)
if scheme == 1: # scheme 1: cement deposited at grain contacts
alpha = ((phi_c-phi)/(3*Cn*(1-phi_c))) ** (1/4)
else: # scheme 2: cement evenly deposited on grain surface
alpha = ((2*(phi_c-phi))/(3*(1-phi_c)))**(1/2)
LambdaN = (2*Gc*(1-PR0)*(1-PRc)) / (np.pi*G0*(1-2*PRc))
N1 = -0.024153*LambdaN**-1.3646
N2 = 0.20405*LambdaN**-0.89008
N3 = 0.00024649*LambdaN**-1.9864
Sn = N1*alpha**2 + N2*alpha + N3
LambdaT = Gc/(np.pi*G0)
T1 = -10**-2*(2.26*PR0**2+2.07*PR0+2.3)*LambdaT**(0.079*PR0**2+0.1754*PR0-1.342)
T2 = (0.0573*PR0**2+0.0937*PR0+0.202)*LambdaT**(0.0274*PR0**2+0.0529*PR0-0.8765)
T3 = 10**-4*(9.654*PR0**2+4.945*PR0+3.1)*LambdaT**(0.01867*PR0**2+0.4011*PR0-1.8186)
St = T1*alpha**2 + T2*alpha + T3
K_DRY = 1/6*Cn*(1-phi_c)*(Kc+(4/3)*Gc)*Sn
G_DRY = 3/5*K_DRY+3/20*Cn*(1-phi_c)*Gc*St
return K_DRY, G_DRY
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def constantcement(K0, G0, phi, phi_cem=0.38, phi_c=0.4, Cn=8.6, Kc=37, Gc=45, scheme=2):
# contact cement model
K_HI, G_HI = contactcement(K0, G0, phi, phi_c=phi_c, Cn=Cn, Kc=Kc, Gc=Gc, scheme=scheme)
# lower bound Hashin-Shtrikman starting from phi_cem
Kcc, Gcc = contactcement(K0, G0, phi_cem, phi_c=phi_c, Cn=Cn, Kc=Kc, Gc=Gc, scheme=scheme)
K_LO = -4/3*Gcc + (((phi/phi_cem)/(Kcc+4/3*Gcc)) + ((1-phi/phi_cem)/(K0+4/3*Gcc)))**-1
tmp = Gcc/6*((9*Kcc+8*Gcc) / (Kcc+2*Gcc))
G_LO= -tmp + ((phi/phi_cem)/(Gcc+tmp) + ((1-phi/phi_cem)/(G0+tmp)))**-1
# initialize empty vectors for K and G dry
K_DRY, G_DRY=(np.full(phi.size, np.nan) for _ in range(2))
# for porosities>phi_cem use [K,G]_HI = contact cement model
# for porosities<=phi_cem use [K,G]_LO = constant cement model
K_DRY[phi>phi_cem]=K_HI[phi>phi_cem]
K_DRY[phi<=phi_cem]=K_LO[phi<=phi_cem]
G_DRY[phi>phi_cem]=G_HI[phi>phi_cem]
G_DRY[phi<=phi_cem]=G_LO[phi<=phi_cem]
return K_DRY, G_DRY
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def inccement(K0, G0, phi, phi_cem=0.38, phi_c=0.4, Cn=8.6, Kc=37, Gc=45, scheme=2):
Kcc, Gcc = contactcement(K0, G0, phi_cem, phi_c=phi_c, Cn=Cn, Kc=Kc, Gc=Gc, scheme=scheme)
K_DRY = -4/3*G0 + (((phi/phi_cem)/(Kcc+4/3*G0)) + ((1-phi/phi_cem)/(K0+4/3*G0)))**-1
tmp = G0/6*((9*K0+8*G0) / (K0+2*G0))
G_DRY = -tmp + ((phi/phi_cem)/(Gcc+tmp) + ((1-phi/phi_cem)/(G0+tmp)))**-1
return K_DRY, G_DRY
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def vels(k_dry,mu_dry,k_min,rho_min,k_fl,rho_fl,phi):
# converts all inputs to SI (density in kg/m3 and moduli in Pa)
KD = k_dry*1e9
GD = mu_dry*1e9
K0 = k_min*1e9
D0 = rho_min*1e3
Kf = k_fl*1e9
Df = rho_fl*1e3
rho = D0*(1-phi)+Df*phi
K = KD + (1-KD/K0)**2 / ( (phi/Kf) + ((1-phi)/K0) - (KD/K0**2) )
vp = np.sqrt((K+4/3*GD)/rho)
vs = np.sqrt(GD/rho)
return vp, vs, rho/1e3, K/1e9
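# Minimal usage sketch (illustrative numbers, not taken from any dataset): build a
# dry-rock model with the constant-cement model above, then saturate it via Gassmann
# inside vels(); the quartz mineral and brine properties below are assumed placeholders.
# phi = np.linspace(0.05, 0.35, 50)
# k_dry, mu_dry = constantcement(36.6, 45.0, phi)                      # moduli in GPa
# vp, vs, rho, k_sat = vels(k_dry, mu_dry, 36.6, 2.65, 2.8, 1.09, phi)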
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def blok_plot(REF,BLK,ztop=None,zbot=None):
if ztop is None: ztop = BLK.index.min()
if zbot is None: zbot = BLK.index.max()
REF=REF[(REF.index>=ztop) & (REF.index<=zbot)]
REF.columns=REF.columns.str.upper()
BLK.columns=BLK.columns.str.upper()
f, ax = plt.subplots(nrows=1,ncols=5,sharey=True,figsize=(8,5))
ax[0].plot(REF.VSH, REF.index, color='.5')
ax[0].set_xlabel('Vsh', color='.5')
ax[1].plot(REF.PHIE, REF.index, color='.5')
ax[1].set_xlabel('phi', color='.5')
ax[2].plot(REF.VP_FRMB, REF.index, color='black')
ax[2].plot(BLK.VP, BLK.index, color='red', lw=4)
ax[2].set_xlabel('Vp [m/s]')
ax[3].plot(REF.VS_FRMB, REF.index, color='black')
ax[3].plot(BLK.VS, BLK.index, color='red', lw=4)
ax[3].set_xlabel('Vs [m/s]')
ax[4].plot(REF.RHO_FRMB, REF.index, color='black')
ax[4].plot(BLK.RHO, BLK.index, color='red', lw=4)
ax[4].set_xlabel('Density [g/cc]')
ax[0].set_ylim(zbot,ztop)
plt.tight_layout()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def generate_wells(WW,top_blocks,name,output_dir):
# top_blocks is an array with 4 values defining 3 blocks:
# block 1 = shallow block, caprock
# block 2 = central block, reservoir
# block 3 = deep block
z = WW.index.values
nblks = np.size(top_blocks)-1
# compute and store average vp,vs,rho for fluids 1 (brine) and 2 (oil)
vp1_k,vs1_k,rho1_k,vp2_k,vs2_k,rho2_k,phi_k = (np.zeros(nblks) for _ in range(7))
for nn in range(nblks):
rr = (WW.index>=top_blocks[nn]) & (WW.index<top_blocks[nn+1])
vp1_k[nn] = WW.VP_FRMB[rr].mean()
vs1_k[nn] = WW.VS_FRMB[rr].mean()
rho1_k[nn] = WW.RHO_FRMB[rr].mean()
vp2_k[nn] = WW.VP_FRMO[rr].mean()
vs2_k[nn] = WW.VS_FRMO[rr].mean()
rho2_k[nn] = WW.RHO_FRMO[rr].mean()
# range of variations for thickness (vTH), water saturation (vSW) and porosity (vPH)
vTH= np.linspace(0,60,4)
vSW= np.linspace(0,1,5)
vPH = [-.1, -.05, 0, .05]
# output depths will be within top block 1 and base block 3
z_out = z[(z>=top_blocks[0]) & (z<=top_blocks[3])]
# mm0 = to select depths above reservoir
mm0 = (z_out>=top_blocks[0]) & (z_out<top_blocks[1])
# loop over thickness variations
for tt in vTH:
# initialize vp,vs,rho for both fluids with values taken from block 3
vp1_TH = np.ones(z_out.size)*vp1_k[2]
vs1_TH = np.ones(z_out.size)*vs1_k[2]
rho1_TH = np.ones(z_out.size)*rho1_k[2]
vp2_TH = np.ones(z_out.size)*vp2_k[2]
vs2_TH = np.ones(z_out.size)*vs2_k[2]
rho2_TH = np.ones(z_out.size)*rho2_k[2]
# sets vp,vs,rho for block 1
vp1_TH[mm0] = vp1_k[0]
vs1_TH[mm0] = vs1_k[0]
rho1_TH[mm0] = rho1_k[0]
vp2_TH[mm0] = vp2_k[0]
vs2_TH[mm0] = vs2_k[0]
rho2_TH[mm0] = rho2_k[0]
# mm1 = to select depths in block 2=reservoir (base varies with thickness variations)
mm1 = (z_out>=top_blocks[1]) & (z_out<top_blocks[1]+tt)
# sets vp,vs,rho for block 2
vp1_TH[mm1] = vp1_k[1]
vs1_TH[mm1] = vs1_k[1]
rho1_TH[mm1] = rho1_k[1]
vp2_TH[mm1] = vp2_k[1]
vs2_TH[mm1] = vs2_k[1]
rho2_TH[mm1] = rho2_k[1]
# loop over water saturation variations
for ss in vSW:
# modify velocities and density according to saturation changes
# assume a linear change from brine to oil filled rock
vp_SW = ss*vp1_TH + (1-ss)*vp2_TH
vs_SW = ss*vs1_TH + (1-ss)*vs2_TH
rho_SW = ss*rho1_TH + (1-ss)*rho2_TH
# loop over porosity variations
for pp in vPH:
# modify velocities and density according to porosity changes
# assume linear porosity-velocity ratios in the limited range of study
# relations are given by rock physics study
dvp_b = -pp*4500
dvp_o = -pp*4500
dvs_b = -pp*3100
dvs_o = -pp*3050
dvs = ss*dvs_b + (1-ss)*dvs_o
dvp = ss*dvp_b + (1-ss)*dvp_o
drho_b = (2.65-1.0)*-pp
drho_o = (2.65-0.8)*-pp
drho = ss*drho_b + (1-ss)*drho_o
# calculate new parameters with varying saturations
vp_out=np.copy(vp_SW); vs_out=np.copy(vs_SW); rho_out=np.copy(rho_SW);
vp_out[mm1] = vp_SW[mm1] + dvp
vs_out[mm1] = vs_SW[mm1] + dvs
rho_out[mm1] = rho_SW[mm1] + drho
d_out = {'DEPTH':z_out, 'VP': vp_out, 'VS': vs_out, 'RHO': rho_out}
df_out = pd.DataFrame(data=d_out)
filename = '{:s}_Z{:02d}_Sw{:03d}_Por{:02d}'.format(name,int(np.round(tt)),int(100*ss),int((0.3+pp)*100))
df_out.to_csv(output_dir+'/'+filename, index=False)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_well_files(wells_dir, name):
well_files = []
for dirpath, _, filenames in os.walk(wells_dir):
well_files += [os.path.join(dirpath, f) for f in filenames if f.startswith(name)]
return well_files
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_nears_fars(df):
sample_size = 64
number_of_splits = abs(df['NEAR'].size/64)
nears = np.array_split(df['NEAR'].values, number_of_splits)
fars = np.array_split(df['FAR'].values, number_of_splits)
nears = np.asarray(nears).transpose()
fars = np.asarray(fars).transpose()
return nears, fars
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_twt(tdr,z):
tt=tdr[:,1]
zz=tdr[:,0]
d2t = interp1d(zz, tt, kind='linear', bounds_error=False, fill_value='extrapolate')
return d2t(z)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def make_synt(WW,ang,wavelet,method='shuey'):
'''
WW: Pandas dataframe with VP, VS, RHO (optionally EPSILON and DELTA)
ang: angle range, define with ang=np.arange(0,50,1)
wavelet
method: 'shuey' (Shuey 2-terms), 'shuey3' (Shuey 3-terms),
'aki' (Aki-Richards)
'''
WW.columns=WW.columns.str.upper()
uvp, lvp = WW.VP.values[:-1], WW.VP.values[1:]
uvs, lvs = WW.VS.values[:-1], WW.VS.values[1:]
urho, lrho = WW.RHO.values[:-1], WW.RHO.values[1:]
z=WW.index.values # z is two-way-time
synt = np.zeros((z.size,ang.size))
#--> calculate reflectivities with AVO equation,
#--> convolve with input wavelet and fill in traces of synthetic seismogram
for i,alpha in enumerate(ang):
        if method == 'shuey':
RC = shuey(uvp,uvs,urho,lvp,lvs,lrho,alpha)
        elif method == 'shuey3':
RC = shuey(uvp,uvs,urho,lvp,lvs,lrho,alpha,approx=False)
else:
RC = akirichards(uvp,uvs,urho,lvp,lvs,lrho,alpha)
RC = np.append(np.nan, RC)
RC = np.nan_to_num(RC)
synt[:,i] = np.convolve(RC, wavelet, mode='same')
return RC, synt
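# Usage sketch (hypothetical inputs): WWt would be a well-log DataFrame indexed in TWT
# with VP, VS, RHO columns, and the wavelet could be a 25 Hz Ricker, e.g. from bruges
# (exact bruges API may differ between versions).
# wavelet = b.filters.ricker(duration=0.25, dt=0.001, f=25)
# ang = np.arange(0, 31, 1)
# rc, synt = make_synt(WWt, ang, wavelet, method='shuey')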
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def plot_synt(WW,synt,ztop,zbot,gain=10):
'''
WW: Pandas dataframe with VP, VS, RHO (in time)
synt: synthetic seismogram computed with make_synt (in time)
ztop,zbot: display window
    gain: multiplier to be applied to wiggles (default=10)
'''
WW.columns=WW.columns.str.upper()
it1=np.abs(WW.index-ztop).argmin()
it2=np.abs(WW.index-zbot).argmin()
ss = synt[it1:it2,:]
clip=np.abs(synt.max())
f,ax=plt.subplots(nrows=1,ncols=5)
opz1={'color':'k','linewidth':.5}
opz2={'linewidth':0, 'alpha':0.6}
ax[0].plot(WW.VP*WW.RHO,WW.index,'-k')
ax[1].plot(WW.VP/WW.VS,WW.index,'-k')
ax[2].plot(synt[:,0],WW.index,'-k')
ax[3].plot(synt[:,1],WW.index,'-k')
im=ax[4].imshow(ss,interpolation='none', cmap='Greys',aspect='auto')
cbar=plt.colorbar(im, ax=ax[4])
ax[0].set_xlabel('AI [m/s*g/cc]')
ax[0].set_ylabel('TWT [s]')
ax[1].set_xlabel('Vp/Vs')
ax[2].set_title('Near')
ax[3].set_title('Far')
ax[4].set_title('Near|Far')
for aa in ax[:4]:
aa.set_ylim(zbot,ztop)
aa.grid()
for aa in ax[1:]:
aa.set_yticklabels([])
for aa in ax[2:4]:
aa.set_xlim(-clip,+clip)
aa.set_xticklabels([])
for aa in ax[:2]:
aa.xaxis.tick_top()
plt.setp(aa.xaxis.get_majorticklabels(), rotation=90)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def td(WW,sr=0.1524,KB=0,WD=0,repl_vel=1600):
'''
td (C) aadm 2016
Calculates time-depth relation by sonic integration.
INPUT
WW: Pandas dataframe
sr: depth sampling rate (m, default 0.1524 m = half-foot)
KB: kelly bushing elevation (m, default 0)
WD: water depth (m, default 0)
repl_vel: replacement velocity for overburden i.e. interval between seafloor and beginning of the logs
(m/s, default 1600)
OUTPUT
numpy array with 2 columns: 0=depth, 1=twt (secs)
'''
WW.columns=WW.columns.str.upper()
if 'TVD' in WW.columns:
depth = WW.TVD.values
else:
depth = WW.index.values
WW.VP.interpolate(inplace=True)
sonic=1/WW.VP.values # VP in m/s
start = depth.min()
water_vel = 1480
wb_twt = 2.0*WD/water_vel
sonic_start=depth[np.isfinite(sonic)].min()
sonic_start_twt=2*(sonic_start-KB-WD)/repl_vel + wb_twt
scaled_sonic = sr*sonic[depth>=sonic_start]
twt = 2*np.cumsum(scaled_sonic) + sonic_start_twt
print('[TD] water bottom two-way-time: {:.3f} [s]'.format(wb_twt))
print('[TD] sonic log start: {:.3f} [m] = {:.3f} [s]'.format(sonic_start, sonic_start_twt))
print('[TD] computed twt scale range: {:.3f}-{:.3f} [s]'.format(twt.min(),twt.max()))
return np.column_stack((depth[depth>=sonic_start],twt))
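# Typical call pattern (sketch; datum values are placeholders): integrate the sonic to
# get a time-depth table, then resample the logs to time with welltime() below.
# tdr = td(WW, sr=0.1524, KB=25.0, WD=350.0)
# WWt, t_top, t_bot = welltime(WW, tdr, dt=0.001, ztop=2100, zbot=2300)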
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def welltime(WW,tdr,dt=0.001,ztop=None,zbot=None,name=None,tops=None,qcplot=True):
'''
welltime (C) aadm 2016
Converts logs sampled in depth to time using a reference time-depth function.
Load existing t-d with tdr=np.loadtxt('TD.dat', delimiter=',')
or use squit.well.td to create one.
INPUT
WW: Pandas dataframe
tdr: time-depth table: numpy array shape Nx2, column 0 = MD [m], column 1 = TWT [s]
dt: sample rate in seconds
ztop,zbot: display window (defaults to min,max depth)
name: well name (or anything else) to print
tops: dictionary containing stratigraphic tops, e.g.: tops={'Trias': 1200,'Permian': 2310}
or Pandas Series, e.g: tops=pd.Series({'Trias': 1200,'Permian': 2310})
OUTPUT
Pandas dataframe with logs sampled in time
ztop, zbot converted to TWT
'''
WW.columns=WW.columns.str.upper()
flagfrm=True if 'VP_FRMB' in WW.columns else False
z = WW.index
if ztop is None: ztop = z.min()
if zbot is None: zbot = z.max()
flagtops=False if tops is None else True
if flagtops:
if not isinstance(tops, pd.Series):
tops=pd.Series(tops)
tops=tops.dropna().sort_values()
#--> load depth-time relationship (depth is MD)
tt=tdr[:,1]
zz=tdr[:,0]
#--> twt reference log sampled like depth reference log
# twt_log = np.interp(z, zz, tt, left=np.nan, right=np.nan)
ff=(z>=zz.min()) & (z<=zz.max())
twt_log = np.interp(z[ff], zz, tt, left=np.nan, right=np.nan)
#--> interpolant to convert depths to times on the fly (e.g., tops)
d2t = interp1d(zz, tt, kind='linear', bounds_error=False, fill_value='extrapolate')
if qcplot:
print('[WELLTIME] plot window top, bottom [m]:{:.0f}-{:.0f}, [s]:{:.4f}-{:.4f}'.format(ztop,zbot,float(d2t(ztop)),float(d2t(zbot))))
#--> regularly-sampled twt scale and its depth (MD) equivalent on the basis of depth-time rel.
twt = np.arange(0, tt.max(), dt)
zt = np.interp(x=twt, xp=tt, fp=zz, left=np.nan, right=np.nan)
#--> resample logs to twt
WWt=pd.DataFrame(data=zt, columns=['DEPTH'], index=twt)
WWt.index.rename('TWT',inplace=True)
loglist=WW.columns
for i in loglist:
tmp = np.interp(x=twt, xp=twt_log, fp=WW[i][ff].values, left=np.NaN, right=np.NaN)
WWt=pd.concat([WWt,pd.Series(tmp, index=twt, name=i)],axis=1)
WWt.interpolate(inplace=True)
WWt.fillna(method = 'bfill',inplace=True)
#--> QC plot with IP in depth and time
if qcplot:
tmp_IP = WW['VP']*WW['RHO']
tmp_IPt = WWt['VP']*WWt['RHO']
plotmax = tmp_IP[(z>=ztop) & (z<=zbot)].max()
plotmin = tmp_IP[(z>=ztop) & (z<=zbot)].min()
plotmax += plotmax*.1
plotmin -= plotmin*.1
f, ax = plt.subplots(nrows=1,ncols=2,figsize=(5,5), facecolor='w')
ax[0].plot(tmp_IP, z, '-k')
ax[1].plot(tmp_IPt, WWt.index, '-k')
ax[0].set_xlabel('AI [m/s*g/cc]'), ax[0].set_ylabel('MD [m]')
ax[1].set_xlabel('AI [m/s*g/cc]'), ax[1].set_ylabel('TWT [s]')
ax[1].yaxis.set_label_position('right')
ax[1].yaxis.set_ticks_position('right')
ax[0].set_ylim(ztop,zbot)
ax[1].set_ylim(d2t(ztop),d2t(zbot))
for aa in ax.flatten():
aa.invert_yaxis()
aa.grid()
aa.xaxis.tick_top()
plt.setp(aa.xaxis.get_majorticklabels(), rotation=90, fontsize=8)
# aa.set_xlim(plotmin,plotmax)
if flagtops: # plot top markers on all columns
            for topn,topz in tops.items():
if (topz>=ztop) & (topz<=zbot):
ax[0].axhline(y=topz,color=color_top,alpha=alpha_top)
ax[0].text(x=plotmax,y=topz,s=topn,**format_tops)
ax[1].axhline(y=d2t(topz),color=color_top,alpha=alpha_top)
ax[1].text(x=plotmax,y=d2t(topz),s=topn,**format_tops)
if name is not None:
plt.suptitle(name, **format_title)
plt.tight_layout()
return WWt,float(d2t(ztop)),float(d2t(zbot))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def shuey(vp1, vs1, rho1, vp2, vs2, rho2, theta, approx=True, terms=False):
'''
shuey (C) aadm 2016
Calculates P-wave reflectivity with Shuey's equation
reference:
Avseth et al. (2005), Quantitative Seismic Interpretation, Cambridge University Press (p.182)
INPUT
vp1, vs1, rho1: P-, S-wave velocity (m/s) and density (g/cm3) of upper medium
vp2, vs2, rho2: P-, S-wave velocity (m/s) and density (g/cm3) of lower medium
theta: angle of incidence (degree)
approx: returns approximate (2-term) form (default: True)
terms: returns reflectivity, intercept and gradient (default: False)
OUTPUT
reflectivity (and optionally intercept, gradient; see terms option) at angle theta
'''
a = np.radians(theta)
dvp = vp2-vp1
dvs = vs2-vs1
drho = rho2-rho1
vp = np.mean([vp1,vp2])
vs = np.mean([vs1,vs2])
rho = np.mean([rho1,rho2])
R0 = 0.5*(dvp/vp + drho/rho)
G = 0.5*(dvp/vp) - 2*(vs**2/vp**2)*(drho/rho+2*(dvs/vs))
F = 0.5*(dvp/vp)
if approx:
R = R0 + G*np.sin(a)**2
else:
R = R0 + G*np.sin(a)**2 + F*(np.tan(a)**2-np.sin(a)**2)
if terms:
return R,R0,G
else:
return R
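# Worked example (hypothetical shale-over-sand contrast): with
# shuey(2750, 1400, 2.3, 3000, 1800, 2.2, theta) the intercept R0 is ~+0.02 and the
# gradient is strongly negative (~-0.24), so the 2-term reflectivity falls to roughly
# -0.04 by theta=30 degrees, i.e. amplitude decreasing (and reversing polarity) with offset.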
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def akirichards(vp1, vs1, rho1, vp2, vs2, rho2, theta):
'''
Aki-Richards (C) aadm 2017
Calculates P-wave reflectivity with Aki-Richards approximate equation
only valid for small layer contrasts.
reference:
Mavko et al. (2009), The Rock Physics Handbook, Cambridge University Press (p.182)
INPUT
vp1, vs1, rho1: P-, S-wave velocity (m/s) and density (g/cm3) of upper medium
vp2, vs2, rho2: P-, S-wave velocity (m/s) and density (g/cm3) of lower medium
theta: angle of incidence (degree)
OUTPUT
reflectivity at angle theta
'''
a = np.radians(theta)
p = np.sin(a)/vp1
dvp = vp2-vp1
dvs = vs2-vs1
drho = rho2-rho1
vp = np.mean([vp1,vp2])
vs = np.mean([vs1,vs2])
rho = np.mean([rho1,rho2])
A = 0.5*(1-4*p**2*vs**2)*drho/rho
B = 1/(2*np.cos(a)**2) * dvp/vp
C = 4*p**2*vs**2*dvs/vs
R = A + B - C
return R
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def quicklook(WW,ztop=None,zbot=None,name=None,tops=None):
'''
quicklook (C) aadm 2015-2018
Summary well plot with raw and processed logs.
INPUT
WW: Pandas dataframe with VP, [VS], RHO, IP, [VPVS or PR], [SWE], PHIE, VSH
ztop,zbot: depth range to plot (defaults to min,max depth)
name: well name (or anything else) to print
tops: dictionary containing stratigraphic tops, e.g.: tops={'Trias': 1200,'Permian': 2310}
or Pandas Series, e.g: tops=pd.Series({'Trias': 1200,'Permian': 2310})
'''
if ztop is None: ztop = WW.index.min()
if zbot is None: zbot = WW.index.max()
WW=WW[(WW.index>=ztop) & (WW.index<=zbot)]
WW.columns=WW.columns.str.upper()
flagtops=False if tops is None else True
if flagtops:
if not isinstance(tops, pd.Series):
tops=pd.Series(tops)
tops=tops.dropna().sort_values()
f, ax = plt.subplots(nrows=1,ncols=4,sharey=True,figsize=(8,5))
ax[0].plot(WW.VSH, WW.index, color='.5')
ax[0].set_xlabel('Vsh', color='.5')
ax[1].plot(WW.PHIE, WW.index, color='.5')
ax[1].set_xlabel('phi', color='.5')
ax[2].plot(WW.VP*WW.RHO, WW.index, 'black')
ax[2].set_xlabel('AI [m/s*g/cc]')
ax[3].plot(WW.VP/WW.VS, WW.index, 'black')
ax[3].set_xlabel('Vp/Vs')
rlims = ax[3].get_xlim()
for i,aa in enumerate(ax):
if flagtops: # plot top markers on all columns
            for topn,topz in tops.items():
if (topz>=ztop) & (topz<=zbot):
aa.axhline(y=topz,color='blue',alpha=.8)
                    if i == 3: # plot top name also on last column
aa.text(x=rlims[1]+rlims[1]*.01,y=topz,s=topn,**format_tops)
if name is not None:
plt.suptitle(name, **format_title)
ax[0].set_ylim(zbot,ztop)
``` |
{
"source": "aadottori/AutomatizandoFaturas",
"score": 3
} |
#### File: aadottori/AutomatizandoFaturas/enel.py
```python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
def enel(codigo_cliente, cpf_cliente):
firefox = webdriver.Firefox()
firefox.get('https://www.eneldistribuicao.com.br/rj/LoginAcessoRapidoSegundaVia.aspx')
path_codigo_cliente = "ctl00$CONTENT$NumeroCliente"
path_cpf_cliente = "ctl00$CONTENT$Documento"
path_ok = "ctl00$CONTENT$Ok"
firefox.find_element_by_name(path_codigo_cliente).send_keys(codigo_cliente)
firefox.find_element_by_name(path_cpf_cliente).send_keys(cpf_cliente)
firefox.find_element_by_name(path_ok).click()
time.sleep(3)
status_ultima_fatura = '/html/body/form/div[4]/div/div/div[2]/div[2]/div[6]/div/div/table/tbody/tr[2]/td[8]'
status = firefox.find_element_by_xpath(status_ultima_fatura).text
if status == "EM ABERTO":
checkbox_ultima_fatura = 'ctl00$CONTENT$segviarapida$GridViewSegVia$ctl02$CheckFatura'
firefox.find_element_by_name(checkbox_ultima_fatura).click()
time.sleep(3)
salvar_em_pdf = 'ctl00$CONTENT$segviarapida$btnSalvarPDF'
firefox.find_element_by_name(salvar_em_pdf).click()
else:
return "Nada a pagar."
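# Example call (placeholder credentials, illustration only): downloads the latest
# open invoice as a PDF when its status is "EM ABERTO".
# enel("12345678", "00000000000")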
``` |
{
"source": "aad/playground",
"score": 4
} |
#### File: python/mars_rover_kata/rover.py
```python
class Position:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
class Grid:
def __init__(self, x=50, y=50, obstacles=None):
self.x = x
self.y = y
self.obstacles = obstacles if obstacles else []
class Rover:
def __init__(self, position=Position(), aspect="N"):
self.position = position
self.aspect = aspect
self.commands = None
self.grid = None
self.f = self.move_forward
self.b = self.move_backward
self.l = self.turn_left
self.r = self.turn_right
def load_grid(self, grid):
self.grid = grid
def get_position(self):
return (self.position.x, self.position.y)
def receive_commands(self, commands):
self.commands = commands if isinstance(commands, list) else list(commands)
def detect_obstacles(self, next_move):
next_x = self.position.x + next_move[0]
next_y = self.position.y + next_move[1]
if [next_x, next_y] in self.grid.obstacles:
raise Exception(f"obstacle {[next_x, next_y]} detected")
def guess_next_move(self, direction):
forward_aspect_mapping = {
"N": [0, 1],
"S": [0, -1],
"W": [-1, 0],
"E": [1, 0]
}
return {
"f": forward_aspect_mapping[self.aspect],
"b": [-i for i in forward_aspect_mapping[self.aspect]],
}[direction]
def get_next_aspect(self, direction):
turn_aspect_mapping = {
"N": ["W", "E"],
"S": ["E", "W"],
"W": ["S", "N"],
"E": ["N", "S"],
}
return {
"l": turn_aspect_mapping[self.aspect][0],
"r": turn_aspect_mapping[self.aspect][1],
}[direction]
def move_forward(self):
# self.position.x, self.position.y += self.guess_next_move("f")
next_move = self.guess_next_move("f")
self.detect_obstacles(next_move)
self.position.x += next_move[0]
self.position.y += next_move[1]
def move_backward(self):
next_move = self.guess_next_move("b")
self.detect_obstacles(next_move)
self.position.x += next_move[0]
self.position.y += next_move[1]
def turn_left(self):
self.aspect = self.get_next_aspect("l")
def turn_right(self):
self.aspect = self.get_next_aspect("r")
def rove(self):
for command in self.commands:
method_name = getattr(self, command)
if callable(method_name):
method_name()
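# Usage sketch (hypothetical grid and command string):
# rover = Rover(Position(0, 0), aspect="N")
# rover.load_grid(Grid(100, 100, obstacles=[[2, 5]]))
# rover.receive_commands("ffrff")
# rover.rove()
# rover.get_position()  # -> (2, 2), with the rover now facing "E"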
``` |
{
"source": "aadps/kaldi",
"score": 2
} |
#### File: s10/chain/inference.py
```python
import logging
import os
import sys
import math
import torch
from torch.utils.dlpack import to_dlpack
import kaldi
from common import load_checkpoint
from common import setup_logger
from device_utils import allocate_gpu_devices
from feat_dataset import get_feat_dataloader
from model import get_chain_model
from options import get_args
def main():
args = get_args()
setup_logger('{}/log-inference'.format(args.dir), args.log_level)
logging.info(' '.join(sys.argv))
    if not torch.cuda.is_available():
logging.warning('No GPU detected! Use CPU for inference.')
device = torch.device('cpu')
else:
if args.device_ids != None and len(args.device_ids) > 0:
device_id = args.device_ids[0]
else:
devices = allocate_gpu_devices(1)
if len(devices) != 1:
logging.error('Allocate GPU failed!')
sys.exit(-1)
device_id = devices[0][0]
logging.info('device: {}'.format(device_id))
device = torch.device('cuda', device_id)
model = get_chain_model(
feat_dim=args.feat_dim,
output_dim=args.output_dim,
ivector_dim=args.ivector_dim,
lda_mat_filename=args.lda_mat_filename,
hidden_dim=args.hidden_dim,
bottleneck_dim=args.bottleneck_dim,
prefinal_bottleneck_dim=args.prefinal_bottleneck_dim,
kernel_size_list=args.kernel_size_list,
subsampling_factor_list=args.subsampling_factor_list)
load_checkpoint(args.checkpoint, model)
model.to(device)
model.eval()
specifier = 'ark,scp:{filename}.ark,{filename}.scp'.format(
filename=os.path.join(args.dir, 'nnet_output'))
if args.save_as_compressed:
Writer = kaldi.CompressedMatrixWriter
Matrix = kaldi.CompressedMatrix
else:
Writer = kaldi.MatrixWriter
Matrix = kaldi.FloatMatrix
writer = Writer(specifier)
dataloader = get_feat_dataloader(
feats_scp=args.feats_scp,
ivector_scp=args.ivector_scp,
model_left_context=args.model_left_context,
model_right_context=args.model_right_context,
batch_size=32,
num_workers=10)
subsampling_factor = 3
subsampled_frames_per_chunk = args.frames_per_chunk // subsampling_factor
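    # Worked example (hypothetical numbers, added for illustration): if
    # args.frames_per_chunk were 150, each chunk covers 150 // 3 = 50 subsampled
    # frames, so an utterance with output_len = 120 spans ceil(120 / 50) = 3
    # consecutive rows of the padded batch, which are concatenated below and
    # trimmed back to 120 frames.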
for batch_idx, batch in enumerate(dataloader):
key_list, padded_feat, output_len_list = batch
padded_feat = padded_feat.to(device)
with torch.no_grad():
nnet_output, _ = model(padded_feat)
num = len(key_list)
first = 0
for i in range(num):
key = key_list[i]
output_len = output_len_list[i]
target_len = math.ceil(output_len / subsampled_frames_per_chunk)
result = nnet_output[first:first + target_len, :, :].split(1, 0)
value = torch.cat(result, dim=1)[0, :output_len, :]
value = value.cpu()
first += target_len
m = kaldi.SubMatrixFromDLPack(to_dlpack(value))
m = Matrix(m)
writer.Write(key, m)
if batch_idx % 10 == 0:
logging.info('Processed batch {}/{} ({:.6f}%)'.format(
batch_idx, len(dataloader),
float(batch_idx) / len(dataloader) * 100))
writer.Close()
logging.info('pseudo-log-likelihood is saved to {}'.format(
os.path.join(args.dir, 'nnet_output.scp')))
if __name__ == '__main__':
main()
```
#### File: pybind/decoder/lattice_faster_decoder_pybind_test.py
```python
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import unittest
import kaldi
class TestLatticeFasterDecoder(unittest.TestCase):
def test_lattice_faster_decoder_config(self):
opts = kaldi.LatticeFasterDecoderConfig()
print('default value for LatticeFasterDecoderConfig:')
print(opts)
def test_lattice_faster_decoder_config_parse_options(self):
usage = 'testing'
parse_options = kaldi.ParseOptions(usage)
argv = [
'a.out', '--print-args=false', '--beam=20', '--max-active=7000',
'a.scp', 'b.scp'
]
opts = kaldi.LatticeFasterDecoderConfig()
opts.Register(parse_options)
parse_options.Read(argv)
self.assertEqual(parse_options.NumArgs(), 2)
self.assertEqual(parse_options.GetArg(1), 'a.scp')
self.assertEqual(parse_options.GetArg(2), 'b.scp')
self.assertEqual(opts.beam, 20)
self.assertEqual(opts.max_active, 7000)
if __name__ == '__main__':
unittest.main()
```
#### File: pybind/feat/wave_reader_pybind_test.py
```python
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import unittest
import numpy as np
import kaldi
class TestWaveData(unittest.TestCase):
def test_duration(self):
waveform = kaldi.FloatMatrix(1, 16000)
wave_data = kaldi.feat.WaveData(samp_freq=16000, data=waveform)
self.assertEqual(1, wave_data.Duration())
if __name__ == '__main__':
unittest.main()
```
#### File: pybind/fst/vector_fst_pybind_test.py
```python
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import unittest
import kaldi
from kaldi import fst
class TestStdVectorFst(unittest.TestCase):
def test_std_vector_fst(self):
vector_fst = fst.StdVectorFst()
# create the same FST from
# http://www.openfst.org/twiki/bin/view/FST/FstQuickTour#Creating%20FSTs%20Using%20Constructors
# 1st state will be state 0 (returned by AddState)
vector_fst.AddState()
vector_fst.SetStart(0)
vector_fst.AddArc(0, fst.StdArc(1, 1, fst.TropicalWeight(0.5), 1))
vector_fst.AddArc(0, fst.StdArc(2, 2, fst.TropicalWeight(1.5), 1))
vector_fst.AddState()
vector_fst.AddArc(1, fst.StdArc(3, 3, fst.TropicalWeight(2.5), 2))
vector_fst.AddState()
vector_fst.SetFinal(2, fst.TropicalWeight(3.5))
# fstprint with default options
print(vector_fst)
print('-' * 20)
print('fstprint with customized options (default options)')
print(
vector_fst.ToString(is_acceptor=False,
show_weight_one=False,
fst_field_separator=" " * 6,
missing_symbol=""))
# now build the symbol table
input_words = '<eps> a b c'.split()
output_words = '<eps> x y z'.split()
isymbol_table = fst.SymbolTable()
for w in input_words:
isymbol_table.AddSymbol(w)
osymbol_table = fst.SymbolTable()
for w in output_words:
osymbol_table.AddSymbol(w)
vector_fst.SetInputSymbols(isyms=isymbol_table)
vector_fst.SetOutputSymbols(osyms=osymbol_table)
print(vector_fst)
# now for I/O
fst_filename = 'test.fst'
vector_fst.Write(filename=fst_filename)
read_back_fst = fst.StdVectorFst.Read(filename=fst_filename)
print('fst after reading back is:')
print(read_back_fst)
# TODO(fangjun): check that the two fsts are the same: start/final/states/arcs/symbol tables
# TODO(fangjun): add fstdraw support
# TODO(fangjun): test fstcompile
text_fst_str = read_back_fst.ToString()
compiled_filename = "compiled.fst"
fst.CompileFst(text_fst_str=text_fst_str,
out_binary_fst_filename=compiled_filename,
isymbols=isymbol_table,
osymbols=osymbol_table,
keep_isymbols=True,
keep_osymbols=True)
read_back_compiled_fst = fst.StdVectorFst.Read(
filename=compiled_filename)
print('-' * 20)
print('read back compiled fst is:')
print(read_back_compiled_fst)
os.remove(compiled_filename)
os.remove(fst_filename)
if __name__ == '__main__':
unittest.main()
```
#### File: pybind/kaldi/table.py
```python
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import numpy as np
import kaldi_pybind
from kaldi_pybind.nnet3 import _SequentialNnetChainExampleReader
from kaldi_pybind.nnet3 import _RandomAccessNnetChainExampleReader
from kaldi_pybind.nnet3 import _NnetChainExampleWriter
from kaldi_pybind.nnet3 import _SequentialNnetExampleReader
from kaldi_pybind.nnet3 import _RandomAccessNnetExampleReader
from kaldi_pybind.feat import _SequentialWaveReader
from kaldi_pybind.feat import _RandomAccessWaveReader
from kaldi_pybind.feat import _SequentialWaveInfoReader
from kaldi_pybind.feat import _RandomAccessWaveInfoReader
from kaldi_pybind import _SequentialBaseFloatMatrixReader
from kaldi_pybind import _RandomAccessBaseFloatMatrixReader
from kaldi_pybind import _BaseFloatMatrixWriter
from kaldi_pybind import _SequentialBaseFloatVectorReader
from kaldi_pybind import _RandomAccessBaseFloatVectorReader
from kaldi_pybind import _BaseFloatVectorWriter
from kaldi_pybind import _CompressedMatrixWriter
from kaldi_pybind import _SequentialInt32VectorReader
from kaldi_pybind import _RandomAccessInt32VectorReader
from kaldi_pybind import _Int32VectorWriter
from kaldi_pybind import _SequentialLatticeReader
from kaldi_pybind import _RandomAccessLatticeReader
from kaldi_pybind import _LatticeWriter
from kaldi_pybind import _SequentialCompactLatticeReader
from kaldi_pybind import _RandomAccessCompactLatticeReader
from kaldi_pybind import _CompactLatticeWriter
################################################################################
# Sequential Readers
################################################################################
class _SequentialReaderBase(object):
'''Base class defining the Python API for sequential table readers.'''
def __init__(self, rspecifier=''):
'''
This class is used for reading objects sequentially from an archive or
script file. It implements the iterator protocol similar to how Python
implements iteration over dictionaries. Each iteration returns a `(key,
value)` pair from the table in sequential order.
Args:
rspecifier(str): Kaldi rspecifier for reading the table.
If provided, the table is opened for reading.
Raises:
IOError: If opening the table for reading fails.
'''
super(_SequentialReaderBase, self).__init__()
if rspecifier != '':
if not self.Open(rspecifier):
raise IOError('Error opening sequential table reader with '
'rspecifier: {}'.format(rspecifier))
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.IsOpen():
self.Close()
def __iter__(self):
while not self.Done():
key = self.Key()
value = self.Value()
yield key, value
self.Next()
def Open(self, rspecifier):
'''Opens the table for reading.
Args:
rspecifier(str): Kaldi rspecifier for reading the table.
If provided, the table is opened for reading.
Returns:
True if table is opened successfully, False otherwise.
Raises:
IOError: If opening the table for reading fails.
'''
return super(_SequentialReaderBase, self).Open(rspecifier)
def Done(self):
'''Indicates whether the table reader is exhausted or not.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Returns:
True if the table reader is exhausted, False otherwise.
'''
return super(_SequentialReaderBase, self).Done()
def Key(self):
'''Returns the current key.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Returns:
str: The current key.
'''
return super(_SequentialReaderBase, self).Key()
def FreeCurrent(self):
'''Deallocates the current value.
This method is provided as an optimization to save memory, for large
objects.
'''
super(_SequentialReaderBase, self).FreeCurrent()
def Value(self):
'''Returns the current value.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Returns:
The current value.
'''
return super(_SequentialReaderBase, self).Value()
def Next(self):
'''Advances the table reader.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
'''
super(_SequentialReaderBase, self).Next()
def IsOpen(self):
'''Indicates whether the table reader is open or not.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Returns:
True if the table reader is open, False otherwise.
'''
return super(_SequentialReaderBase, self).IsOpen()
def Close(self):
'''Closes the table.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Returns:
True if table is closed successfully, False otherwise.
'''
return super(_SequentialReaderBase, self).Close()
class SequentialNnetChainExampleReader(_SequentialReaderBase,
_SequentialNnetChainExampleReader):
'''Sequential table reader for nnet chain examples.'''
pass
class SequentialNnetExampleReader(_SequentialReaderBase,
_SequentialNnetExampleReader):
'''Sequential table reader for nnet examples.'''
pass
class SequentialWaveReader(_SequentialReaderBase, _SequentialWaveReader):
'''Sequential table reader for wave files.'''
pass
class SequentialWaveInfoReader(_SequentialReaderBase,
_SequentialWaveInfoReader):
'''Sequential table reader for wave file headers.'''
pass
class SequentialMatrixReader(_SequentialReaderBase,
_SequentialBaseFloatMatrixReader):
'''Sequential table reader for single precision matrices.'''
pass
class SequentialVectorReader(_SequentialReaderBase,
_SequentialBaseFloatVectorReader):
'''Sequential table reader for single precision vectors.'''
pass
class SequentialIntVectorReader(_SequentialReaderBase,
_SequentialInt32VectorReader):
'''Sequential table reader for integer sequences.'''
pass
class SequentialLatticeReader(_SequentialReaderBase, _SequentialLatticeReader):
'''Sequential table reader for lattices.'''
pass
class SequentialCompactLatticeReader(_SequentialReaderBase,
_SequentialCompactLatticeReader):
'''Sequential table reader for compact lattices.'''
pass
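# Illustrative usage sketch (not part of the original module; the rspecifier
# below is an assumed example path):
#
#   with SequentialMatrixReader('scp:feats.scp') as reader:
#       for key, value in reader:   # (key, value) pairs in archive order
#           print(key)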
################################################################################
# Random Access Readers
################################################################################
class _RandomAccessReaderBase(object):
'''Base class defining the Python API for random access table readers.'''
def __init__(self, rspecifier=''):
'''
This class is used for randomly accessing objects in an archive or
script file. It implements `__contains__` and `__getitem__` methods to
provide a dictionary-like interface for accessing table entries. e.g.
`reader[key]` returns the `value` associated with the `key`.
Args:
rspecifier(str): Kaldi rspecifier for reading the table.
If provided, the table is opened for reading.
Raises:
IOError: If opening the table for reading fails.
'''
super(_RandomAccessReaderBase, self).__init__()
if rspecifier != '':
if not self.Open(rspecifier):
raise IOError('Error opening random access table reader with '
'rspecifier: {}'.format(rspecifier))
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.IsOpen():
self.Close()
def __contains__(self, key):
return self.HasKey(key)
def __getitem__(self, key):
if self.HasKey(key):
return self.Value(key)
else:
raise KeyError(key)
def Open(self, rspecifier):
'''Opens the table for reading.
Args:
rspecifier(str): Kaldi rspecifier for reading the table.
If provided, the table is opened for reading.
Returns:
True if table is opened successfully, False otherwise.
Raises:
IOError: If opening the table for reading fails.
'''
return super(_RandomAccessReaderBase, self).Open(rspecifier)
def HasKey(self, key):
'''Checks whether the table has the key.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Args:
key (str): The key.
Returns:
True if the table has the key, False otherwise.
'''
return super(_RandomAccessReaderBase, self).HasKey(key)
def Value(self, key):
'''Returns the value associated with the key.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Args:
key (str): The key.
Returns:
The value associated with the key.
'''
return super(_RandomAccessReaderBase, self).Value(key)
def IsOpen(self):
'''Indicates whether the table reader is open or not.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Returns:
True if the table reader is open, False otherwise.
'''
return super(_RandomAccessReaderBase, self).IsOpen()
def Close(self):
'''Closes the table.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Returns:
True if table is closed successfully, False otherwise.
'''
return super(_RandomAccessReaderBase, self).Close()
class RandomAccessNnetChainExampleReader(_RandomAccessReaderBase,
_RandomAccessNnetChainExampleReader):
'''Random access table reader for nnet chain examples.'''
pass
class RandomAccessNnetExampleReader(_RandomAccessReaderBase,
_RandomAccessNnetExampleReader):
'''Random access table reader for nnet examples.'''
pass
class RandomAccessWaveReader(_RandomAccessReaderBase, _RandomAccessWaveReader):
'''Random access table reader for wave files.'''
pass
class RandomAccessWaveInfoReader(_RandomAccessReaderBase,
_RandomAccessWaveInfoReader):
'''Random access table reader for wave file headers.'''
pass
class RandomAccessMatrixReader(_RandomAccessReaderBase,
_RandomAccessBaseFloatMatrixReader):
'''Random access table reader for single precision matrices.'''
pass
class RandomAccessVectorReader(_RandomAccessReaderBase,
_RandomAccessBaseFloatVectorReader):
'''Random access table reader for single precision vectors.'''
pass
class RandomAccessIntVectorReader(_RandomAccessReaderBase,
_RandomAccessInt32VectorReader):
'''Random access table reader for integer sequences.'''
pass
class RandomAccessLatticeReader(_RandomAccessReaderBase,
_RandomAccessLatticeReader):
'''Random access table reader for lattices.'''
pass
class RandomAccessCompactLatticeReader(_RandomAccessReaderBase,
_RandomAccessCompactLatticeReader):
'''Random access table reader for compact lattices.'''
pass
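# Illustrative usage sketch (assumed example rspecifier and utterance key, added
# for illustration of the dictionary-like interface):
#
#   reader = RandomAccessMatrixReader('scp:feats.scp')
#   if 'utt1' in reader:         # HasKey()
#       value = reader['utt1']   # Value()
#   reader.Close()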
################################################################################
# Writers
################################################################################
class _WriterBase(object):
'''Base class defining the additional Python API for table writers.'''
def __init__(self, wspecifier=''):
'''
This class is used for writing objects to an archive or script file. It
implements the `__setitem__` method to provide a dictionary-like
interface for writing table entries, e.g. `writer[key] = value` writes
the pair `(key, value)` to the table.
Args:
wspecifier (str): Kaldi wspecifier for writing the table.
If provided, the table is opened for writing.
Raises:
IOError: If opening the table for writing fails.
'''
super(_WriterBase, self).__init__()
if wspecifier != '':
if not self.Open(wspecifier):
raise IOError(
'Error opening table writer with wspecifier: {}'.format(
wspecifier))
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.IsOpen():
self.Close()
def __setitem__(self, key, value):
self.Write(key, value)
def Open(self, wspecifier):
'''Opens the table for writing.
Args:
wspecifier(str): Kaldi wspecifier for writing the table.
If provided, the table is opened for writing.
Returns:
True if table is opened successfully, False otherwise.
Raises:
IOError: If opening the table for writing fails.
'''
return super(_WriterBase, self).Open(wspecifier)
def Flush(self):
'''Flushes the table contents to disk/pipe.'''
super(_WriterBase, self).Flush()
def Write(self, key, value):
'''Writes the `(key, value)` pair to the table.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Args:
key (str): The key.
value: The value.
'''
super(_WriterBase, self).Write(key, value)
def IsOpen(self):
'''Indicates whether the table writer is open or not.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Returns:
True if the table writer is open, False otherwise.
'''
return super(_WriterBase, self).IsOpen()
def Close(self):
'''Closes the table.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Returns:
True if table is closed successfully, False otherwise.
'''
return super(_WriterBase, self).Close()
class NnetChainExampleWriter(_WriterBase, _NnetChainExampleWriter):
'''Table writer for nnet chain examples.'''
pass
class MatrixWriter(_WriterBase, _BaseFloatMatrixWriter):
'''Table writer for single precision matrices.'''
def Write(self, key, value):
if isinstance(value, np.ndarray):
m = kaldi_pybind.FloatSubMatrix(value)
value = kaldi_pybind.FloatMatrix(m)
super().Write(key, value)
class VectorWriter(_WriterBase, _BaseFloatVectorWriter):
'''Table writer for single precision vectors.'''
def Write(self, key, value):
if isinstance(value, np.ndarray):
v = kaldi_pybind.FloatSubVector(value)
value = kaldi_pybind.FloatVector(v)
super().Write(key, value)
class CompressedMatrixWriter(_WriterBase, _CompressedMatrixWriter):
'''Table writer for single precision compressed matrices.'''
pass
class IntVectorWriter(_WriterBase, _Int32VectorWriter):
'''Table writer for integer sequences.'''
pass
class LatticeWriter(_WriterBase, _LatticeWriter):
'''Table writer for lattices.'''
pass
class CompactLatticeWriter(_WriterBase, _CompactLatticeWriter):
'''Table writer for compact lattices.'''
pass
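# Illustrative usage sketch (assumed example wspecifier and key); per the Write
# overrides above, MatrixWriter and VectorWriter also accept float32 numpy
# arrays, which they convert before writing:
#
#   import numpy as np
#   with MatrixWriter('ark,scp:out.ark,out.scp') as writer:
#       writer['utt1'] = np.zeros((10, 4), dtype=np.float32)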
if False:
# TODO(fangjun): enable the following once other wrappers are added
class SequentialDoubleVectorReader(_SequentialReaderBase,
_kaldi_table.SequentialDoubleVectorReader
):
'''Sequential table reader for double precision vectors.'''
pass
class SequentialDoubleMatrixReader(_SequentialReaderBase,
_kaldi_table.SequentialDoubleMatrixReader
):
'''Sequential table reader for double precision matrices.'''
pass
class SequentialPosteriorReader(_SequentialReaderBase,
_kaldi_table.SequentialPosteriorReader):
'''Sequential table reader for frame posteriors.'''
pass
class SequentialGaussPostReader(_SequentialReaderBase,
_kaldi_table.SequentialGaussPostReader):
'''Sequential table reader for Gaussian-level frame posteriors.'''
pass
class SequentialFstReader(_SequentialReaderBase,
_kaldi_table_ext.SequentialFstReader):
'''Sequential table reader for FSTs over the tropical semiring.'''
pass
class SequentialLogFstReader(_SequentialReaderBase,
_kaldi_table_ext.SequentialLogFstReader):
'''Sequential table reader for FSTs over the log semiring.'''
pass
class SequentialKwsIndexFstReader(
_SequentialReaderBase,
_kaldi_table_ext.SequentialKwsIndexFstReader):
'''Sequential table reader for FSTs over the KWS index semiring.'''
pass
class SequentialRnnlmExampleReader(_SequentialReaderBase,
_kaldi_table.SequentialRnnlmExampleReader
):
'''Sequential table reader for RNNLM examples.'''
pass
class SequentialIntReader(_SequentialReaderBase,
_kaldi_table.SequentialIntReader):
'''Sequential table reader for integers.'''
pass
class SequentialFloatReader(_SequentialReaderBase,
_kaldi_table.SequentialFloatReader):
'''Sequential table reader for single precision floats.'''
pass
class SequentialDoubleReader(_SequentialReaderBase,
_kaldi_table.SequentialDoubleReader):
'''Sequential table reader for double precision floats.'''
pass
class SequentialBoolReader(_SequentialReaderBase,
_kaldi_table.SequentialBoolReader):
'''Sequential table reader for Booleans.'''
pass
class SequentialIntVectorVectorReader(
_SequentialReaderBase,
_kaldi_table.SequentialIntVectorVectorReader):
'''Sequential table reader for sequences of integer sequences.'''
pass
class SequentialIntPairVectorReader(
_SequentialReaderBase, _kaldi_table.SequentialIntPairVectorReader):
'''Sequential table reader for sequences of integer pairs.'''
pass
class SequentialFloatPairVectorReader(
_SequentialReaderBase,
_kaldi_table.SequentialFloatPairVectorReader):
'''Sequential table reader for sequences of single precision float pairs.'''
pass
class RandomAccessDoubleVectorReader(
_RandomAccessReaderBase,
_kaldi_table.RandomAccessDoubleVectorReader):
'''Random access table reader for double precision vectors.'''
pass
class RandomAccessDoubleMatrixReader(
_RandomAccessReaderBase,
_kaldi_table.RandomAccessDoubleMatrixReader):
'''Random access table reader for double precision matrices.'''
pass
class RandomAccessPosteriorReader(_RandomAccessReaderBase,
_kaldi_table.RandomAccessPosteriorReader):
'''Random access table reader for frame posteriors.'''
pass
class RandomAccessGaussPostReader(_RandomAccessReaderBase,
_kaldi_table.RandomAccessGaussPostReader):
'''Random access table reader for Gaussian-level frame posteriors.'''
pass
class RandomAccessFstReader(_RandomAccessReaderBase,
_kaldi_table_ext.RandomAccessFstReader):
'''Random access table reader for FSTs over the tropical semiring.'''
pass
class RandomAccessLogFstReader(_RandomAccessReaderBase,
_kaldi_table_ext.RandomAccessLogFstReader):
'''Random access table reader for FSTs over the log semiring.'''
pass
class RandomAccessKwsIndexFstReader(
_RandomAccessReaderBase,
_kaldi_table_ext.RandomAccessKwsIndexFstReader):
'''Random access table reader for FSTs over the KWS index semiring.'''
pass
class RandomAccessIntReader(_RandomAccessReaderBase,
_kaldi_table.RandomAccessIntReader):
'''Random access table reader for integers.'''
pass
class RandomAccessFloatReader(_RandomAccessReaderBase,
_kaldi_table.RandomAccessFloatReader):
'''Random access table reader for single precision floats.'''
pass
class RandomAccessDoubleReader(_RandomAccessReaderBase,
_kaldi_table.RandomAccessDoubleReader):
'''Random access table reader for double precision floats.'''
pass
class RandomAccessBoolReader(_RandomAccessReaderBase,
_kaldi_table.RandomAccessBoolReader):
'''Random access table reader for Booleans.'''
pass
class RandomAccessIntVectorVectorReader(
_RandomAccessReaderBase,
_kaldi_table.RandomAccessIntVectorVectorReader):
'''Random access table reader for sequences of integer sequences.'''
pass
class RandomAccessIntPairVectorReader(
_RandomAccessReaderBase,
_kaldi_table.RandomAccessIntPairVectorReader):
'''Random access table reader for sequences of integer pairs.'''
pass
class RandomAccessFloatPairVectorReader(
_RandomAccessReaderBase,
_kaldi_table.RandomAccessFloatPairVectorReader):
'''
Random access table reader for sequences of single precision float pairs.
'''
pass
################################################################################
# Mapped Random Access Readers
################################################################################
class _RandomAccessReaderMappedBase(object):
'''
Base class defining the Python API for mapped random access table readers.
'''
def __init__(self, table_rspecifier='', map_rspecifier=''):
'''
This class is used for randomly accessing objects in an archive or
script file. It implements `__contains__` and `__getitem__` methods to
provide a dictionary-like interface for accessing table entries. If a
**map_rspecifier** is provided, the map is used for converting the keys
to the actual keys used to query the table, e.g. `reader[key]` returns
the `value` associated with the key `map[key]`. Otherwise, it works like
a random access table reader.
Args:
table_rspecifier(str): Kaldi rspecifier for reading the table.
If provided, the table is opened for reading.
map_rspecifier (str): Kaldi rspecifier for reading the map.
If provided, the map is opened for reading.
Raises:
IOError: If opening the table or map for reading fails.
'''
super(_RandomAccessReaderMappedBase, self).__init__()
if table_rspecifier != '' and map_rspecifier != '':
if not self.open(table_rspecifier, map_rspecifier):
raise IOError(
'Error opening mapped random access table reader '
'with table_rspecifier: {}, map_rspecifier: {}'.format(
table_rspecifier, map_rspecifier))
def __enter__(self):
return self
def __contains__(self, key):
return self.has_key(key)
def __getitem__(self, key):
if self.has_key(key):
return self.value(key)
else:
raise KeyError(key)
def open(self, table_rspecifier, map_rspecifier):
'''Opens the table for reading.
Args:
table_rspecifier(str): Kaldi rspecifier for reading the table.
If provided, the table is opened for reading.
map_rspecifier (str): Kaldi rspecifier for reading the map.
If provided, the map is opened for reading.
Returns:
True if table is opened successfully, False otherwise.
Raises:
IOError: If opening the table or map for reading fails.
'''
return super(_RandomAccessReaderMappedBase,
self).open(table_rspecifier, map_rspecifier)
def has_key(self, key):
'''Checks whether the table has the key.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Args:
key (str): The key.
Returns:
True if the table has the key, False otherwise.
'''
return super(_RandomAccessReaderMappedBase, self).has_key(key)
def value(self, key):
'''Returns the value associated with the key.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Args:
key (str): The key.
Returns:
The value associated with the key.
'''
return super(_RandomAccessReaderMappedBase, self).value(key)
def is_open(self):
'''Indicates whether the table reader is open or not.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Returns:
True if the table reader is open, False otherwise.
'''
return super(_RandomAccessReaderMappedBase, self).is_open()
def close(self):
'''Closes the table.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Returns:
True if table is closed successfully, False otherwise.
'''
return super(_RandomAccessReaderMappedBase, self).close()
class RandomAccessVectorReaderMapped(
_RandomAccessReaderMappedBase,
_kaldi_table.RandomAccessVectorReaderMapped):
'''Mapped random access table reader for single precision vectors.'''
pass
class RandomAccessDoubleVectorReaderMapped(
_RandomAccessReaderMappedBase,
_kaldi_table.RandomAccessDoubleVectorReaderMapped):
'''Mapped random access table reader for double precision vectors.'''
pass
class RandomAccessMatrixReaderMapped(
_RandomAccessReaderMappedBase,
_kaldi_table.RandomAccessMatrixReaderMapped):
'''Mapped random access table reader for single precision matrices.'''
pass
class RandomAccessDoubleMatrixReaderMapped(
_RandomAccessReaderMappedBase,
_kaldi_table.RandomAccessDoubleMatrixReaderMapped):
'''Mapped random access table reader for double precision matrices.'''
pass
class RandomAccessFloatReaderMapped(
_RandomAccessReaderMappedBase,
_kaldi_table.RandomAccessFloatReaderMapped):
'''Mapped random access table reader for single precision floats.'''
pass
class DoubleVectorWriter(_WriterBase, _kaldi_table.DoubleVectorWriter):
'''Table writer for double precision vectors.'''
def write(self, key, value):
'''Writes the `(key, value)` pair to the table.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Overrides write to accept both DoubleVector and DoubleSubVector.
Args:
key (str): The key.
value: The value.
'''
super(DoubleVectorWriter, self).write(key,
_matrix.DoubleVector(value))
class DoubleMatrixWriter(_WriterBase, _kaldi_table.DoubleMatrixWriter):
'''Table writer for double precision matrices.'''
def write(self, key, value):
'''Writes the `(key, value)` pair to the table.
This method is provided for compatibility with the C++ API only;
most users should use the Pythonic API.
Overrides write to accept both DoubleMatrix and DoubleSubMatrix.
Args:
key (str): The key.
value: The value.
'''
super(DoubleMatrixWriter, self).write(key,
_matrix.DoubleMatrix(value))
class WaveWriter(_WriterBase, _kaldi_table.WaveWriter):
'''Table writer for wave files.'''
pass
class PosteriorWriter(_WriterBase, _kaldi_table.PosteriorWriter):
'''Table writer for frame posteriors.'''
pass
class GaussPostWriter(_WriterBase, _kaldi_table.GaussPostWriter):
'''Table writer for Gaussian-level frame posteriors.'''
pass
class FstWriter(_WriterBase, _kaldi_table_ext.FstWriter):
'''Table writer for FSTs over the tropical semiring.'''
pass
class LogFstWriter(_WriterBase, _kaldi_table_ext.LogFstWriter):
'''Table writer for FSTs over the log semiring.'''
pass
class KwsIndexFstWriter(_WriterBase, _kaldi_table_ext.KwsIndexFstWriter):
'''Table writer for FSTs over the KWS index semiring.'''
pass
class NnetExampleWriter(_WriterBase, _kaldi_table.NnetExampleWriter):
'''Table writer for nnet examples.'''
pass
class RnnlmExampleWriter(_WriterBase, _kaldi_table.RnnlmExampleWriter):
'''Table writer for RNNLM examples.'''
pass
class IntWriter(_WriterBase, _kaldi_table.IntWriter):
'''Table writer for integers.'''
pass
class FloatWriter(_WriterBase, _kaldi_table.FloatWriter):
'''Table writer for single precision floats.'''
pass
class DoubleWriter(_WriterBase, _kaldi_table.DoubleWriter):
'''Table writer for double precision floats.'''
pass
class BoolWriter(_WriterBase, _kaldi_table.BoolWriter):
'''Table writer for Booleans.'''
pass
class IntVectorVectorWriter(_WriterBase,
_kaldi_table.IntVectorVectorWriter):
'''Table writer for sequences of integer sequences.'''
pass
class IntPairVectorWriter(_WriterBase, _kaldi_table.IntPairVectorWriter):
'''Table writer for sequences of integer pairs.'''
pass
class FloatPairVectorWriter(_WriterBase,
_kaldi_table.FloatPairVectorWriter):
'''Table writer for sequences of single precision float pairs.'''
pass
################################################################################
```
#### File: pybind/matrix/kaldi_vector_pybind_test.py
```python
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import unittest
import numpy as np
import kaldi
class TestFloatSubVector(unittest.TestCase):
def test_numpy(self):
num_data = 10
data = np.arange(num_data).astype(np.float32)
# =============================================================
# build a FloatSubVector() from a numpy array; memory is shared
# -------------------------------------------------------------
v = kaldi.FloatSubVector(data)
self.assertEqual(v.Dim(), num_data)
for i in range(num_data):
self.assertEqual(i, v[i])
# memory is shared between numpy array and FloatSubVector
for i in range(num_data):
v[i] += 10
self.assertEqual(data[i], v[i])
# =============================================================
# Convert a FloatSubVector to a numpy array; memory is shared
# -------------------------------------------------------------
v_reference_count = sys.getrefcount(v)
d = v.numpy()
self.assertEqual(v_reference_count + 1, sys.getrefcount(v))
self.assertIsInstance(d, np.ndarray)
self.assertEqual(d.ndim, 1)
self.assertEqual(d.dtype, np.float32)
self.assertEqual(d.size, v.Dim())
for i in range(num_data):
d[i] += 10
self.assertEqual(v[i], d[i])
del d
self.assertEqual(v_reference_count, sys.getrefcount(v))
class TestFloatVector(unittest.TestCase):
def test_to_numpy(self):
# first, build a kaldi vector
dim = 8
v = kaldi.FloatVector(size=dim)
self.assertEqual(v.Dim(), dim)
for i in range(dim):
self.assertEqual(v[i], 0)
# now to numpy; memory is shared
d = v.numpy()
d += 10
for i in range(dim):
self.assertEqual(d[i], v[i])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "aadps/ultraviolet",
"score": 3
} |
#### File: ultraviolet/src/mailer.py
```python
import os
import smtplib
from email.message import EmailMessage
import markdown
import config
def generateLetter(letter, format):
"""
Generate designated email from file.
:param letter: The name of the email to be generated.
:param format: Generate an email with HTML format?
:returns: The generated email.
"""
filename = "/letter/" + letter + ".md"
if not os.path.isfile(filename):
print('Designated letter does not exist.')
return None
else:
file = open(filename, 'r')
if format:
return markdown.markdown(file.read())
else:
return file.read()
def sendLetter(receiver, letter, subject):
"""
Send designated email from file.
:param receiver: The recipient's email address.
:param letter: The name of the email to be sent.
:param subject: The subject of the email to be sent.
:returns: nothing.
"""
try:
msg = EmailMessage()
msg['Subject'] = subject
msg['From'] = config.SMTP_CONFIG['email']
msg['To'] = receiver
msg.set_content(generateLetter(letter, False))
msg.add_alternative(
generateLetter(letter, True), subtype='html'
)
smtpObj = smtplib.SMTP(config.SMTP_CONFIG['host'],
config.SMTP_CONFIG['port'])
smtpObj.login(config.SMTP_CONFIG['username'],
config.SMTP_CONFIG['password'])
smtpObj.send_message(msg)
print('Successfully sent email')
except smtplib.SMTPException:
print('Error: unable to send email')
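# Example call (illustrative; the recipient address, letter name and subject are
# assumptions, and config.SMTP_CONFIG must be configured for a real send):
#   sendLetter('reader@example.com', 'welcome', 'Welcome aboard')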
``` |
{
"source": "aadrianson/xlr-ansible-tower-plugin",
"score": 2
} |
#### File: jython/ansible_tower/run_job_template.py
```python
from tower_cli import get_resource
from ansible_tower.connect_util import session
def get_resource_id(resource, name_or_id):
if name_or_id.isdigit():
return int(name_or_id)
result = resource.list(name=name_or_id)
count = int(result['count'])
if count == 0:
raise Exception("Resource name '%s''%s' not found " % (resource, name_or_id))
if count > 1:
raise Exception("Too many result for resource name '%s''%s' not found " % (resource, name_or_id))
return int(result['results'][0]['id'])
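# Illustrative behaviour (values are hypothetical): get_resource_id(get_resource('inventory'), '7')
# returns 7 directly, while get_resource_id(get_resource('inventory'), 'Staging Hosts')
# resolves the name through the Tower API and returns the matching numeric id,
# raising if zero or multiple matches are found.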
def process(task_vars):
with session(task_vars['tower_server'], task_vars['username'], task_vars['password']):
job = get_resource('job')
workflow_job = get_resource('workflow_job') # adding the necessary call to start a workflow job as per https://tower-cli.readthedocs.io/en/latest/api_ref/resources/workflow_job.html
try:
k_vars = {}
if task_vars['inventory']:
result = get_resource_id(get_resource('inventory'), task_vars['inventory'])
print("* set inventory : {0}->{1}".format(task_vars['inventory'], result))
k_vars['inventory'] = result
if task_vars['credential']:
result = get_resource_id(get_resource('credential'), task_vars['credential'])
print("* set credentials : {0}->{1}".format(task_vars['credential'], result))
k_vars['credential'] = result
if task_vars['extraVars2']:
vars_ = str(task_vars['extraVars2'])
print("* set extra_vars : {0}".format(vars_))
# TODO: manage taskPasswordToken && taskPassword (turn hidden in waiting for...)
k_vars['extra_vars'] = [vars_]
print("\n")
print("```") # started markdown code block
if task_vars['isTemplateWorkflow']: # use the synthetic.xml new form checkbox to build a condition to differentiate a standard template job and a workflow template job
                # note that using monitor here instead of wait will raise an exception.
res = workflow_job.launch(workflow_job_template=task_vars['jobTemplate'],wait=task_vars['waitTillComplete'], **k_vars)
else:
res = job.launch(job_template=task_vars['jobTemplate'], monitor=task_vars['waitTillComplete'], **k_vars)
finally:
print("```")
print("\n") # end markdown code block
globals()['jobId'] = res['id']
globals()['jobStatus'] = res['status']
if task_vars['isTemplateWorkflow']: # use the synthetic.xml new form checkbox to build a condition to differentiate a standard template job and a workflow template job and provide the current job status URL
print("* [Job %s Link](%s/#/workflow_jobs/%s)" % (res['id'], task_vars['tower_server']['url'], res['id']))
else:
print("* [Job %s Link](%s/#/jobs/%s)" % (res['id'], task_vars['tower_server']['url'], res['id']))
if task_vars['stopOnFailure'] and not res['status'] == 'successful':
raise Exception("Failed with status %s" % res['status'])
if __name__ == '__main__' or __name__ == '__builtin__':
process(locals())
```
#### File: ansibletower-stub/app/app.py
```python
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, request, jsonify, make_response
from time import strftime
import traceback
from werkzeug.exceptions import HTTPException, BadRequest, NotFound, Unauthorized
from functools import wraps
import os, io, json
app = Flask(__name__)
handler = RotatingFileHandler('ansibleTowerPlugin.log', maxBytes=1000000, backupCount=1)
logger_formatter = logging.Formatter('%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s')
handler.setFormatter(logger_formatter)
handler.setLevel(logging.DEBUG)
app.logger.addHandler(handler)
def getFile( fileName, status="200" ):
filePath = "/ansibletower-stub/responses/%s" % fileName
if not os.path.isfile(filePath):
raise NotFound({"code": "response_file_not_found", "description": "Unable to load response file"}, 500)
f = io.open(filePath, "r", encoding="utf-8")
resp = make_response( (f.read(), status) )
resp.headers['Content-Type'] = 'application/json; charset=utf-8'
return resp
def requires_auth(f):
"""
Determines if the access token is valid
"""
@wraps(f)
def decorated(*args, **kwargs):
token = get_token_auth_header()
if token != "<PASSWORD>=": # admin:admin in base64
raise Unauthorized({"code": "invalid_header", "description": "Unable to find appropriate key"}, 400)
return f(*args, **kwargs)
return decorated
@app.route('/')
def index():
return "Hello, World!"
@app.route('/api/v2/inventory_updates/<id>/', methods=['GET'])
@requires_auth
def getInventoryUpdate(id):
return getFile("inventory-sync-%s.json" % id)
@app.route('/api/v2/inventory_updates/<id>/stdout/', methods=['GET'])
@requires_auth
def getInventoryUpdateStdout(id):
# note: currently the 'content' field on these json files is base64 encoded, as that is how the CLI requests it
return getFile("inventory-sync-stdout-%s.json" % id)
@app.route('/api/v2/inventory_sources/<inventory_source_id>/', methods=['GET'])
@requires_auth
def getInventorySource(inventory_source_id):
resp = make_response( ("{ \"inventory\": \"1337\"}", 200) )
resp.headers['Content-Type'] = 'application/json; charset=utf-8'
return resp
@app.route('/api/v2/inventory_sources/<inventory_source_id>/update/', methods=['GET', 'POST'])
@requires_auth
def startSync(inventory_source_id):
if request.method == 'GET':
resp = make_response( ("{ \"can_update\": \"true\"}", 200) )
resp.headers['Content-Type'] = 'application/json; charset=utf-8'
return resp
app.logger.debug("startSync for inventory source %s" % inventory_source_id)
resp = make_response((getFile("inventory-sync-update-%s.json" % inventory_source_id), 202))
return resp
@app.route('/api/v2/job_templates/<id>/', methods=['GET'])
@requires_auth
def launchJob(id):
app.logger.debug("In Launch Job, id = %s" % (id))
return getFile("launchJob.json")
@app.route('/api/v2/job_templates/<id>/launch/', methods=['GET'])
@requires_auth
def getLaunch(id):
app.logger.debug("In getLaunch, id = %s" % (id))
return getFile("getLaunch.json")
@app.route('/api/v2/job_templates/<id>/launch/', methods=['POST'])
@requires_auth
def postLaunch(id):
app.logger.debug("In postLaunch, id = %s" % (id))
return getFile("postLaunch.json")
@app.route('/api/v2/jobs/<id>/')
@requires_auth
def getJobs(id):
app.logger.debug("In getJobs, id = %s" % (id))
return getFile("getJobs.json")
def get_token_auth_header():
"""
Obtains the access token from the Authorization Header
"""
auth = request.headers.get("Authorization", None)
if not auth:
raise AuthError({"code": "authorization_header_missing",
"description": "Authorization header is expected"}, 401)
parts = auth.split()
if parts[0] != "Basic":
raise AuthError({"code": "invalid_header",
"description":
"Authorization header must start with Basic"}, 401)
token = parts[1]
return token
# Added for debug purposes - logging all requests
@app.route("/json")
def get_json():
data = {"Name":"Some Name","Books":"[Book1, Book2, Book3]"}
return jsonify(data_WRONG) # INTENTIONAL ERROR FOR TRACEBACK EVENT
@app.after_request
def after_request(response):
timestamp = strftime('[%Y-%b-%d %H:%M]')
app.logger.error('%s %s %s %s %s %s', timestamp, request.remote_addr, request.method, request.scheme, request.full_path, response.status)
return response
@app.errorhandler(Exception)
def exceptions(e):
tb = traceback.format_exc()
timestamp = strftime('[%Y-%b-%d %H:%M]')
app.logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\n%s', timestamp, request.remote_addr, request.method, request.scheme, request.full_path, tb)
return e
if __name__ == '__main__':
app.run()
``` |
{
"source": "aadrm/breakoutwagtail",
"score": 3
} |
#### File: apps/booking/coupon_import.py
```python
import csv
from datetime import timedelta
from django.utils import timezone
from .models import Coupon
# name, code, amount, is_percent, created, expiry
def import_coupons():
with open('cpn.csv', 'r') as cpnfile:
csvreader = csv.DictReader(cpnfile, delimiter=',')
coupons = []
for row in csvreader:
c = Coupon()
c.code = row['code']
c.amount = row['amount']
if row['percent'] == "True":
c.is_percent = True
c.created = timezone.now()
c.expiry = timezone.now() + timedelta(days=365)
c.name = row['name']
            c.save()
            coupons.append(c)
        for coupon in coupons:
            print(coupon)
```
#### File: apps/booking/forms.py
```python
from django import forms
from django.core.validators import RegexValidator
from django.utils.html import format_html
from django.utils.translation import gettext as _
# from phonenumber_field.formfields import PhoneNumberField
from .models import Slot, Product, Invoice, Coupon, Room, Payment, PaymentMethod
from .utils import getProductsFromProductFamily
from paypal.standard.forms import PayPalPaymentsForm
class DateInput(forms.DateInput):
input_type = 'date'
class SlotBookingForm(forms.Form):
def __init__(self, *args, **kwargs):
self.slot_id = kwargs.pop('slot_id')
self.slot = Slot.objects.get(pk=self.slot_id)
super(SlotBookingForm, self).__init__(*args, **kwargs)
self.fields['product'] = forms.ModelChoiceField(
label=_('Players'),
queryset=Product.objects.filter(family=self.slot.product_family),
)
self.fields['slot_id'] = forms.IntegerField(required=False, initial=self.slot_id)
class InvoiceForm(forms.ModelForm):
required_css_class = 'required'
def __init__(self, *args, **kwargs):
cart = kwargs.pop('cart')
payment_methods = cart.get_valid_payment_methods()
super().__init__(*args, **kwargs)
self.fields['is_terms'].required = True
self.fields['is_privacy'].required = True
self.fields['phone'] = forms.CharField(label="phone", max_length=31, required=True)
self.fields['payment'] = forms.ModelChoiceField(
payment_methods,
required=True,
widget=forms.RadioSelect,
)
class Meta:
model = Invoice
fields = '__all__'
class MyModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.player_price_str()
class AddProductToCartForm(forms.Form):
def __init__(self, *args, **kwargs):
family = kwargs.pop('family', '')
if family:
products = family.products.all()
else:
products = Product.objects.all()
super().__init__(*args, **kwargs)
self.fields['product'] = MyModelChoiceField(
products,
required=True,
# widget=forms.RadioSelect
)
class RemoveFromCartForm(forms.Form):
cart = forms.IntegerField(label='cart', required=True)
item = forms.IntegerField(label='item', required=True)
class ApplyCouponForm(forms.Form):
code = forms.CharField(label="code", max_length=32, required=True)
class CouponGeneratorForm(forms.ModelForm):
# number = forms.IntegerField(_('Number of coupons'), required=False)
class Meta:
model = Coupon
fields = [
'name',
'amount',
'is_percent',
'is_apply_to_basket',
'is_individual_use',
'is_overrule_individual_use',
'is_upgrade',
'product_families_included',
'product_families_excluded',
'product_included',
'product_excluded',
'use_limit',
'expiry',
'dow_valid',
]
def __init__(self, *args, **kwargs):
super(CouponGeneratorForm, self).__init__(*args, **kwargs)
self.fields['number'] = forms.IntegerField(required=False)
# def save(self, *args, **kwargs):
# times = self.cleaned_data['number']
# for i in range(0, times):
# super(CouponGeneratorForm, self).save(*args, **kwargs)
# notes = forms.CharField(max_length='32', required=False)
# amount = forms.DecimalField(required=False)
# is_percent = forms.BooleanField(_('Apply as percent'), required=False)
# is_apply_to_basket = forms.BooleanField(_('Apply to entire basket'), required=False)
# is_individual_use = forms.BooleanField(_('Not compatible with coupons'), required=False)
# is_overrule_individual_use = forms.BooleanField(_('Force compatibility with coupons'), required=False)
# is_upgrade = forms.BooleanField(_('Upgrades the item'), required=False)
# product_familie
class CustomPaypal(PayPalPaymentsForm):
def render(self):
return format_html(u"""<form action="{0}" method="post">
{1}
<button class="standard-button standard-button--emphasis"
type="sumbit" name="submit" />Pay Pal<button>
</form>""", self.get_endpoint(), self.as_p(), self.get_image())
class FilterAppointmentsForm(forms.Form):
start_date = forms.DateField(widget=DateInput, required=False)
end_date = forms.DateField(widget=DateInput, required=False)
room = forms.ModelChoiceField(queryset=Room.objects.filter(is_active=True), required=False)
search = forms.CharField(max_length=64, required=False)
class FilterOrdersForm(forms.Form):
start_date = forms.DateField(widget=DateInput, required=False)
end_date = forms.DateField(widget=DateInput, required=False)
payment = forms.ModelChoiceField(queryset=PaymentMethod.objects.all(), required=False)
search = forms.CharField(max_length=64, required=False)
class PaymentForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class Meta:
model = Payment
fields = ("invoice", "amount")
```
#### File: wagtail/menus/models.py
```python
from django.db import models
from django.urls import reverse
from django_extensions.db.fields import AutoSlugField
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.admin.edit_handlers import (
MultiFieldPanel,
InlinePanel,
FieldPanel,
PageChooserPanel,
)
from wagtail.core.models import Orderable
from wagtail.snippets.models import register_snippet
class Document(Orderable):
name = models.CharField(max_length=48)
icon = models.TextField()
document = models.ForeignKey("wagtaildocs.Document", on_delete=models.CASCADE)
docs = ParentalKey("DocumentCollection", related_name="documents")
panels = [
FieldPanel('name'),
FieldPanel('icon'),
FieldPanel('document'),
]
@register_snippet
class DocumentCollection(ClusterableModel):
"""The main menu clusterable model."""
title = models.CharField(max_length=100)
slug = AutoSlugField(populate_from="title", editable=True)
# slug = models.SlugField()
panels = [
MultiFieldPanel([
FieldPanel("title"),
FieldPanel("slug"),
], heading="Menu"),
InlinePanel("documents", label="Document")
]
def __str__(self):
return self.title
class MenuItem(Orderable):
link_title = models.CharField(
blank=True,
null=True,
max_length=50
)
link_url = models.CharField(
max_length=500,
blank=True
)
link_page = models.ForeignKey(
"wagtailcore.Page",
null=True,
blank=True,
related_name="+",
on_delete=models.CASCADE,
)
uri_fragment = models.CharField(max_length=50, null=True, blank=True)
sub_menu = models.ForeignKey(
"menus.Menu",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
fixed = models.BooleanField(default=False, blank=True)
noopener = models.BooleanField(default=False, blank=True)
new_tab = models.BooleanField(default=False, blank=True)
page = ParentalKey("Menu", related_name="menu_items")
panels = [
FieldPanel("link_title"),
FieldPanel("link_url"),
FieldPanel("uri_fragment"),
PageChooserPanel("link_page"),
FieldPanel("fixed"),
FieldPanel("new_tab"),
FieldPanel("noopener"),
SnippetChooserPanel("sub_menu"),
]
@property
def link(self):
if self.link_page:
return self.link_page.url
elif self.link_url:
return self.link_url
else:
return ''
@property
def title(self):
if self.link_page and not self.link_title:
return self.link_page.title
elif self.link_title:
return self.link_title
return 'Missing Title'
def save(self, *args, **kwargs):
menus = [self.sub_menu]
if self.sub_menu == self.page:
print('error')
return
super(MenuItem,self).save(*args, **kwargs)
@register_snippet
class Menu(ClusterableModel):
"""The main menu clusterable model."""
title = models.CharField(max_length=100)
slug = AutoSlugField(populate_from="title", editable=True)
# slug = models.SlugField()
panels = [
MultiFieldPanel([
FieldPanel("title"),
FieldPanel("slug"),
], heading="Menu"),
InlinePanel("menu_items", label="Menu Item")
]
def __str__(self):
return self.title
class WagtailLanguage(models.Model):
language_code = models.CharField('code', max_length=8, unique=True)
language_name = models.CharField('language', max_length=50, null=True, blank=True)
svg_flag = models.TextField(null=True, blank=True)
class Meta:
verbose_name = "wagtail_language"
verbose_name_plural = "wagtail_languages"
def __str__(self):
return self.language_code
def get_absolute_url(self):
return reverse("wagtail_language_detail", kwargs={"pk": self.pk})
```
#### File: myblog/templatetags/blog_extras.py
```python
import html
from django import template
from django.utils.html import strip_spaces_between_tags, strip_tags
from django.utils.text import Truncator
register = template.Library()
@register.filter(name='excerpt')
def excerpt_with_ptag_spacing(value, arg):
try:
limit = int(arg)
except ValueError:
return 'Invalid literal for int().'
# remove spaces between tags
value = strip_spaces_between_tags(value)
# add space before each P end tag (</p>)
value = value.replace("</p>"," </p>")
value = value.replace("</h2>"," </h2>")
value = value.replace("</h3>"," </h3>")
value = value.replace("</h4>"," </h4>")
value = value.replace("</h5>"," </h5>")
value = value.replace("</h6>"," </h6>")
# strip HTML tags
value = strip_tags(value)
# other usage: return Truncator(value).words(length, html=True, truncate=' see more')
return html.unescape(Truncator(value).words(limit))
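# Example template usage (illustrative; `post.body` is an assumed context variable):
#   {% load blog_extras %}
#   {{ post.body|excerpt:30 }}   -> first 30 words, tags stripped, entities unescaped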
```
#### File: wagtail/streams/models.py
```python
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
# Create your models here.
class ReviewFamily(models.Model):
name = models.CharField(_("Name"), max_length=50)
class Meta:
verbose_name = _("Review Family")
verbose_name_plural = _("Review Families")
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("ReviewFamily_detail", kwargs={"pk": self.pk})
class ReviewPlatform(models.Model):
name = models.CharField(_("Name"), max_length=50)
score = models.FloatField("Score", blank=True, null=True)
link = models.URLField(_("Link"), max_length=1024, blank=True, null=True)
icon = models.TextField(blank=True, null=True)
star_icon = models.TextField(blank=True, null=True)
class Meta:
verbose_name = _("ReviewPaltform")
verbose_name_plural = _("ReviewPaltforms")
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("ReviewPaltform_detail", kwargs={"pk": self.pk})
class Review(models.Model):
family = models.ManyToManyField("streams.ReviewFamily")
name = models.CharField(_("Name"), max_length=50, null=True, blank=True)
platform = models.ForeignKey("streams.ReviewPlatform", on_delete=models.SET_NULL, null=True)
review = models.TextField(_("Review"))
class Meta:
verbose_name = _("Review")
verbose_name_plural = _("Reviews")
def __str__(self):
return self.review
def get_absolute_url(self):
return reverse("Review_detail", kwargs={"pk": self.pk})
def get_families(self):
return "\n".join([p.name for p in self.family.all()])
class Colour(models.Model):
name = models.CharField(max_length=32)
hex_code = models.CharField(max_length=6)
class Meta:
verbose_name = _("Colour")
verbose_name_plural = _("Colours")
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("Colour_detail", kwargs={"pk": self.pk})
``` |
{
"source": "aadrm/imagzgame",
"score": 3
} |
#### File: aadrm/imagzgame/imagzle.py
```python
import pygame
import random
from board import Board
autoplay = input("Play yourself or let the computer try? 0 = human 1 (true) = computer ") == "1"
WIDTH = 800
HEIGHT = 800
FPS = 100000
TITLE = "Hard game from Imagzle"
#define colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (155,30,33)
GREEN = (0, 255, 0)
BLUE = (0, 120, 255)
YELLOW = (255, 255, 98)
PURPLE = (255, 0, 255)
ORANGE = (240,150,30)
ORANGE_LIGHT = (255,180,40)
# initialize pygame and create window
pygame.init()
# pygame.mixer.init()
pygame.display.set_caption(TITLE)
clock = pygame.time.Clock()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
all_sprites = pygame.sprite.Group()
# board class
tile_size = HEIGHT / 9
board = Board()
class Tile(pygame.sprite.Sprite):
px = 0
py = 0
def __init__(self, px, py):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((tile_size + 1, tile_size + 1))
self.image.fill((120, 120, 120))
self.rect = self.image.get_rect()
self.px = px
self.py = py
posx = px * tile_size + tile_size * .5
posy = py * tile_size + tile_size * .5
self.rect.center = (int(posx), int(posy))
def update(self):
char = board.layout[self.py][self.px]
# reset sprite
self.image.fill(ORANGE)
# show cursor position:
if board.cx == self.px and board.cy == self.py:
pygame.draw.circle(self.image, (ORANGE_LIGHT), (int(tile_size / 2), int(tile_size / 2)), int(tile_size / 2))
if char == 'o':
pygame.draw.circle(self.image, BLUE, (int(tile_size / 2), int(tile_size / 2)), int(tile_size / 3))
elif char == 'x':
pygame.draw.circle(self.image, RED, (int(tile_size / 2), int(tile_size / 2)), int(tile_size / 4))
elif char == '+':
pygame.draw.circle(self.image, YELLOW, (int(tile_size / 2), int(tile_size / 2)), int(tile_size / 4))
tiles = list()
# create tiles
for i in range(0, 9):
tiles.append(list())
for j in range(0, 9):
tiles[i].append(Tile(i, j))
all_sprites.add(tiles[i][j])
# game loop
running = True
board.print_board()
game_stage = 0
while running:
# Keep loop runnnig at the right speed
clock.tick(FPS)
# Process input
if autoplay:
board.auto_play()
else:
for event in pygame.event.get():
# check for closing the window
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
print ('keypress')
if event.key == pygame.K_UP:
board.move('u')
elif event.key == pygame.K_DOWN:
board.move('d')
elif event.key == pygame.K_LEFT:
board.move('l')
elif event.key == pygame.K_RIGHT:
board.move('r')
elif event.key == pygame.K_SPACE:
board.select_cursor()
elif event.key == pygame.K_r:
board.reset()
elif event.key == pygame.K_b:
board.undo_move()
board.print_board()
# Update
all_sprites.update()
# Draw
screen.fill((255, 255, 255))
all_sprites.draw(screen)
# *after* drawing everything, flip the display
pygame.display.flip()
pygame.quit()
``` |
{
"source": "aadroher/aoc-2020",
"score": 3
} |
#### File: src/day_7/solution.py
```python
from pathlib import Path
from pprint import pprint as pp
from functools import reduce
from collections import namedtuple
current_dir = Path(__file__).parent
file_handler = open(current_dir/"input.txt", 'r')
file_lines = [
line for line
in file_handler.readlines()
]
def parse_content(contained_bag_str):
number, *colour_tokens = contained_bag_str.strip().split(' ')[:3]
return {
'number': int(number),
'colour': "_".join(colour_tokens)
}
def parse_rule(rule_str):
container_statement, content_statement = (
token.strip()
for token
in rule_str.split('contain')
)
container_colour = "_".join(container_statement.split()[:2])
contents = [
parse_content(contained_bag_str)
for contained_bag_str
in content_statement.split(',')
] if "no other" not in content_statement else []
return {
'colour': container_colour,
'contents': contents
}
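# Example: parse_rule("light red bags contain 1 bright white bag, 2 muted yellow bags.")
# returns {'colour': 'light_red',
#          'contents': [{'number': 1, 'colour': 'bright_white'},
#                       {'number': 2, 'colour': 'muted_yellow'}]}
# A rule whose right-hand side is "no other bags" yields an empty contents list.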
def get_membership_edges(rules):
return reduce(
lambda membership_set, rule: {
*membership_set,
*{
(content['colour'], rule['colour'])
for content
in rule['contents']
}
}, rules, set()
)
def get_membership_paths(path_prefixes, edge_set):
path_extensions = reduce(
lambda new_path_prefixes, path_prefix: {
*new_path_prefixes,
*{
(*path_prefix, parent)
for child, parent
in edge_set
if path_prefix[-1] == child
}
},
path_prefixes,
        set()
)
if len(path_extensions) == 0:
return path_extensions
else:
return {
*path_extensions,
*get_membership_paths(path_extensions, edge_set)
}
def count_child_bags(colour, rules):
rule = next(
rule for rule in rules if rule['colour'] == colour
)
return sum(
content['number'] + (
content['number'] * count_child_bags(content['colour'], rules)
)
for content in rule['contents']
)
rules = [
parse_rule(rule_str.strip())
for rule_str
in file_lines
]
membership_edges = get_membership_edges(rules)
paths = get_membership_paths({('shiny_gold',)}, membership_edges)
end_colours = {path[-1] for path in paths}
num_end_colours = len(end_colours)
pp(f"Puzzle 1: {num_end_colours}")
child_bags = count_child_bags(colour='shiny_gold', rules=rules)
pp(f"Puzzle 2: {child_bags}")
```
#### File: src/day_9/solution.py
```python
from pathlib import Path
from pprint import pprint as pp
from itertools import product, combinations
PREFIX_LENGTH = 25
current_dir = Path(__file__).parent
file_handler = open(current_dir/"input.txt", 'r')
numbers = [
int(line.strip())
for line
in file_handler.readlines()
]
indexes = range(0, len(numbers))
def is_valid(index, number):
start = index - PREFIX_LENGTH
end = index
prefix = numbers[start:end]
return any(
x + y == number
for x, y in product(prefix, prefix)
)
first_invalid_number = next(
number
for i, number
in enumerate(numbers)
if i >= PREFIX_LENGTH and not is_valid(i, number)
)
intervals = (
numbers[start: end + 1]
for start, end
in combinations(indexes, 2)
if start + 3 < end
)
encryption_weakness_range = next(
interval
for interval in intervals
if sum(interval) == first_invalid_number
)
encryption_weakness = \
min(encryption_weakness_range) + max(encryption_weakness_range)
pp(f"Puzzle 1: {first_invalid_number}")
pp(f"Puzzle 2: {encryption_weakness}")
``` |
{
"source": "AadSah/bona-fide",
"score": 4
} |
#### File: bona-fide/backend/compareContent.py
```python
def check(content):
with open("extractedTextFromURL.txt") as f:
datafile = f.readlines()
for line in datafile:
if content in line:
# print("A Plag Found!!!...\n")
print("Comparison DONE!!!...True\n")
return True
print("Comparison DONE!!!...False\n")
return False
``` |
{
"source": "AadSah/fixmatch",
"score": 2
} |
#### File: fixmatch/cta/cta_remixmatch.py
```python
import os
from absl import app
from absl import flags
from cta.lib.train import CTAClassifySemi
from libml import utils, data
from remixmatch_no_cta import ReMixMatch
FLAGS = flags.FLAGS
class CTAReMixMatch(ReMixMatch, CTAClassifySemi):
pass
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.MANY_DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = CTAReMixMatch(
os.path.join(FLAGS.train_dir, dataset.name, CTAReMixMatch.cta_name()),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
K=FLAGS.K,
beta=FLAGS.beta,
w_kl=FLAGS.w_kl,
w_match=FLAGS.w_match,
w_rot=FLAGS.w_rot,
redux=FLAGS.redux,
use_dm=FLAGS.use_dm,
use_xe=FLAGS.use_xe,
warmup_kimg=FLAGS.warmup_kimg,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('beta', 0.75, 'Mixup beta distribution.')
flags.DEFINE_float('w_kl', 0.5, 'Weight for KL loss.')
flags.DEFINE_float('w_match', 1.5, 'Weight for distribution matching loss.')
flags.DEFINE_float('w_rot', 0.5, 'Weight for rotation loss.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
flags.DEFINE_integer('warmup_kimg', 1024, 'Unannealing duration for SSL loss.')
flags.DEFINE_enum('redux', '1st', 'swap mean 1st'.split(), 'Logit selection.')
flags.DEFINE_bool('use_dm', True, 'Whether to use distribution matching.')
flags.DEFINE_bool('use_xe', True, 'Whether to use cross-entropy or Brier.')
FLAGS.set_default('augment', 'd.d.d')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 4) #----Changed 64-->4
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
``` |
{
"source": "aadu999/gnfetcher",
"score": 3
} |
#### File: gnfetcher/gnfetcher/gnfetcher.py
```python
import json
import requests
from bs4 import BeautifulSoup
from .utils import editionMap, topicMap, langMap
from .userexception import NotFound
class gnfetcher:
def __init__(self, edition='United States (English)',
topic='top stories', location=None,
query=None, language='english'):
'''
constructor function
'''
# list of editions and topics
self.editions = list(editionMap)
self.topics = list(topicMap)
self.languages = list(langMap)
# default parameter values
self.edition = edition
self.topic = topic
self.location = location
self.query = query
self.language = language
# parameters to be passed in HTTP request
self.params = {'output': 'atom',
'ned': self.edition,
'topic': self.topic,
'geo': self.location,
'q': self.query,
'hl': self.language}
def get_config(self):
'''
function to get current configuration
'''
config = {
'edition': self.edition,
'topic': self.topic,
'language': self.language,
'location': self.location,
'query': self.query
}
return config
def reset(self):
'''
function to reset the parameters
'''
self.edition = 'United States (English)'
self.language = 'english'
self.location = None
self.query = None
self.topic = 'top stories'
def get_news(self):
'''
function to fetch news articles
'''
status = self.set_params()
# params not set properly
if status is False:
return
soup = self.load_feed()
articles = self.scrape_feed(soup)
return articles
def set_params(self):
'''
function to set params for HTTP request
'''
# setting edition
try:
self.params['ned'] = editionMap[self.edition]
except KeyError:
print(f"{self.edition} edition not found.\n"
f"Use editions attribute to get list of editions.")
return False
# setting topic
try:
self.params['topic'] = topicMap[self.topic]
except KeyError:
print(f"{self.topic} topic not found.\n"
f"Use topics attribute to get list of topics.")
return False
# setting language
try:
self.params['hl'] = langMap[self.language]
except KeyError:
            print(f"{self.language} language not found.\n"
                  f"Use languages attribute to get list of languages.")
return False
# setting query
if self.query is not None:
self.params['q'] = self.query
# topic overrides query parameter. So, clearing it.
self.params['topic'] = None
# setting location
if self.location is not None:
self.params['geo'] = self.location
# topic overrides location parameter. So, overriding it.
self.params['topic'] = None
# params setting successful
return True
def load_feed(self):
'''
function to load atom feed
'''
url = "https://news.google.com/news"
resp = requests.get(url, params=self.params)
soup = BeautifulSoup(resp.content, 'html5lib')
return soup
def scrape_feed(self, soup):
'''
function to scrape atom feed
'''
entries = soup.findAll('entry')
articles = []
for entry in entries:
article = {}
article['title'] = entry.title.text
article['link'] = entry.link['href'].split('&url=')[1]
article['releasedAt'] = entry.updated.text
try:
string = entry.content.text.split('src=\"')[1].split('\"')[0]
article['img'] = "https:" + string
except Exception:
article['img'] = None
pass
articles.append(article)
try:
if len(articles) == 0:
raise NotFound
except NotFound:
print("The articles for the given response are not found.")
return
return articles
``` |
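A brief usage sketch for the class above. The import path is an assumption based on the package layout (`gnfetcher/gnfetcher/gnfetcher.py`), and fetching articles requires network access:
```python
from gnfetcher.gnfetcher import gnfetcher

fetcher = gnfetcher()            # defaults: US (English) edition, top stories
fetcher.query = 'electric cars'  # free-text query; it overrides the topic when set
articles = fetcher.get_news()    # list of dicts with title, link, img and releasedAt
if articles:
    print(articles[0]['title'], articles[0]['link'])
print(fetcher.get_config())      # inspect the current settings
fetcher.reset()                  # restore the defaults
```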
{
"source": "aadubCoveo/pokemon-challenge-scrapper",
"score": 2
} |
#### File: aadubCoveo/pokemon-challenge-scrapper/main.py
```python
import os
import requests
from bs4 import BeautifulSoup
from coveopush import CoveoConstants
from coveopush import CoveoPermissions
from coveopush import CoveoPush
from coveopush import Document
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
def find_gen(index):
if 1 <= index <= 151:
return '1'
if 152 <= index <= 251:
return '2'
if 252 <= index <= 386:
return '3'
if 387 <= index <= 493:
return '4'
if 494 <= index <= 649:
return '5'
if 650 <= index <= 721:
return '6'
if 722 <= index <= 809:
return '7'
if 810 <= index <= 898:
return '8'
def scrap():
pokemon_list_page = requests.get('https://pokemondb.net/pokedex/national')
soup_pokemon_list_page = BeautifulSoup(pokemon_list_page.content, 'html.parser')
results = soup_pokemon_list_page.find(id='main')
info_cards = results.find_all('div', class_='infocard')
coveo_source_id = os.environ.get("COVEO_SOURCE_ID")
coveo_api_key = os.environ.get("COVEO_API_KEY")
coveo_org_id = os.environ.get("COVEO_ORG_ID")
push = CoveoPush.Push(coveo_source_id, coveo_org_id, coveo_api_key)
push.Start(True, True)
push.SetSizeMaxRequest(150 * 1024 * 1024)
user_email = os.environ.get("USER_EMAIL")
my_permissions = CoveoPermissions.PermissionIdentity(CoveoConstants.Constants.PermissionIdentityType.User, "",
user_email)
for info_card in info_cards:
pokemon_name = info_card.find('a', class_='ent-name').text
pokemon_page_url = 'https://pokemondb.net' + info_card.find('a', class_='ent-name')['href']
document = Document(pokemon_page_url)
        pokemon_picture_span = info_card.find('span', class_='img-fixed img-sprite')
        if pokemon_picture_span is None:
            pokemon_picture_url = info_card.find('span', class_='img-fixed img-sprite img-sprite-v18')['data-src']
        else:
            pokemon_picture_url = pokemon_picture_span['data-src']
pokemon_number = info_card.find('small').text[1:]
pokemon_gen = find_gen(int(pokemon_number))
pokemon_types = []
pokemon_types_tags = info_card.find_all('small')[1].find_all('a')
print('scrapping pokemon: ' + pokemon_name + ' | index : ' + pokemon_number)
for pokemon_type_tag in pokemon_types_tags:
pokemon_types.append(pokemon_type_tag.text)
pokemon_page = requests.get(pokemon_page_url)
soup_pokemon_page = BeautifulSoup(pokemon_page.content, 'html.parser')
results = soup_pokemon_page.find(id='main')
tables = results.find_all('table', class_='vitals-table')
pokemon_species = tables[0].find_all('tr')[2].find('td').text
pokemon_height = tables[0].find_all('tr')[3].find('td').text
pokemon_weight = tables[0].find_all('tr')[4].find('td').text
base_stats = {}
base_stats_tags = tables[3].find_all('tr')
for base_stat_tag in base_stats_tags:
base_stats[base_stat_tag.find('th').text] = base_stat_tag.find('td').text
defense = {}
defenses_tables = results.find_all('table', class_='type-table type-table-pokedex')
for defense_table in defenses_tables:
for x in range(0, len(defense_table.find_all('tr')[0].find_all('th'))):
defense[defense_table.find_all('tr')[0].find_all('th')[x].find('a').text] = \
defense_table.find_all('tr')[1].find_all('td')[x].text
document.AddMetadata(defense_table.find_all('tr')[0].find_all('th')[x].find('a').text,
defense_table.find_all('tr')[1].find_all('td')[x].text)
document.Title = pokemon_name
document.SetData(pokemon_page.text)
document.FileExtension = ".html"
document.AddMetadata('name', pokemon_name)
document.AddMetadata('url', pokemon_page_url)
document.AddMetadata('number', pokemon_number)
document.AddMetadata('generation', pokemon_gen)
document.AddMetadata('types', pokemon_types)
document.AddMetadata('specie', pokemon_species)
document.AddMetadata('weight', pokemon_weight)
document.AddMetadata('weight_int', pokemon_weight[0:pokemon_weight.index('kg') - 1])
document.AddMetadata('height', pokemon_height)
document.AddMetadata('height_int', pokemon_height[0:pokemon_height.index('m') - 1])
document.AddMetadata('hp', base_stats.get('HP'))
document.AddMetadata('hp_int', base_stats.get('HP'))
document.AddMetadata('attack', base_stats.get('Attack'))
document.AddMetadata('attack_int', base_stats.get('Attack'))
document.AddMetadata('defense', base_stats.get('Defense'))
document.AddMetadata('defense_int', base_stats.get('Defense'))
document.AddMetadata('sp_atk', base_stats.get('Sp.Atk'))
document.AddMetadata('sp_def', base_stats.get('Sp.Def'))
document.AddMetadata('speed', base_stats.get('Speed'))
document.AddMetadata('speed_int', base_stats.get('Speed'))
document.AddMetadata('picture_url', pokemon_picture_url)
document.SetAllowedAndDeniedPermissions([my_permissions], [], True)
print('Send: ' + pokemon_name + ' | index : ' + pokemon_number + ' to the PUSH API')
push.Add(document)
print('Sent: ' + pokemon_name + ' | index : ' + pokemon_number + ' to the PUSH API')
push.End(True, True)
if __name__ == '__main__':
scrap()
``` |
{
"source": "aadu/cake",
"score": 3
} |
#### File: cake/cake/buzz.py
```python
import atexit
import time
import pigpio
from random import randint
BUZZER_PIN = 4 # pin11
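# NOTE: the LCD base class extended below (and its `bus` attribute exposing a pigpio
# handle) is provided elsewhere in the project; it is not defined or imported in this file.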
class BuzzerLCD(LCD):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pi = self.bus.pi
self.pi.set_mode(BUZZER_PIN, pigpio.OUTPUT)
self.pi.write(BUZZER_PIN, pigpio.HIGH)
atexit.register(self.destroy)
def destroy(self):
self.pi.write(BUZZER_PIN, pigpio.HIGH)
self.pi.cleanup()
def _beep(self, seconds=None):
if seconds is None:
seconds = randint(5, 100) / 1000
self.pi.write(BUZZER_PIN, pigpio.LOW)
time.sleep(seconds)
self.pi.write(BUZZER_PIN, pigpio.HIGH)
time.sleep(seconds)
def write(self, value):
self._beep()
super().write(value)
``` |
{
"source": "Aadvait/EvalAI",
"score": 2
} |
#### File: apps/jobs/models.py
```python
from __future__ import unicode_literals
import datetime
import logging
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Max
from rest_framework.exceptions import PermissionDenied
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from base.models import (TimeStampedModel, )
from base.utils import RandomFileName
from challenges.models import ChallengePhase
from participants.models import ParticipantTeam
logger = logging.getLogger(__name__)
# submission.pk is not available when saving input_file
# OutCome: `input_file` was saved for submission in folder named `submission_None`
# why is the hack not done for `stdout_file` and `stderr_file`
# Because they will be saved only after a submission instance is saved(pk will be available)
@receiver(pre_save, sender='jobs.Submission')
def skip_saving_file(sender, instance, **kwargs):
if not instance.pk and not hasattr(instance, '_input_file'):
setattr(instance, '_input_file', instance.input_file)
instance.input_file = None
@receiver(post_save, sender='jobs.Submission')
def save_file(sender, instance, created, **kwargs):
if created and hasattr(instance, '_input_file'):
instance.input_file = getattr(instance, '_input_file')
instance.save()
class Submission(TimeStampedModel):
SUBMITTED = "submitted"
RUNNING = "running"
FAILED = "failed"
CANCELLED = "cancelled"
FINISHED = "finished"
SUBMITTING = "submitting"
STATUS_OPTIONS = (
(SUBMITTED, SUBMITTED),
(RUNNING, RUNNING),
(FAILED, FAILED),
(CANCELLED, CANCELLED),
(FINISHED, FINISHED),
(SUBMITTING, SUBMITTING),
)
participant_team = models.ForeignKey(
ParticipantTeam, related_name='submissions')
challenge_phase = models.ForeignKey(
ChallengePhase, related_name='submissions')
created_by = models.ForeignKey(User)
status = models.CharField(max_length=30, choices=STATUS_OPTIONS, db_index=True)
is_public = models.BooleanField(default=False)
is_flagged = models.BooleanField(default=False)
submission_number = models.PositiveIntegerField(default=0)
download_count = models.IntegerField(default=0)
output = models.TextField(blank=True, null=True)
submitted_at = models.DateTimeField(auto_now_add=True, db_index=True)
started_at = models.DateTimeField(null=True, blank=True, db_index=True)
completed_at = models.DateTimeField(null=True, blank=True, db_index=True)
when_made_public = models.DateTimeField(null=True, blank=True)
input_file = models.FileField(upload_to=RandomFileName("submission_files/submission_{id}"))
stdout_file = models.FileField(upload_to=RandomFileName("submission_files/submission_{id}"), null=True, blank=True)
stderr_file = models.FileField(upload_to=RandomFileName("submission_files/submission_{id}"), null=True, blank=True)
submission_result_file = models.FileField(
upload_to=RandomFileName("submission_files/submission_{id}"), null=True, blank=True)
submission_metadata_file = models.FileField(
upload_to=RandomFileName("submission_files/submission_{id}"), null=True, blank=True)
execution_time_limit = models.PositiveIntegerField(default=300)
method_name = models.CharField(max_length=1000, null=True, db_index=True)
method_description = models.TextField(blank=True, null=True)
publication_url = models.CharField(max_length=1000, null=True)
project_url = models.CharField(max_length=1000, null=True)
def __unicode__(self):
return '{}'.format(self.id)
class Meta:
app_label = 'jobs'
db_table = 'submission'
@property
def execution_time(self):
"""Returns the execution time of a submission"""
# if self.self.completed_at and self.started_at:
try:
return (self.completed_at - self.started_at).total_seconds()
        except Exception:
return "None"
# else:
# return None
def save(self, *args, **kwargs):
if not self.pk:
sub_num = Submission.objects.filter(
challenge_phase=self.challenge_phase,
participant_team=self.participant_team).aggregate(
Max('submission_number'))['submission_number__max']
if sub_num:
self.submission_number = sub_num + 1
else:
self.submission_number = 1
failed_count = Submission.objects.filter(
challenge_phase=self.challenge_phase,
participant_team=self.participant_team,
status=Submission.FAILED).count()
successful_count = self.submission_number - failed_count
if successful_count > self.challenge_phase.max_submissions:
logger.info("Checking to see if the successful_count {0} is greater than maximum allowed {1}".format(
successful_count, self.challenge_phase.max_submissions))
logger.info("The submission request is submitted by user {0} from participant_team {1} ".format(
self.created_by.pk, self.participant_team.pk))
raise PermissionDenied({'error': 'The maximum number of submissions has been reached'})
else:
                logger.info("Submission count is below the maximum for user {0} from participant_team {1} for challenge_phase {2}".format(
self.created_by.pk, self.participant_team.pk, self.challenge_phase.pk))
if hasattr(self.challenge_phase, 'max_submissions_per_day'):
submissions_done_today_count = Submission.objects.filter(
challenge_phase__challenge=self.challenge_phase.challenge,
participant_team=self.participant_team,
challenge_phase=self.challenge_phase,
submitted_at__gte=datetime.date.today()).count()
failed_count = Submission.objects.filter(
challenge_phase=self.challenge_phase,
participant_team=self.participant_team,
status=Submission.FAILED,
submitted_at__gte=datetime.date.today()).count()
if ((submissions_done_today_count + 1 - failed_count > self.challenge_phase.max_submissions_per_day) or
(self.challenge_phase.max_submissions_per_day == 0)):
logger.info("Permission Denied: The maximum number of submission for today has been reached")
raise PermissionDenied({'error': 'The maximum number of submission for today has been reached'})
self.is_public = (True if self.challenge_phase.is_submission_public else False)
self.status = Submission.SUBMITTED
submission_instance = super(Submission, self).save(*args, **kwargs)
return submission_instance
``` |
{
"source": "aadyajha12/Covid19-SmartAlarm",
"score": 2
} |
#### File: Covid19-SmartAlarm/CA3/news_test.py
```python
import json
from newsapi import covid_news
def news_test_one():
news_json = json.load(open('gb-news.json'))
    news = covid_news(news_json)
    assert news[0]['title'] is not None
```
#### File: Covid19-SmartAlarm/CA3/weatherapi.py
```python
import requests
import json
import config
from flask import Markup
with open('config.json', 'r') as f:
json_file = json.load(f)
weather_api_key= json_file["weather_api_key"]
city = json_file['city_name']
def extract_weather(weather_api_key,city_name):
base_url = "http://api.openweathermap.org/data/2.5/weather?"
complete_url = base_url + "appid=" + weather_api_key + "&q=" + city_name
    # request the current weather and parse the JSON response
x = requests.get(complete_url).json()
if x["cod"] != "404":
y = x['main']
current_temperature = y["temp"]
current_humidity = y["humidity"]
z = x["weather"]
weather_description = z[0]["description"]
# print following values
        print(" Temperature (in Celsius) = " +
str(round(current_temperature - 273.15)) +
"\n Humidity (in percentage) = " + str(current_humidity) +
"\n Description = " + str(weather_description))
if __name__ == '__main__':
(extract_weather(weather_api_key,city))
``` |
{
"source": "aaearon/pysmoothstreams",
"score": 3
} |
#### File: pysmoothstreams/pysmoothstreams/auth.py
```python
import json
import logging
import urllib.request
import http.cookiejar
from datetime import datetime, timedelta
from pysmoothstreams import Service
from pysmoothstreams.exceptions import InvalidService
from json import JSONDecodeError
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class AuthSign:
def __init__(self, service=Service.LIVE247, auth=(None, None)):
self.service = self.__set_service(service)
self.username = auth[0]
self.password = auth[1]
self.expiration_date = None
self.hash = None
self.url = "https://auth.smoothstreams.tv/hash_api.php"
logger.debug(
"Created {name} with username {username} and service {service}".format(
name=self.__class__.__name__,
username=self.username,
service=self.service,
)
)
def __set_service(self, service):
if not isinstance(service, Service):
raise InvalidService(f"{service} is not a valid service!")
return service
def _get_hash_via_hash_api(self):
logger.debug("Getting hash using hash API")
        data = {
            "username": self.username,
            "site": self.service.value,
            "password": self.password
        }
data = urllib.parse.urlencode(data)
hash_url = "{url}?{data}".format(url=self.url, data=data)
headers = {"User-Agent": "pysmoothstreams/0.18"}
logger.debug("Fetching hash at {hash_url}".format(hash_url=hash_url))
with urllib.request.urlopen(urllib.request.Request(url=hash_url,headers=headers)) as response:
try:
as_json = json.loads(response.read())
if "hash" in as_json:
self.hash = as_json["hash"]
self.set_expiration_date(as_json["valid"])
except Exception as e:
logger.critical(e)
def fetch_hash(self):
now = datetime.now()
if self.hash is None or now > self.expiration_date:
logger.warning(
"Hash is either none or may be expired. Getting a new one..."
)
if self.username is not None and self.password is not None:
logger.debug("Username and password are not none.")
try:
self._get_hash_via_hash_api()
except urllib.error.HTTPError as error:
                    logger.info("Could not retrieve hash using the hash API (HTTP {error_code}); trying via the modern player.".format(error_code=error.code))
self._get_hash_via_player()
except Exception as e:
logger.critical(
"Could not fetch hash via hash API or modern player. Is SmoothStreams working?"
)
logger.critical(e)
else:
raise ValueError("Username or password is not set.")
logger.debug("Got a hash!")
return self.hash
def set_expiration_date(self, minutes):
now = datetime.now()
self.expiration_date = now + timedelta(minutes=minutes - 1)
logger.debug(
"Expiration date set to {expiration_date}".format(
expiration_date=self.expiration_date
)
)
def _get_hash_via_player(self):
logger.debug("Getting hash via modern player")
# Set the API URL used by each site's modern player. I can only guarantee that
# the one for Live247 works.
if self.service == Service.LIVE247:
api_url = (
"https://live247.tv/players/modern/api.php?action=regenerate&p=ipv4&server_id=3"
)
if self.service == Service.STARSTREAMS:
api_url = (
"https://starstreams.tv/players/modern/api.php?action=regenerate&p=ipv4&server_id=3"
)
if self.service == Service.STREAMTVNOW:
api_url = (
"https://streamtvnow.tv/players/modern/api.php?action=regenerate&p=ipv4&server_id=3"
)
# Set up the needed parameters to login to the above sites with. The sites are actually
# WordPress so we need to login, grab the authorization cookie, then hit the URL again
# to actually get the response we want.
        data = {
            "username": self.username,
            "password": self.password,
            "protect_login": "1",
        }
data = urllib.parse.urlencode(data)
data = data.encode("ascii")
request = urllib.request.Request(url=api_url, data=data)
# We need to set up an opener to reuse between requests as well as define a CookieJar
# as by default urllib is stateless.
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
try:
response = opener.open(request)
if response.code == 200:
response = opener.open(api_url)
try:
as_json = json.loads(response.read())
self.hash = as_json["hash"]
self.set_expiration_date(as_json["expire"])
except JSONDecodeError as e:
logger.critical(
"Could not load response as json! Possibly triggered CAPTCHA?"
)
raise e
except Exception as e:
logger.critical(e)
except Exception as e:
logger.critical(e)
raise
```
#### File: pysmoothstreams/pysmoothstreams/playlist.py
```python
from pysmoothstreams import Server, Quality, Protocol, Service, Feed
class Playlist:
def __init__(self, auth_sign, guide):
self.auth_sign = auth_sign
self.guide = guide
self.channels = guide.channels
def generate_m3u_playlist(
self, server, auth_sign, quality=Quality.HD, protocol=Protocol.HLS
):
playlist = "#EXTM3U\n"
for channel in self.channels:
clean_channel_name = channel["name"].strip()
playlist += '#EXTINF: tvg-id="{channel_id}" tvg-name="{clean_channel_name}" tvg-logo="{channel_icon}" tvg-chno="{channel_number}", {clean_channel_name}\n'.format(
channel_id=channel["id"],
clean_channel_name=clean_channel_name,
channel_icon=channel["icon"],
channel_number=channel["number"],
)
playlist += "{url}\n".format(
url=self.guide.build_stream_url(
server, channel["number"], auth_sign, quality, protocol
)
)
return playlist
```
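Before the tests, a minimal sketch of how AuthSign, Guide and Playlist are meant to fit together. The credentials are placeholders, and building the guide and stream URLs requires network access and a valid subscription:
```python
from pysmoothstreams import Feed, Protocol, Quality, Server, Service
from pysmoothstreams.auth import AuthSign
from pysmoothstreams.guide import Guide
from pysmoothstreams.playlist import Playlist

auth_sign = AuthSign(service=Service.LIVE247, auth=("username", "password"))
guide = Guide(Feed.SMOOTHSTREAMS)        # downloads and parses the channel feed
playlist = Playlist(auth_sign, guide)
m3u = playlist.generate_m3u_playlist(Server.EU_MIX, auth_sign,
                                     quality=Quality.HD, protocol=Protocol.HLS)
print(m3u)
```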
#### File: pysmoothstreams/tests/test_guide.py
```python
from datetime import datetime, timedelta
from unittest import TestCase
from unittest.mock import patch, MagicMock
from pysmoothstreams import Server, Quality, Protocol, Service, Feed
from pysmoothstreams.auth import AuthSign
from pysmoothstreams.exceptions import (
InvalidServer,
InvalidQuality,
InvalidProtocol,
InvalidContentType,
)
from pysmoothstreams.guide import Guide
class TestGuide(TestCase):
@patch("urllib.request.urlopen")
def setUp(self, mock_urlopen):
with open("./tests/test_altepg1.xml", "r") as f:
json_feed = f.read()
cm = MagicMock()
cm.getcode.return_value = 200
cm.read.return_value = json_feed.encode()
cm.info.return_value = {
"Expires": "Sat, 25 Aug 2018 22:39:41 GMT",
"Content-Type": "text/xml",
}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.g = Guide(Feed.SMOOTHSTREAMS)
def test_build_stream_url_live247_rtmp(self):
a = AuthSign(service=Service.LIVE247, auth=("fake", "fake"))
# set hash and expiration manually
a.expiration_date = datetime.now() + timedelta(minutes=240)
a.hash = "abc1234"
generated = self.g.build_stream_url(
Server.NA_EAST_NY, 44, a, Quality.HD, Protocol.RTMP
)
self.assertEqual(
"rtmp://dnae2.smoothstreams.tv:3625/view247/ch44q1.stream/playlist.m3u8?wmsAuthSign=abc1234",
generated,
)
def test_build_stream_url_streamtvnow_hls(self):
a = AuthSign(service=Service.STREAMTVNOW, auth=("fake", "fake"))
# set hash and expiration manually
a.expiration_date = datetime.now() + timedelta(minutes=240)
a.hash = "abc1234"
generated = self.g.build_stream_url(
Server.ASIA_MIX, 10, a, Quality.LQ, Protocol.HLS
)
self.assertEqual(
"https://dAP.smoothstreams.tv:443/viewstvn/ch10q3.stream/playlist.m3u8?wmsAuthSign=abc1234",
generated,
)
def test_build_stream_url_streamtvnow_mpeg(self):
a = AuthSign(service=Service.STREAMTVNOW, auth=("fake", "fake"))
# set hash and expiration manually
a.expiration_date = datetime.now() + timedelta(minutes=240)
a.hash = "abc1234"
generated = self.g.build_stream_url(
Server.EU_MIX, 3, a, Quality.LQ, Protocol.MPEG
)
self.assertEqual(
"https://deu.smoothstreams.tv:443/viewstvn/ch03q3.stream/mpeg.2ts?wmsAuthSign=abc1234",
generated,
)
def test_build_stream_url_rtsp(self):
a = AuthSign(service=Service.STREAMTVNOW, auth=("fake", "fake"))
a.expiration_date = datetime.now() + timedelta(minutes=240)
a.hash = "abc1234"
generated = self.g.build_stream_url(
Server.EU_MIX, 3, a, Quality.LQ, Protocol.RTSP
)
self.assertEqual(
"rtsp://deu.smoothstreams.tv:2935/viewstvn/ch03q3.stream/playlist.m3u8?wmsAuthSign=abc1234",
generated,
)
def test_build_stream_url_live247_dash(self):
a = AuthSign(service=Service.LIVE247, auth=("fake", "fake"))
# set hash and expiration manually
a.expiration_date = datetime.now() + timedelta(minutes=240)
a.hash = "abc1234"
generated = self.g.build_stream_url(
Server.NA_EAST_NY, 44, a, Quality.HD, Protocol.DASH
)
self.assertEqual(
"https://dnae2.smoothstreams.tv:443/view247/ch44.smil/manifest.mpd?wmsAuthSign=abc1234",
generated,
)
def test_build_stream_url_live247_hls_adaptive(self):
a = AuthSign(service=Service.LIVE247, auth=("fake", "fake"))
# set hash and expiration manually
a.expiration_date = datetime.now() + timedelta(minutes=240)
a.hash = "abc1234"
generated = self.g.build_stream_url(
Server.NA_EAST_NY, 44, a, Quality.HD, Protocol.HLSA
)
self.assertEqual(
"https://dnae2.smoothstreams.tv:443/view247/ch44.smil/playlist.m3u8?wmsAuthSign=abc1234",
generated,
)
def test_generate_streams(self):
a = AuthSign(service=Service.STREAMTVNOW, auth=("fake", "fake"))
with self.assertRaises(InvalidServer) as context:
self.g.generate_streams("FakeServer", Quality.HD, a, protocol=Protocol.HLS)
self.assertTrue("FakeServer is not a valid server!" in str(context.exception))
with self.assertRaises(InvalidQuality) as context:
self.g.generate_streams(Server.EU_MIX, 29, a, protocol=Protocol.HLS)
self.assertTrue("29 is not a valid quality!" in str(context.exception))
with self.assertRaises(InvalidProtocol) as context:
self.g.generate_streams(Server.EU_MIX, Quality.LQ, a, protocol="abc")
self.assertTrue("abc is not a valid protocol!" in str(context.exception))
@patch("urllib.request.urlopen")
def test__fetch_channels(self, mock_urlopen):
self.assertEqual(150, len(self.g.channels))
self.assertEqual("ESPNNews", self.g.channels[0]["name"])
self.assertEqual(1, self.g.channels[0]["number"])
self.assertTrue(
self.g.channels[149]["icon"].endswith(
"smoothstreams.tv/assets/images/channels/150.png"
)
)
@patch("urllib.request.urlopen")
def test__detect_xml_feed_type(self, mock_urlopen):
cm = MagicMock()
cm.getcode.return_value = 200
cm.info.return_value = {
"Expires": "Tue, 07 Jan 2020 00:53:17 GMT",
"Content-Type": "text/xml",
}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.assertEqual("text/xml", self.g._get_content_type())
@patch("urllib.request.urlopen")
def test__detect_unknown_feed_type(self, mock_urlopen):
cm = MagicMock()
cm.getcode.return_value = 404
cm.info.return_value = {
"Expires": "Sat, 25 Aug 2018 22:39:41 GMT",
"Content-Type": "application/html",
}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
with self.assertRaises(InvalidContentType):
Guide()
@patch("urllib.request.urlopen")
def test__gzipped_feed(self, mock_urlopen):
with open("tests/test_xmltv1.xml.gz", "rb") as f:
feed = f.read()
cm = MagicMock()
cm.read.return_value = feed # No decode, already bytes
cm.info.return_value = {
"Expires": "Tue, 07 Jan 2020 00:53:17 GMT",
"Content-Type": "application/octet-stream",
}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.assertEqual(150, len(self.g.channels))
@patch("urllib.request.urlopen")
def test__add_xml_declaration(self, mock_urlopen):
with open("./tests/test_altepg1.xml", "r") as f:
json_feed = f.read()
cm = MagicMock()
cm.getcode.return_value = 200
cm.read.return_value = json_feed.encode()
cm.info.return_value = {
"Expires": "Sat, 25 Aug 2018 22:39:41 GMT",
"Content-Type": "text/xml",
}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
g = Guide(Feed.ALTEPG)
self.assertTrue(g.epg_data.startswith(b"<?xml version"))
``` |
{
"source": "a-a-egorovich/training_extensions",
"score": 3
} |
#### File: chest_xray_screening/utils/get_config.py
```python
import os
import json
def get_config(action, optimised = False):
""" action: train, test, export or gdrive
optimised: False --> DenseNet121
True --> DenseNet121Eff
"""
root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
config_path = os.path.join(root_path, 'configs')
if action == 'download':
with open(os.path.join(config_path, 'download_configs.json')) as f1:
config = json.load(f1)
else:
if optimised:
with open(os.path.join(config_path, 'densenet121eff_config.json')) as f1:
config_file = json.load(f1)
config = config_file[action]
else:
with open(os.path.join(config_path, 'densenet121_config.json')) as f1:
config_file = json.load(f1)
config = config_file[action]
return config
``` |
{
"source": "aaelfiky/gameball-python",
"score": 3
} |
#### File: gameball-python/gameball/api_requestor.py
```python
from __future__ import absolute_import, division, print_function
import gameball.constants, gameball.http_client
from gameball.exceptions.gameball_exception import GameballException, AuthenticationError, APIError
from gameball.gameball_response import gameballResponse
import gameball
class APIRequestor(object):
def __init__(
self,
key=None,
client=None,
api_base=None,
):
self.api_base = api_base or gameball.constants.api_base
self.api_key = key or gameball.api_key
if client:
self._client = client
elif gameball.default_http_client:
self._client = gameball.default_http_client
else:
# If the gameball.default_http_client has not been set by the user
# yet, we'll set it here. This way, we aren't creating a new
# HttpClient for every request.
gameball.default_http_client = gameball.http_client.new_default_http_client()
self._client = gameball.default_http_client
def request(self, method, url, params=None, headers=None):
rcode, rbody, rheaders = self.request_raw(
method.lower(), url, params, headers
)
resp = self.interpret_response(rbody, rcode, rheaders)
return resp
def request_raw(self, method, url, params=None, supplied_headers=None):
"""
Mechanism for issuing an API call
"""
if self.api_key:
my_api_key = self.api_key
else:
from gameball import api_key
my_api_key = api_key
if my_api_key is None:
raise AuthenticationError(
"No API key provided. (HINT: set your API key using "
'"gameball.api_key = <API-KEY>"). You can generate API keys '
"from the Gameball web interface. See "
"https://help.gameball.co/en/articles/3467114-get-your-account-integration-details-api-key-and-transaction-key "
"for details, or email <EMAIL> if you have any "
"questions."
)
abs_url = "%s%s" % (self.api_base, url)
if method == "get":
post_data = None
elif method == "post":
post_data = params
else:
raise GameballException(
"Unrecognized HTTP method %r. This may indicate a bug in the "
"Gameball bindings. Please contact <EMAIL> for "
"assistance." % (method)
)
if supplied_headers is None:
supplied_headers={}
supplied_headers['APIKey']=my_api_key
rcode, rbody, rheaders = self._client.request(
method, abs_url, supplied_headers, post_data
)
return rcode, rbody, rheaders
    # Interpret the response: returns the parsed body on HTTP 200 (or True when a 200
    # body is not JSON) and returns a GameballException instance, rather than raising
    # it, for non-200 responses.
def interpret_response(self, rbody, rcode, rheaders):
try:
resp_object = gameballResponse(rbody, rcode, rheaders)
resp = resp_object.get_data()
except Exception:
# Done for boolean returns in general and for vaildate coupon specially
if rcode == 200:
return True
else:
raise GameballException(
"Invalid response body from API: %s "
"(HTTP response code was %d)" % (rbody, rcode),
rbody,
rcode,
rheaders,
)
if rcode != 200:
resp = self.handle_error_response(rbody, rcode, resp_object.data, rheaders)
return resp
# Mechanism of sending GameballException in case of non-200 responses
def handle_error_response(self, rbody, rcode, resp, rheaders):
try:
error_message = resp["message"]
error_code = resp["code"]
message = "%s (GameballException with error code %s)" % (error_message, error_code)
except (KeyError, TypeError):
raise APIError(
"Invalid response object from API: %r (HTTP response code "
"was %d)" % (rbody, rcode),
rbody,
rcode,
resp,
)
return GameballException(message, rbody, rcode, resp, rheaders)
```
#### File: gameball/exceptions/gameball_exception.py
```python
from __future__ import absolute_import, division, print_function
class GameballException(Exception):
def __init__(
self,
message=None,
http_body=None,
json_body=None,
headers=None,
code=None,
):
super(GameballException, self).__init__(message)
self._message = message
self.http_body = http_body
self.json_body = json_body
self.headers = headers or {}
self.code = code
def __str__(self):
msg = self._message or "<empty message>"
return msg
# Returns the underlying `Exception` (base class) message, which is usually
# the raw message returned by Gameball's API. This was previously available
# in python2 via `error.message`. Unlike `str(error)`, it omits "Request
# req_..." from the beginning of the string.
@property
def user_message(self):
return self._message
def __repr__(self):
return "%s(message=%r)" % (
self.__class__.__name__,
self._message,
)
class AuthenticationError(GameballException):
pass
class APIError(GameballException):
pass
```
#### File: gameball/models/referral_object.py
```python
class referralObject(object):
def __init__(
self,
player_code,
player_unique_id,
        player_attributes=None
):
self.player_unique_id = player_unique_id
self.player_code = player_code
        # Use a fresh dict per instance to avoid the shared mutable default pitfall.
        self.player_attributes = player_attributes if player_attributes is not None else {}
def add_player_attribute(self, attribute_name, value):
self.player_attributes[attribute_name] = value
def add_custom_player_attribute(self, attribute_name, value):
custom_chk = self.player_attributes.get('custom', None)
if custom_chk is None:
self.player_attributes['custom'] = {}
self.player_attributes['custom'][attribute_name] = value
``` |
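A short usage sketch for the class above; the identifiers and attribute values are made up for illustration:
```python
referral = referralObject(player_code="REF-CODE", player_unique_id="player-42")
referral.add_player_attribute("displayName", "Jane")
referral.add_custom_player_attribute("favouriteTeam", "Reds")
print(referral.player_attributes)
# {'displayName': 'Jane', 'custom': {'favouriteTeam': 'Reds'}}
```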
{
"source": "aaelsay2/cvat",
"score": 2
} |
#### File: apps/engine/models.py
```python
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.contrib.auth.models import User
import shlex
import csv
from io import StringIO
import re
import os
class Task(models.Model):
name = models.CharField(max_length=256)
size = models.PositiveIntegerField()
path = models.CharField(max_length=256)
mode = models.CharField(max_length=32,default = "interpolation")
owner = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
bug_tracker = models.CharField(max_length=2000, default="")
created_date = models.DateTimeField(auto_now_add=True)
updated_date = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=32, default="interpolate")
overlap = models.PositiveIntegerField(default=0)
# Extend default permission model
class Meta:
permissions = (
("view_task", "Can see available tasks"),
("view_annotation", "Can see annotation for the task"),
("change_annotation", "Can modify annotation for the task"),
)
def get_upload_dirname(self):
return os.path.join(self.path, ".upload")
def get_data_dirname(self):
return os.path.join(self.path, "data")
def get_dump_path(self):
return os.path.join(self.path, "{}.dump".format(self.name))
def get_log_path(self):
return os.path.join(self.path, "task.log")
def get_client_log_path(self):
return os.path.join(self.path, "client.log")
def set_task_dirname(self, path):
self.path = path
self.save(update_fields=['path'])
def get_task_dirname(self):
return self.path
def get_user_segments(self, user):
return [s for s in self.segment_set.all() if s.check_user_access(user)]
def __str__(self):
return self.name
class Segment(models.Model):
task = models.ForeignKey(Task, on_delete=models.CASCADE)
start_frame = models.IntegerField()
stop_frame = models.IntegerField()
def get_task_name(self):
return self.task.name
get_task_name.short_description = 'Task'
def get_annotator(self):
return self.job_set.first().annotator
def check_user_access(self, user):
if user.is_superuser:
return True
segment_user = self.get_annotator()
if not segment_user:
return False
return segment_user.get_username() == user.get_username()
class Job(models.Model):
segment = models.ForeignKey(Segment, on_delete=models.CASCADE)
annotator = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
updated_date = models.DateTimeField(auto_now_add=True)
#url_state = models.BooleanField(default=True)
def get_task_name(self):
return self.segment.get_task_name()
get_task_name.short_description = 'Task'
get_task_name.admin_order_field = 'segment__task__name'
def get_id(self):
return str(self.id)
get_id.short_description = 'Job ID'
get_id.admin_order_field = 'id'
def get_deltatime(self):
#if timezone.now() - self.updated_date < timezone.timedelta(minutes=2):
# return 'False'
#else:
return 'True'
# TODO: add sub-issue number for the task
def __str__(self):
return self.get_task_name() + ':' + self.get_id()
class Label(models.Model):
task = models.ForeignKey(Task, on_delete=models.CASCADE)
name = models.CharField(max_length=64)
def __str__(self):
return self.name
class AttributeSpec(models.Model):
label = models.ForeignKey(Label, on_delete=models.CASCADE)
text = models.CharField(max_length=1024)
def get_attribute(self):
match = re.match(r'^([~@])(\w+)=(\w+):(.+)$', self.text)
prefix = match.group(1)
type = match.group(2)
name = match.group(3)
values = list(csv.reader(StringIO(match.group(4)), quotechar="'"))[0]
return {'prefix':prefix, 'type':type, 'name':name, 'values':values}
def is_mutable(self):
attr = self.get_attribute()
return attr['prefix'] == '~'
def get_type(self):
attr = self.get_attribute()
return attr['type']
def get_name(self):
attr = self.get_attribute()
return attr['name']
def get_default_value(self):
attr = self.get_attribute()
return attr['values'][0]
def get_values(self):
attr = self.get_attribute()
return attr['values']
def __str__(self):
return self.get_attribute()['name']
class AttributeVal(models.Model):
# TODO: add a validator here to be sure that it corresponds to self.label
spec = models.ForeignKey(AttributeSpec, on_delete=models.CASCADE)
value = models.CharField(max_length=64)
class Meta:
abstract = True
class Annotation(models.Model):
job = models.ForeignKey(Job, on_delete=models.CASCADE)
label = models.ForeignKey(Label, on_delete=models.CASCADE)
frame = models.PositiveIntegerField()
class Meta:
abstract = True
class BoundingBox(models.Model):
xtl = models.FloatField()
ytl = models.FloatField()
xbr = models.FloatField()
ybr = models.FloatField()
# TODO: need to think where to define below properties
occluded = models.BooleanField(default=False)
class Meta:
abstract = True
class LabeledBox(Annotation, BoundingBox):
pass
class LabeledBoxAttributeVal(AttributeVal):
box = models.ForeignKey(LabeledBox, on_delete=models.CASCADE)
class ObjectPath(Annotation):
pass
class ObjectPathAttributeVal(AttributeVal):
track = models.ForeignKey(ObjectPath, on_delete=models.CASCADE)
class TrackedObject(models.Model):
track = models.ForeignKey(ObjectPath, on_delete=models.CASCADE)
frame = models.PositiveIntegerField()
outside = models.BooleanField(default=False)
class Meta:
abstract = True
class TrackedBox(TrackedObject, BoundingBox):
pass
class TrackedBoxAttributeVal(AttributeVal):
box = models.ForeignKey(TrackedBox, on_delete=models.CASCADE)
class Skeleton(models.Model):
# TODO: define exactly what "outside" attribute would be
pass
class Keypoint(models.Model):
VISIBILITY= (
(0, 'NOT IN IMAGE'),
(1, 'INVISIBLE'),
(2, 'VISIBLE')
)
name = models.CharField(max_length = 256)
skeleton = models.ForeignKey(Skeleton, on_delete=models.CASCADE)
x = models.FloatField()
y = models.FloatField()
visibility = models.PositiveIntegerField(default=2, choices=VISIBILITY)
class LabeledSkeleton(Annotation, Skeleton):
pass
class LabeledSkeletonAttributeVal(AttributeVal):
skeleton = models.ForeignKey(LabeledSkeleton, on_delete=models.CASCADE)
class TrackedSkeleton(TrackedObject,Skeleton):
# TODO: Is this most appropriate place to define "activity" field?
# It is a property specific to a tracked skeleton
# but could also be defined as an attribute
activity = models.CharField(max_length=64)
pass
class TrackedSkeletonAttributeVal(AttributeVal):
skeleton = models.ForeignKey(TrackedSkeleton, on_delete=models.CASCADE)
``` |
{
"source": "aaerrolla/replace-tokens",
"score": 3
} |
#### File: aaerrolla/replace-tokens/replaceTokens.py
```python
import sys
import re
import in_place
def to_dict(propertyFile):
"""
returns dict after converting properties file in the form of
prop=value
"""
token_dict = {}
#TODO check if file exists
with open(propertyFile) as file:
return { line.split("=")[0].strip():line.split("=")[1].strip() for line in file }
def replace_tokens(filesWithTokens, propertiesFile ):
token_dict = to_dict(propertiesFile)
#TODO open file , for each line replace tokens
#TODO there are multiple issues with this implementation fix
with in_place.InPlace(filesWithTokens) as fp:
for line in fp:
# replace @token@ with {token}
re_line = re.sub(r"@(\w.+?)@", r"{\1}", line)
print(re_line)
replaced_line = ""
try:
replaced_line = re_line.format(**token_dict)
            except KeyError as missing_token:
                # A token in the template has no value in the properties file; keep the line unchanged.
                print("No value found for token {}".format(missing_token))
                replaced_line = re_line
fp.write(replaced_line)
if __name__ == "__main__":
#TODO add condition to check if required number of cmd arguments are provided
#print(to_dict(sys.argv[1]))
replace_tokens(sys.argv[1], sys.argv[2])
``` |
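A hypothetical end-to-end demo of the module above; the file names and token values are invented for the example, and the `in_place` package must be installed:
```python
from replaceTokens import replace_tokens

with open("tokens.properties", "w") as props:
    props.write("host = example.org\nport = 8080\n")
with open("template.txt", "w") as template:
    template.write("server=@host@:@port@\n")

# Rewrites template.txt in place: @host@ and @port@ become example.org and 8080
replace_tokens("template.txt", "tokens.properties")
```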
{
"source": "aafanasyev/FirefoxNightlyAntiTracking",
"score": 3
} |
#### File: aafanasyev/FirefoxNightlyAntiTracking/main.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, FirefoxNightlyAntiTracking"
__license__ = "MIT"
__version__ = "0.0.2"
__email__ = "<EMAIL>"
__status__ = "Prototype"
#
# https://wiki.mozilla.org/Security/Tracking_protection
# http://kb.mozillazine.org/Category:Preferences
#
import os
import sys
import csv
from time import sleep
from selenium import __version__
from selenium.webdriver import Firefox
from selenium.webdriver import FirefoxProfile
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
print("This script captures and counts cookies. Different versions of Firefox")
print("with different anti-tracking protection techniques enabled are used.")
print("Limitations are applied. Disk, memory, offline cache and history of")
print("browsers are disabled. During browse session all cookies are accepted.")
print("browsers are disabled. During a browse session all cookies are accepted.")
print("The content of cookies is saved. However, on close the browser is sanitized.")
print("1 Firefox ESR Version 60.2.2")
print("2 Firefox Release 62.0.3")
print("3 Firefox Nightly Version 64.0a1")
print("4 Firefox ESR Version 60.2.2 with Tracking Protection(TP)")
print("5 Firefox Release 62.0.3 with Tracking Protection(TP)")
print("6 Firefox Nightly Version 64.0a1 with Tracking Protection(TP)")
print("7 Firefox Nightly Version 64.0a1 with Tracking Protection (TP) and Content Blocking (CB)")
print("The experiment runs all use cases above with a waiting period\n of 1 minute between browser sessions ")
# number of sessions per each sites
experiments = 10
path_to_bin = os.path.dirname(os.path.realpath(__file__))
# Three browsers:
# (0)Firefox ESR Version 60.2.2
# (1)Firefox Release 62.0.3
# (2)Firefox Nightly Version 64.0a1
browsers = ["firefox-esr/firefox", "firefox-release/firefox", "firefox-nightly/firefox"]
#browsers = ["firefox-nightly/firefox"]
usecases = ["no TP","TP","TP and CB"]
sites = ["https://www.nu.nl/", "https://www.telegraaf.nl/", "https://www.ad.nl/",
"https://tweakers.net/", "https://www.businessinsider.nl",
"https://www.bloomberg.com/europe", "https://www.ft.com/",
"http://mareonline.nl/", "http://global.oup.com/?cc=nl",
"https://www.bepress.com/", "https://www.plusonline.nl/",
"https://www.buzzfeed.com/", "https://forbes.com/",
"https://www.vice.com/nl",
"https://www.theguardian.com/", "https://www.hln.be",
"https://www.dailymail.co.uk/", "https://www.nytimes.com/",
"https://as.com/", "http://www.espn.com/",
"https://nl.hardware.info/",
"https://computertotaal.nl", "https://www.cnet.com/",
"https://www.buienradar.nl/", "https://www.weeronline.nl/",
"https://www.accuweather.com/en/nl/netherlands-weather",
"https://knmi.nl/home", "https://www.weerplaza.nl/",
"https://nos.nl/", "https://www.nrc.nl/", "https://www.volkskrant.nl/",
"https://www.trouw.nl/", "https://www.parool.nl/", "https://www.metronieuws.nl/"]
print(len(sites))
#"https://www.cosmopolitan.com/",
# wait time in seconds
page_load_wait = 10
session_browser_wait = 60
path_csv = "results.csv"
# Set preference http://kb.mozillazine.org/Category:Preferences
def browsersProfiles(usecase):
profile = FirefoxProfile()
# no update, page load time limit, no cache, accept all cookies
profile.set_preference("app.update.enabled", False)
profile.set_preference("app.update.auto", False)
#profile.set_preference("network.http.max-connections", 48)
profile.set_preference("network.http.connection-timeout", page_load_wait)
profile.set_preference("network.http.connection-retry-timeout", page_load_wait)
profile.set_preference("network.http.response.timeout", page_load_wait)
profile.set_preference("dom.max_script_run_time", page_load_wait)
profile.set_preference("browser.cache.disk.enable", False)
profile.set_preference("browser.cache.disk_cache_ssl", False)
profile.set_preference("browser.cache.memory.enable", False)
profile.set_preference("browser.cache.offline.enable", False)
profile.set_preference("network.cookie.cookieBehavior", 0)
profile.set_preference("network.cookie.lifetimePolicy", 2)
profile.set_preference("places.history.enabled",False)
profile.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
# Tracking Protection
if usecase == "TP":
profile.set_preference("privacy.trackingprotection.enabled", True)
# Disable guidance
profile.set_preference("privacy.trackingprotection.introCount", 20)
# Content Blocking
profile.set_preference("browser.contentblocking.enabled", False)
profile.set_preference("browser.contentblocking.introCount", 20)
#Tracking Protection and Content Blocking
elif usecase == "TP and CB":
# Tracking Protection
profile.set_preference("privacy.trackingprotection.enabled", True)
# Disable guidance
profile.set_preference("privacy.trackingprotection.introCount", 20)
# Content Blocking
profile.set_preference("browser.contentblocking.enabled", True)
# Disable guidance
profile.set_preference("browser.contentblocking.introCount", 20)
# No Tracking Protection
elif usecase == "no TP":
profile.set_preference("privacy.trackingprotection.enabled", False)
#disable guidance
profile.set_preference("privacy.trackingprotection.introCount", 20)
# Content Blocking
profile.set_preference("browser.contentblocking.enabled", False)
profile.set_preference("browser.contentblocking.introCount", 20)
else:
pass
return profile
def browserBinary(browser):
path_to_bin = os.path.dirname(os.path.realpath(__file__))
path_to_browser = (('{0}/{1}').format(path_to_bin,browser))
binary = FirefoxBinary(path_to_browser)
return binary
def browserSession(binary, profile, usecase, experiment):
options = Options()
#options.set_headless()
driver = Firefox(firefox_binary=binary, firefox_profile=profile, options=options)
print("================================")
print("{}: {}".format(driver.capabilities['browserName'], driver.capabilities['browserVersion']))
print("geckodriver: {}".format(driver.capabilities['moz:geckodriverVersion']))
print("Selenium: {}".format(__version__))
print("================================")
for site in sites:
print(site)
# seconds to load page
driver.get(site)
cookies = driver.get_cookies()
#print (cookies)
print("Experiment number: {}" .format(experiment))
print("Use case: {}".format(usecase))
print("Number of loaded cookies: {}" .format(len(cookies)))
write_measurements(path_csv, experiment, usecase, driver.capabilities['browserName'], driver.capabilities['browserVersion'], site, len(cookies))
driver.quit()
#wait before new browser session
sleep(session_browser_wait)
def write_measurements(path_csv, experiment, usecase, browserName, browserVersion, site, cookiesAmount):
#writing results in to CSV
if ((not os.path.isfile(path_csv)) or (os.path.isfile(path_csv) and os.stat(path_csv).st_size==0) and (experiment == 0)):
with open(path_csv, 'w', encoding='utf-8') as results:
writer = csv.writer(results)
fields = ['Experiment', 'Use case', 'Browser Name', 'Browser Version', 'site', 'Number of loaded cookies']
writer.writerow(fields)
with open(path_csv, 'a+', encoding='utf-8') as results:
writer = csv.writer(results)
fields = [experiment, usecase, browserName, browserVersion, site, cookiesAmount]
writer.writerow(fields)
else:
with open(path_csv, 'a+', encoding='utf-8') as results:
writer = csv.writer(results)
fields = [experiment, usecase, browserName, browserVersion, site, cookiesAmount]
writer.writerow(fields)
for experiment in range(experiments):
for usecase in usecases:
# usecase 0 no Tracking protection
#profile = browsersProfiles(usecase)
if usecase == "TP":
# Browsers
for browser in browsers:
                #print("Tracking Protection")
browserSession(browserBinary(browser), browsersProfiles(usecase), usecase, experiment)
elif usecase == "TP and CB":
# Browsers
for browser in browsers:
                #print("Tracking Protection and Content Blocking")
if browser == "firefox-nightly/firefox":
browserSession(browserBinary(browser), browsersProfiles(usecase), usecase, experiment)
else:
pass
elif usecase == "no TP":
# Browsers
for browser in browsers:
#print("no Tracking Protection")
browserSession(browserBinary(browser), browsersProfiles(usecase), usecase, experiment)
else:
print("No usecase selected")
sys.exit()
#Nightly content blocking
#profile.set_preference("privacy.trackingprotection.enabled", False)
#profile.set_preference("privacy.trackingprotection.introCount", 20)
#profile.set_preference("browser.contentblocking.enabled", False)
#456profile.set_preference("browser.contentblocking.introCount", 20)
#Slow-loading Trackers(By default enabled)
#profile.set_preference("browser.fastblock.enabled", True)
# Block Trackers (by default on if contentblocking enabled)
#>profile.set_preference("privacy.trackingprotection.enabled", True)
# 1st default usecase: private
#profile.set_preference("browser.privacy.trackingprotection.menu", "private")
#>profile.set_preference("privacy.trackingprotection.enabled", False)
# 2nd usecase: always
#profile.set_preference("browser.privacy.trackingprotection.menu", "always")
#profile.set_preference("privacy.trackingprotection.enabled", True)
# Disable?
#>profile.set_preference("browser.privacy.trackingprotection.menu", "private")
#>profile.set_preference("privacy.trackingprotection.enabled", False)
#profile.set_preference("privacy.trackingprotection.pbmode.enabled", False)
# Third-Party Cookies
# 1st usecase: Trackers(recommended):
#profile.set_preference("network.cookie.cookieBehavior", 4)
# 2nd usecase (All third-party cookies)
#profile.set_preference("network.cookie.cookieBehavior", 1)
# Disable:
#profile.set_preference("network.cookie.cookieBehavior", 0)
``` |
{
"source": "aafanasyev/StringToPicture",
"score": 4
} |
#### File: aafanasyev/StringToPicture/main.py
```python
import sys
if sys.version_info[0:2] < (3, 3):
    raise RuntimeError("Must be using Python 3.3+")
try:
from PIL import Image
except ImportError:
    print("\n\nRequires Pillow. See requirements.txt\n\n")
    raise
# String is not longer then 8 ASCII characters
STRING = 'AnOctet'
# Basic image size
B_WIDTH = 8
B_HEIGHT = 8
# Updated image size
U_WIDTH = 512
U_HEIGHT = 512
# Bit state related color A Bit value from 7 bits characters set and value painted
#C_7BITS_0 = (154, 205, 50, 255) #yellowgreen
#C_7BITS_1 = (0, 191, 255, 255) #deepbluesky
C_7BITS_0 = (255, 255, 255, 255) #white
C_7BITS_1 = (0, 191, 255, 255) #deepbluesky
# Parity bits bitwise color
C_PBITS_0 = (255, 255, 255, 255)
C_PBITS_1 = (255, 69, 0, 255) #redorange
# Debug
DEBUG = 1
def chars_to_bin(string):
"""
Convert characters of a string to a 7 bits long binary format
"""
#return ['{:b}'.format(ord(character)) for character in string]
list_of_7bits_items = ['{:b}'.format(ord(character)) for character in string]
if len(list_of_7bits_items)<8:
for i in range (8 - len(list_of_7bits_items)):
list_of_7bits_items.append('{0:07b}'.format(ord(' ')))
return list_of_7bits_items
def parity_bit_octets(list_of_7bits_items):
list_octets_items = []
"""
Adding a parity bit to 7 bits to make an octet
"""
for char_of_7bits in list_of_7bits_items:
str_to_int = list(map(int, char_of_7bits))
        # add an odd-parity bit so every octet carries an odd number of 1s
str_to_int.append((sum(str_to_int)+1)%2)
list_octets_items.append(str_to_int)
return list_octets_items
def generate_image(b_width, b_height, list_octets_items, debug, u_width, u_height):
"""
Generate an image from list of octets
"""
#Create basic image using basic width and height
b_image = Image.new('RGBA', (b_width, b_height), 'black')
#Create pixel map
pixel_map = b_image.load()
# Columns and rows are indexed to the width and height of the pixel map respectively.
# The pixel map uses the defined basic width and height. Row 7 (the 8th pixel) of each
# column holds the parity bit and is painted orange-red (1) or white (0); rows 0-6 hold
# the 7 character bits and are painted deep-sky-blue (1) or white (0).
for column in range(b_image.size[0]):
for row in range(b_image.size[1]):
if row != 7:
if list_octets_items[column][row]:
pixel_map[column, row] = C_7BITS_1
else:
pixel_map[column, row] = C_7BITS_0
else:
if list_octets_items[column][row]:
pixel_map[column, row] = C_PBITS_1
else:
pixel_map[column, row] = C_PBITS_0
if debug:
print("col: ", column, "row:", row, "bit:", list_octets_items[column][row], "RGBA:", pixel_map[column, row])
else:
pass
# Resizing and transforming of generated basic image in to updated image. Save and show result.
u_image = b_image.resize((u_width, u_height), Image.BOX).rotate(90).transpose(Image.FLIP_TOP_BOTTOM)
u_image.save('logo.png')
u_image.show()
"""
Main function
"""
def main():
list_7bits_items = chars_to_bin(STRING)
list_octets_items = parity_bit_octets(list_7bits_items)
if DEBUG:
print(list_7bits_items)
print(list_octets_items)
generate_image(B_WIDTH, B_HEIGHT, list_octets_items, DEBUG, U_WIDTH, U_HEIGHT)
else:
generate_image(B_WIDTH, B_HEIGHT, list_octets_items, DEBUG, U_WIDTH, U_HEIGHT)
if __name__ == "__main__":
main()
``` |
{
"source": "aafaquerk/ASTR513_Project",
"score": 3
} |
#### File: Utils/ASTR 513 Final Project/Hyman_genericMCMC.py
```python
import numpy as np
import matplotlib.pyplot as plt
import corner
from copy import deepcopy
# Define prior and likelihood function for linear fit
def logLikelihoodRho0(a, b, x, y, sigX, sigY):
return -0.5*np.sum(((y-a-b*x)*(y-a-b*x))/((b*b*sigX*sigX)+(sigY*sigY)))
def logPrior1(b):
return 0.
def logPrior2(b):
return -np.log(np.abs(b))
def logPrior3(b):
return -np.log(1+b*b)
def logProb(a, b, x, y, sigX, sigY, priorFunct):
return logLikelihoodRho0(a, b, x, y, sigX, sigY) + priorFunct(b)
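# Quick numeric check (sketch): for a single point x=1.0, y=1.1 with sigX=sigY=0.1 and the
# line a=0.0, b=1.0, the residual is 0.1 and the effective variance is b^2*sigX^2 + sigY^2 = 0.02,
# so logLikelihoodRho0 returns -0.5 * 0.01 / 0.02 = -0.25 (plus the chosen prior in logProb).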
# 2D MCMC code (logarithmic)
def MCMC2D_Log_General(LogProbabilityDistrib, N, sigG, initialGuess, args=()):
"""
Markov Chain Monte Carlo for Log Probability, written by <NAME> 12/3/2020
(modified for general number of samplings)
Inputs:
LogProbabilityDistrib: log-space probability distribution function (function)
N: number of MCMC steps (integer)
sigG: standard deviation of the Gaussian proposal steps (float)
initialGuess: starting values of the free parameters (tuple or array)
args: extra parameters passed to the distribution function (tuple)
Outputs:
values: N x len(initialGuess) array of sampled parameter values (ndarray)
acceptanceRate: acceptance rate of MCMC (float)
"""
# get number of free parameters
freeParams = len(initialGuess)
# make acceptance counter and acceptance rate calculator
acceptanceCounter = 0
totalNumberPoints = 0
values = np.zeros([int(N), freeParams])
##
# step 1: draw initial xi
currentVals = initialGuess
##
# for x in range(0,int(N)):
while totalNumberPoints < int(N):
# step 2: take step to xi+1 = xi+epsilon
epsilons = np.random.normal(scale=sigG, size=freeParams)
newVals = currentVals+epsilons
##
# step 3: calc R = P(xi+1)/P(xi)
R = LogProbabilityDistrib(*newVals, *args)-LogProbabilityDistrib(*currentVals, *args)
##
if R < 1:
p = np.log(np.random.uniform(low=0., high=1., size=1)[0])
if p > R:
currentVals = currentVals
values[totalNumberPoints] = deepcopy(currentVals)
totalNumberPoints += 1
else:
currentVals = newVals
values[totalNumberPoints] = deepcopy(currentVals)
acceptanceCounter += 1
totalNumberPoints += 1
else:
currentVals = newVals
values[totalNumberPoints] = deepcopy(currentVals)
acceptanceCounter += 1
totalNumberPoints += 1
##
acceptanceRate = acceptanceCounter/totalNumberPoints
print('\nAcceptance Rate = {}\n'.format(acceptanceRate))
##
return values, acceptanceRate
#### EXAMPLE RUN ####
# MCMC draw - prior case 2
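# (Assumption: BMAGs, Vrots, e_BMAGs and err_Vrots are NumPy data arrays prepared elsewhere in
# the project -- e.g. absolute B magnitudes and rotation velocities with their uncertainties.)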
params = (BMAGs, Vrots, e_BMAGs, err_Vrots, logPrior2)
values, acceptanceRate = MCMC2D_Log_General(logProb, 5e5, sigG=0.01, initialGuess=(-1.0,-0.15), args=params)
aVals, bVals = values.T
plt.plot(aVals,bVals,marker='.',ls='None')
plt.xlabel('a')
plt.ylabel('b')
plt.show()
plt.hist2d(aVals,bVals,bins=100)
plt.xlabel('a')
plt.ylabel('b')
plt.show()
combined = np.vstack((aVals,bVals)).T
figure = corner.corner(combined, labels=["a", "b"],
levels=[0.683,0.954,0.997],
plot_datapoints=False,
plot_density=False,
plot_contours=True,
show_titles=True, title_kwargs={"fontsize": 12})
plt.show()
``` |
{
"source": "AAFC-BICoE/elateridae-ortholog-baitset",
"score": 3
} |
#### File: AAFC-BICoE/elateridae-ortholog-baitset/elateridae_baits.py
```python
import glob
import os
from Bio import AlignIO, SeqIO
import time
import argparse
import random
def main():
"""
Main Function to run Staphylinidae Bait Designer
:return:
"""
parser = argparse.ArgumentParser(description='Processes T_Coffee AA alignments to generate an ortholog bait set')
parser.add_argument('-o', type=str, required=True,
help='Output Directory')
parser.add_argument('-i', type=str, required=True,
help='T_Coffee Directory containing aa based .score_ascii files')
parser.add_argument('-n', type=str, required=True,
help='Directory containing tranalign nucleotide alignments')
# parser.add_argument('-p', type=str, required=True,
# help='Priorities File for Staphylinidae')
args = parser.parse_args()
print("Starting Staphylinidae Ortholog Bait Design".format(args.o))
print(args.o, args.i, args.n)
dict_of_max_sums = longest_exon_length(args.i)
sum_file = write_sums(args.o, dict_of_max_sums)
blocks_dir = extract_conserved_blocks(sum_file, args.n, args.o)
window_ranges = [600]
for window in window_ranges:
filtered_blocks_dir = filter_blocks(blocks_dir, args.o, window)
processed_blocks_dir = filtered_blocks_dir
# Original was going to stagger tile the baits, but bait manufacturer inherently does this
# tiled_blocks_dir = tile_blocks(filtered_blocks_dir, args.o, window)
# processed_blocks_dir = tiled_blocks_dir
merge_baits(processed_blocks_dir, args.o, "Elateridae", window)
def extract_conserved_blocks(sum_file, alignment_directory, results_directory):
"""
Takes an AA T_coffee alignment score_ascii file, the corresponding nt fasta tranalign file, and the sum file to
Extract out a conserved block
:param sum_file:
:param alignment_directory:
:param results_directory:
:return: Output Directory of conserved blocks
"""
output_directory = os.path.join(results_directory, "conserved_blocks")
if not os.path.exists(output_directory):
os.makedirs(output_directory)
with open(sum_file) as f:
lines = f.readlines()
lines.pop(0)
for line in lines:
list_of_seqs = []
split = line.rstrip().split(",")
name = split[0].replace(".aa.summarized.score_ascii", "_tranaligned.fa")
window_range = int(split[2])*3
index = int(split[3])*3
file_path = os.path.join(alignment_directory, name)
if os.path.isfile(file_path):
with open(file_path) as g:
alignments = AlignIO.read(g, "fasta")
for alignment in alignments:
list_of_seqs.append(alignment[index:index + window_range])
orthogroup = split[0].split(".")[0]
file_name = "{}_block.fasta".format(orthogroup)
file_path = os.path.join(output_directory, file_name)
with open(file_path, "w") as h:
for seq in list_of_seqs:
h.write(seq.format("fasta"))
return output_directory
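# Coordinate example (sketch): the sum file stores window and index in amino-acid positions,
# so both are multiplied by 3 above -- e.g. a 200-residue window starting at AA index 40
# becomes a 600 bp nucleotide block starting at position 120 of the tranaligned alignment.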
def longest_exon_length(directory):
"""
Scans t_coffee alignments in score_ascii format for a region of between 75-2000 positions in length that is
highly conserved, and sorts by the degree of conservation into an output file
:param directory: Directory of T_coffee results (containing score_ascii and aln files)
:return: Dictionary of Orthogroups with a 300bp region TCS scores above 2400
"""
increments = [150, 200]
increments_rev = increments[::-1]
dict_of_max_sums = {}
files = glob.glob(os.path.join(directory, "*.score_ascii"))
count = 0
for file in files:
count += 1
if count % 100 == 0:
print(count)
# Scans an alignment and converts the cons string of numbers into a continous list of numbers
number_string = ""
with open(file) as f:
number_of_specimens = f.read().count(":") - 4
f.seek(0)
if number_of_specimens < 5:
print("Skipping {} Due to Low Specimen Count".format(file))
continue
for line in f:
if line.startswith("cons") and ":" not in line:
number = line.rstrip().split(" ")[-1]
number_string += number
number_list = [int(i) for i in number_string]
# Scans number list for sequence containing the highest window range of conserved bases within 95% of max
# TCS score for said window range aka 9*Window Range
# Sort the list so the highest score block within the window range is first. If the window range
# has 95% quality or higher, add it to dictionary and move on to next file, otherwise decrease
# window range and try again
for window_range in increments_rev:
list_of_sums = []
if len(number_list) > window_range:
for i in range(0, len(number_list) - window_range):
the_sum = sum(number_list[i:i + window_range])
list_of_sums.append((the_sum, window_range, i))
sorted_list = sorted(list_of_sums, reverse=True, key=lambda element: (element[0]))
if float(sorted_list[0][0]) >= float(9 * window_range * .95):
if os.path.basename(file) not in dict_of_max_sums:
dict_of_max_sums[os.path.basename(file)] = sorted_list[0]
break
return dict_of_max_sums
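# Threshold example (sketch): TCS scores run 0-9 per position, so a perfectly conserved
# 150-position window sums to 9 * 150 = 1350; the acceptance cut-off used above is
# 9 * 150 * 0.95 = 1282.5, i.e. the window must be within 95% of the maximum possible score.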
def write_sums(directory, dict_of_max_sums):
"""
Writes the dictionary of all ortholog T_coffee scores/sums to csv file
:param directory:
:param dict_of_max_sums:
:return:
"""
if not os.path.exists(directory):
os.makedirs(directory)
timestr = time.strftime("%Y%m%d-%H%M%S")
file_name = "Conserved_Exons_Sums_{}.csv".format(timestr)
file_path = os.path.join(directory, file_name)
# Sorts dictionary into a list by score sum and then window length
sorted_x = sorted(dict_of_max_sums.items(), reverse=True, key=lambda x: (x[1][0], x[1][1]))
print("Writing T_Coffee score analysis to {}".format(file_path))
with open(file_path, "w") as f:
f.write("Orthogroup,Sum,Window,Index\n")
for entry in sorted_x:
f.write("{},{},{},{}\n".format(entry[0], entry[1][0], entry[1][1], entry[1][2]))
return file_path
def filter_blocks(directory, results_dir, window):
"""
Filters blocks generated by longest exon length and write sum functions based on various criteria
:param directory: Directory of fasta blocks to filter
:param results_dir: Parent Result Folder
:param window: Minimum length of a conserved block in basepairs
:return: Output Directory of filtered blocks
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "filtered_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
total_seq_length = 0
total_after_gap_removal = 0
total_sequences = 0
gene_count = 0
# For each block/file extract out sequences that meet the following criteria:
# Part of Priority List = 1
# Minimum Length of Window size in basepairs
# Gaps represent less than 20% of sequence
# Block contains at least 5 sequences from priority list = 1
for fasta in fastas:
seqs = []
with open(fasta) as f:
file_name = os.path.basename(fasta).replace(".fasta", "_filtered.fasta")
for seq in SeqIO.parse(f, 'fasta'):
gaps = seq.seq.count("-")
gap_percent = float(gaps / len(seq.seq))
if gap_percent > 0.20:
pass
else:
if len(seq.seq) >= window:
seqs.append(seq)
if len(seqs) < 5:
pass
else:
gene_count += 1
# Randomly take 3 contigs from the bait set to ensure even distribution of species across all orthologs
random.shuffle(seqs)
seqs = seqs[:3]
total_sequences += len(seqs)
for seq in seqs:
total_seq_length += len(seq.seq)
seq.seq = seq.seq.ungap(gap="-")
total_after_gap_removal += len(seq.seq)
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
print("Total Genes: {}, "
"Total Sequences: {}, "
"Total Length in bp: {}, "
"After Gap Removal: {}".format(gene_count, total_sequences, total_seq_length, total_after_gap_removal))
return output_dir
def tile_blocks(directory, results_dir, window):
"""
Takes a prefiltered block generated by the filtered_blocks function and tiles each bait
The first 0, 40 or 80 basepairs of each sequence are removed so the baits tile amongst each other
:param directory:
:param results_dir:
:param window:
:return:
"""
fastas = glob.glob(os.path.join(directory, "*.fasta"))
output_dir = os.path.join(results_dir, "tiled_blocks_{}".format(window))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for fasta in fastas:
seqs = []
with open(fasta) as f:
count = 0
for seq in SeqIO.parse(f, 'fasta'):
seq.description = ""
# Remove the first 0, 40 or 80 basepairs of the sequence every 3rd time
count += 1
if count == 1:
pass
if count == 2:
seq.seq = seq.seq[40:]
if count == 3:
seq.seq = seq.seq[80:]
count = 0
seqs.append(seq)
file_name = os.path.basename(fasta).replace("_block_filtered", "_block_tiled")
new_file = os.path.join(output_dir, file_name)
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
def merge_baits(directory, results_dir, prefix, window):
"""
Merges multifastas in the input directory into a single multi fasta file. Can be accomplished with bash cat, but
using biopython ensures each fasta entry is formatted correctly
:param directory: Input directory of fastas
:param results_dir: Output Parent directory
:param prefix: Name of the output file
:param window:
:return:
"""
output_dir = os.path.join(results_dir, "final_baits")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
fastas = glob.glob(os.path.join(directory, "*.fasta"))
seqs = []
total_dna = 0
total_seqs = 0
total_orthologs = 0
for fasta in fastas:
if total_dna > 3900000:
break
total_orthologs += 1
with open(fasta) as f:
for seq in SeqIO.parse(f, 'fasta'):
total_seqs += 1
total_dna += len(seq.seq)
seq.description = ""
seqs.append(seq)
file_name = "{}-{}-final-baits.fasta".format(prefix, window)
new_file = os.path.join(output_dir, file_name)
print("Bait File {} "
"with Total Orthologs {}, "
"Total Seqs {}, Total_Dna {} bp".format(new_file, total_orthologs, total_seqs, total_dna))
with open(new_file, "w") as g:
SeqIO.write(seqs, g, "fasta")
return output_dir
if __name__ == "__main__":
main()
``` |
{
"source": "AAFC-BICoE/json-schema2adoc",
"score": 3
} |
#### File: json-schema2adoc/test/testJsonSchemaAdocGenerator.py
```python
import unittest
import logging
import json
import json_schema2adoc.jsonSchemaAdocGenerator
class TestSchemaAdocGenerator(unittest.TestCase):
def test_build_document(self):
with open('test/TestJsonSchema.json', encoding='utf-8') as json_file:
json_object = json.loads(json_file.read())
asciiDocOutput = json_schema2adoc.jsonSchemaAdocGenerator.build_document(json_object)
self.assertTrue(asciiDocOutput[0].startswith('=='))
def main():
unittest.main()
if __name__ == '__main__':
main()
``` |
{
"source": "AAFC-BICoE/nrc-ngs-downloader",
"score": 3
} |
#### File: nrc-ngs-downloader/nrc_ngs_dl/web_parser.py
```python
import os
import logging
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
from datetime import date
import time
from BeautifulSoup import BeautifulSoup
import math
logger = logging.getLogger('nrc_ngs_dl.web_parser')
class WebParser:
def __init__(self, login_url, runlist_url, username, password):
"""Initialize the object by logging into the web page
Args:
login_url (str): link to the login page (https://lims.bioinfo.nrc.ca/login.html)
runlist_url (str): link to the page with a list of all the sequence runs
(https://lims.bioinfo.nrc.ca/lims/show_runs.html)
username (str): username
password (str): password
"""
login_data = {
'username' : username,
'password' : password,
'submit' : 'Login',
}
session_requests = requests.Session()
try:
session_requests.post(login_url, data=login_data, verify=False)
except:
logger.error('Wrong address of login page %s' % login_url)
raise
self.session_requests = session_requests
self.runlist_url = runlist_url
def get_runlist(self, table_id, link_column, status_column):
"""Get a list of completed sequence runs
Args:
table_id (str): tags to get the table (div id runs_table)
link_column (str): the column which contains the link to a sequence run
status_column (str): the column to show if the sequence run is completed or not
Returns:
A list of links to the completed sequence runs
"""
packages = []
r = self.session_requests.get(self.runlist_url,verify=False)
if r.url != self.runlist_url:
logger.error('Failed to login, check your username, password and link to run_list page %s ' % self.runlist_url)
raise RuntimeError('Failed to login to the run list page')
soup = BeautifulSoup(r.content)
try:
table = self.get_table(soup, table_id)
except:
logger.error('Cannot get the table %s' % (table_id))
raise
title_row = table.findAll('tr')[0]
keys = self.get_text_arow(title_row,'th')
index_link = keys.index(link_column)
index_status = keys.index(status_column)
for row in table.findAll('tr')[1:]:
cells = row.findAll('td')
run_link_here = cells[index_link].find('a',href = True).get('href')
status_here = self.get_text_acell(cells[index_status])
if status_here == 'completed':
packages.append(run_link_here)
reverse_list = list(reversed(packages))
logger.debug('The list of all runs: %s' % reverse_list)
return reverse_list
def get_runinfo(self, run_url):
"""Parse information of a sequence run
i.e. run_name,machine_name,plate_name,platform,run_mode,
run_type,num_cycles,quality_format,operator,creation_date,
description,status
Args:
run_url(str): link to a sequence run
Returns:
dictionary of the information
"""
try:
r = self.session_requests.get(run_url,verify=False)
except:
logger.warn('Cannot access the page of sequence run %s ' % (run_url))
raise
soup = BeautifulSoup(r.content)
run_info = {}
try:
table = soup.find('table', {'class':'label_value'})
except:
logger.warn('Cannot find the run info table')
raise
for a_row in table.findAll('tr'):
two_cells = a_row.findAll('td')
if len(two_cells) != 2:
logger.warn('Expected exactly two columns in run info table')
raise ValueError('Unexpected number of columns in run info table')
column_name = self.get_text_acell(two_cells[0])
column_value = self.get_text_acell(two_cells[1])
column_name = column_name.lower()
column_name_part = column_name.split(' ')
link = '_'
column_name = link.join(column_name_part)[:-1]
run_info[column_name] = column_value
logger.debug("run_url %s and run_info %s" % (run_url, run_info))
return run_info
def get_laneinfo(self, run_url, table_id, column_lane, column_link):
"""Parse information of all lanes in a sequence run,
Args:
run_url: link of a sequence run
table_id: tags for parsing a table
column_lane: the specific column which contains lane_index
column_link: the specific column which contains a link to data
Returns:
A list of lanes in a sequence run
"""
lane_list = []
file_list = []
try:
r = self.session_requests.get(run_url, verify=False)
except:
logger.warn('Cannot access the page of sequence run %s ' % (run_url))
raise
soup = BeautifulSoup(r.content)
try:
table = self.get_table(soup, table_id)
except:
logger.warn('Cannot find the table %s' % (table_id))
raise
title_row = table.findAll('tr')[0]
keys = self.get_text_arow(title_row,'th')
index_lane = keys.index(column_lane)
index_download = keys.index(column_link)
new_keys=[]
for each_key in keys:
each_key = each_key.replace('%', 'pct')
each_key = each_key.lower()
each_key_part = each_key.split(' ')
link = '_'
each_key = link.join(each_key_part)
new_keys.append(each_key)
for a_row in table.findAll('tr')[1:]:
text_all_cell = self.get_text_arow(a_row,'td')
all_cell = a_row.findAll('td')
lane_number = text_all_cell[index_lane]
download_file_url = all_cell[index_download].find('a', href=True)
if lane_number != '' and len(download_file_url) == 1:
a_lane = {}
lane_index_now = lane_number
a_lane['lane_index'] = lane_number
a_lane['package_name'] = download_file_url.string.strip()
a_lane['pack_data_url'] = download_file_url.get('href')
all_headers = self.session_requests.get(a_lane['pack_data_url'], stream=True, verify=False)
logger.debug('all_headers %s' % (all_headers.headers))
if all_headers.status_code != 200:
logger.warn('Wrong headers %s' % (a_lane['pack_data_url']))
raise RuntimeError('Unexpected response headers for %s' % (a_lane['pack_data_url']))
a_lane['http_content_length'] = all_headers.headers['content-length']
lane_list.append(a_lane)
else:
if len(new_keys) != len(text_all_cell):
logger.warn('Different length in title and content of a table')
else:
a_file ={}
for index in range(len(new_keys)):
a_file[new_keys[index]] = text_all_cell[index]
a_file['lane_index'] = lane_index_now
old_biomaterial = a_file['biomaterial']
new_biomaterial = old_biomaterial.replace(' ','')
if len(old_biomaterial) != len(new_biomaterial):
logger.warn('Whitespace(s) in user defined name %s' % (old_biomaterial))
a_file['biomaterial'] = new_biomaterial
file_list.append(a_file)
return lane_list, file_list
def get_text_arow(self,a_row, tag):
"""Get the text for all the cells of a row
Args:
a_row: a row of a table
tag (str): tag for a cell, i.e. td or th
Returns:
A list of text in a row of a table
"""
text_list = []
all_cell = a_row.findAll(tag)
for a_cell in all_cell:
a_text = self.get_text_acell(a_cell)
text_list.append(a_text)
return text_list
def get_table(self, soup, table_id):
"""Get a table with the table_id"""
table_id_values = table_id.split()
if table_id_values[0] =='table':
table = soup.find(table_id_values[0], {table_id_values[1]:table_id_values[2]})
else:
a_tag = soup.find(table_id_values[0], {table_id_values[1]:table_id_values[2]})
table = a_tag.findAll('table')[0]
return table
def get_text_acell(self,a_cell):
""" Get the text in a specific cell of a table"""
text = a_cell.findAll(text = True)
text = [i.strip() for i in text if i not in ('\n', '')]
if len(text) == 0:
a_text = ''
else:
link='_'
a_text = link.join(text)
return a_text
def download_zipfile(self, url, file_path):
"""Download a zip file
Args:
url: link to the file
file_path: path and file name to hold the file
Returns:
Date to download the file
Time (in minutes) spend on downloading the file
Size of the file
"""
time_and_size = []
download_date = date.today().strftime('%m/%d/%Y')
time_and_size.append(download_date)
start = time.time()
chunkSize = 1024 * 512
totalSize = 0
res = self.session_requests.get(url, stream=True, verify=False)
whole_file_size = int(res.headers['content-length'])
#print(res.headers['content-length'], real_file_size)
limit_10G = int(10*math.pow(1024,3))
if whole_file_size < limit_10G:
with open(file_path, 'wb') as output:
chunknumber = 0
for chunk in res.iter_content(chunk_size=chunkSize, decode_unicode=False):
if chunk:
totalSize = totalSize + chunkSize
chunknumber += 1
output.write(chunk)
else:
logger.info('HiSeq file ')
#url_xs = url.replace('lane.fastq', 'lane_xs.fastq')
url_xs = url
resume_number = whole_file_size/limit_10G +1
file_size =0
option_for_write = 'wb'
while resume_number >0 and file_size < whole_file_size:
resume_number-=1
resume_header = {'Range': 'bytes=%d-' % file_size}
res = self.session_requests.get(url_xs, headers=resume_header,stream = True, verify = False, allow_redirects = True)
with open(file_path, option_for_write) as output:
for chunk in res.iter_content(chunk_size=chunkSize, decode_unicode=False):
if chunk:
output.write(chunk)
option_for_write = 'ab'
time.sleep(20)
file_size = os.stat(file_path).st_size
logger.info('file size now %s' % (file_size))
res.close()
end = time.time()
time_in_min = (end - start) / 60
time_and_size.append('%.1f' % time_in_min)
fileSize = os.stat(file_path).st_size
time_and_size.append(str(fileSize))
return time_and_size
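# Resume-logic example (sketch): limit_10G is 10 GiB (10 * 1024**3 = 10737418240 bytes), so a
# ~25 GiB HiSeq archive gives resume_number = 25 GiB // 10 GiB + 1 = 3 passes; each pass sends
# a 'Range: bytes=<bytes already on disk>-' header and the loop stops once the on-disk size
# matches the reported content-length.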
``` |
{
"source": "AAFC-BICoE/reference-data-manager",
"score": 2
} |
#### File: reference-data-manager/brdm/NcbiBlastData.py
```python
import os
import shutil
import tempfile
import requests
import re
import logging.config
import tarfile
import time
from distutils.dir_util import copy_tree
from bs4 import BeautifulSoup
from brdm.NcbiData import NcbiData
from brdm.RefDataInterface import RefDataInterface
class NcbiBlastData(NcbiData, RefDataInterface):
def __init__(self, config_file):
"""Initialize the object"""
super(NcbiBlastData, self).__init__(config_file)
self.download_folder = \
self.config['ncbi']['blast_db']['download_folder']
self.info_file_name = self.config['ncbi']['blast_db']['info_file_name']
try:
self.destination_dir = os.path.join(
super(NcbiBlastData, self).destination_dir,
self.config['ncbi']['blast_db']['destination_folder'])
self.backup_dir = os.path.join(
super(NcbiBlastData, self).backup_dir,
self.config['ncbi']['blast_db']['destination_folder'])
if not os.path.exists(self.destination_dir):
os.makedirs(self.destination_dir, mode=self.folder_mode)
os.chdir(self.destination_dir)
if not os.path.exists(self.backup_dir):
os.makedirs(self.backup_dir, mode=self.folder_mode)
except Exception as e:
logging.error('Failed to create destination/backup folder: {}'
.format(e))
def update(self, file_number=0):
"""Update NCBI nrnt blast database"""
logging.info('Executing NCBI Blast update')
# Download nrnt data into an intermediate folder
temp_dir = self.create_tmp_dir(self.destination_dir)
if not temp_dir:
logging.error('Failed to create the temp_dir: {}'.format(temp_dir))
return False
success = self.download(download_file_number=file_number)
if not success:
logging.error('Failed to download nrnt files.')
return False
# Change the file mode
try:
only_files = [f for f in os.listdir('.') if os.path.isfile(f)]
for f in only_files:
os.chmod(f, self.file_mode)
except Exception as e:
logging.error('Failed to change file mode, error{}'.format(e))
return False
# Backup two readme files
backup_success = self.backup()
if not backup_success:
logging.error('Failed to backup readme files.')
return False
# Delete all data from the destination folder
clean_ok = self.clean_destination_dir(self.destination_dir)
if not clean_ok:
logging.error('Failed to remove old files in destination folder')
return False
# Copy data from intermediate folder to destination folder
# Delete intermediate folder
try:
copy_tree(temp_dir, self.destination_dir)
shutil.rmtree(temp_dir)
except Exception as e:
logging.error('Failed to copy file from temp to destination {}'
.format(e))
return False
return True
# Backup readme and readme+ file
def backup(self):
"""Backup readme and README+ files"""
logging.info('Executing NCBI Blast backup')
backup_folder = self.create_backup_dir()
if not backup_folder:
logging.error('Failed to create backup folder.')
return False
# Copy only README files for future reference
app_readme_file = self.config['readme_file']
ncbi_readme_file = self.info_file_name
try:
shutil.copy2(app_readme_file, backup_folder)
shutil.copy2(ncbi_readme_file, backup_folder)
except Exception as e:
logging.exception('NCBI Blast Backup did not succeed. Error: {}'
.format(e))
return False
return True
def restore(self):
# 2018.05.28: As agreed upon, this feature will not be implemented.
# There is no backup functionality for blast databases,
# therefore there is no restore.
pass
# Unzip all of nrnt files
def unzip(self):
"""Unzip all the nrnt files"""
try:
zipped_files = [f for f in os.listdir('.') if os.path.isfile(f)]
for file in zipped_files:
unzipped = self.unzip_file(file)
if not unzipped:
logging.error('Failed to unzip {}'.format(file))
return False
all_files = [f for f in os.listdir('.') if os.path.isfile(f)]
for f in all_files:
os.chmod(f, self.file_mode)
except Exception as e:
logging.error('Failed to unzip and change file mode {}'
.format(e))
return False
return True
# Parse the webpage of ncbi blast to get the list of nrnt files
def get_all_file(self, url):
"""Parse the webpage of ncbi blast to get the list of nrnt files
Args:
url (string): the link to ncbi blast database
Return:
A list of nrnt file names
"""
result = []
try:
session_requests, connected = self.https_connect()
html = session_requests.get(url)
soup = BeautifulSoup(html.content, 'html.parser')
nr_nt_re = re.compile(r'(nr|nt)\.\d{2,3}\.tar\.gz$')
# nr_nt_re = re.compile('(nr|nt).d{2,3}.tar.gz$')
links = soup.find_all('a')
for a_link in links:
file_name = a_link.string
if nr_nt_re.match(file_name):
result.append(file_name)
session_requests.close()
except Exception as e:
logging.info('Failed to get the list of nrnt files: {}'.format(e))
print(len(result))
return result
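# Pattern example (sketch): nr_nt_re matches volume archives such as 'nt.00.tar.gz' or
# 'nr.125.tar.gz', but not their checksum companions like 'nt.00.tar.gz.md5', because the
# expression is anchored with '$' at the end of the file name.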
# Download read me and all the nrnt files
def download(self, download_file_number=0):
"""Download readme and all the nrnt files"""
download_start_time = time.time()
max_download_attempts = self.download_retry_num
folder_url = os.path.join(self.login_url, self.download_folder)
# Download read me
readme_success = False
attempt = 0
while attempt < max_download_attempts and not readme_success:
attempt += 1
try:
session_requests, connected = self.https_connect()
file_name_readme = self.info_file_name
file_url_readme = os.path.join(folder_url, file_name_readme)
readme_success = self.download_a_file(
file_name_readme, file_url_readme, session_requests)
session_requests.close()
except Exception as e:
logging.info('Failed to download readme on attempt {}:{}'
.format(attempt, e))
time.sleep(self.sleep_time)
if not readme_success:
logging.error('Failed to download readme file')
return False
# Get the list of nrnt files
all_file = self.get_all_file(folder_url)
downloaded_file = []
if len(all_file) == 0:
logging.error('Failed to get the file list to download')
return False
if download_file_number == 0:
download_file_number = len(all_file)
# Download nrnt files
attempt = 0
while attempt < max_download_attempts and \
len(downloaded_file) < download_file_number:
try:
session_requests, connected = self.https_connect()
attempt += 1
for file in all_file:
if file not in downloaded_file:
download_success = False
file_success = False
md5_success = False
file_name_nrnt = file
file_url_nrnt = os.path.join(
folder_url, file_name_nrnt)
file_name_md5 = file+'.md5'
file_url_md5 = os.path.join(folder_url, file_name_md5)
file_success = self.download_a_file(
file_name_nrnt, file_url_nrnt, session_requests)
if file_success:
md5_success = self.download_a_file(
file_name_md5, file_url_md5, session_requests)
if md5_success:
download_success = self.checksum(file_name_md5,
file_name_nrnt)
if download_success:
downloaded_file.append(file)
if len(downloaded_file) == download_file_number:
break
session_requests.close()
except Exception as e:
logging.exception('Errors in downloading nrnt files, \
\nretry... {}'.format(e))
time.sleep(self.sleep_time)
if len(downloaded_file) < download_file_number:
logging.error('Failed. downloaded {} out of {} files'
.format(len(downloaded_file), download_file_number))
return False
files_download_failed = []
# Write application's README+ file
comment = 'nr and nt blast datasets downloaded from NCBI.'
self.write_readme(download_url='{}'.format(folder_url),
downloaded_files=downloaded_file,
download_failed_files=files_download_failed,
comment=comment,
execution_time=(time.time() - download_start_time))
return True
# Check the correctness of the downloaded file
def checksum(self, md5_file, file_name):
"""Check the correctness of the downloaded file"""
try:
with open(md5_file, 'r') as f:
md5_file_contents = f.read()
md5_str = md5_file_contents.split(' ')[0]
os.remove(md5_file)
except Exception as e:
logging.exception('Could not read MD5 file {}. \
\nTry to download the file again'.format(file_name))
return False
if not self.check_md5(file_name, md5_str):
logging.error('Failed in checksum. Download the file again.')
return False
return True
```
#### File: reference-data-manager/brdm/NcbiTaxonomyData.py
```python
import os
import shutil
import tempfile
import logging
import time
import requests
from distutils.dir_util import copy_tree
from brdm.NcbiData import NcbiData
from brdm.RefDataInterface import RefDataInterface
class NcbiTaxonomyData(NcbiData, RefDataInterface):
def __init__(self, config_file):
"""Initialize the object"""
super(NcbiTaxonomyData, self).__init__(config_file)
self.download_folder = \
self.config['ncbi']['taxonomy']['download_folder']
self.download_file = self.config['ncbi']['taxonomy']['download_file']
self.taxonomy_file = self.config['ncbi']['taxonomy']['taxonomy_file']
self.info_file_name = self.config['ncbi']['taxonomy']['info_file_name']
# Create destination directory and backup directory
try:
self.destination_dir = os.path.join(
super(NcbiTaxonomyData, self).destination_dir,
self.config['ncbi']['taxonomy']['destination_folder'])
if not os.path.exists(self.destination_dir):
os.makedirs(self.destination_dir, mode=self.folder_mode)
os.chdir(self.destination_dir)
self.backup_dir = os.path.join(
super(NcbiTaxonomyData, self).backup_dir,
self.config['ncbi']['taxonomy']['destination_folder'])
if not os.path.exists(self.backup_dir):
os.makedirs(self.backup_dir, mode=self.folder_mode)
except Exception as e:
logging.error('Failed to create destination_dir/backup_dir {}'
.format(e))
def update(self):
"""Update NCBI taxonomy database
The method first download the most recent taxonomy from NCBI;
then format and backup the taxonomy information.
"""
logging.info('Executing NCBI taxonomy update')
# Download files into the intermediate folder
temp_dir = self.create_tmp_dir(self.destination_dir)
if not temp_dir:
logging.error('Failed to create the temp_dir: {}'.format(temp_dir))
return False
success = self.download()
if not success:
logging.error('Download failed. Quit the Update process.')
return False
# Format the taxonomy file and remove unwanted files
# and change file mode
format_success = self.format_taxonomy(self.taxonomy_file)
if not format_success:
logging.error('Failed to format taxonomy file')
return False
# Backup the files
backup_success = self.backup()
if not backup_success:
logging.error('Backup of taxonomy data did not succeed.')
return False
# Delete old files from the destination folder
# Copy new files from intermediate folder to destination folder
clean_ok = self.clean_destination_dir(self.destination_dir)
if not clean_ok:
return False
try:
copy_tree(temp_dir, self.destination_dir)
shutil.rmtree(temp_dir)
except Exception as e:
logging.error('Failed to move files from temp_dir to \
\n destination folder, error{}'.format(e))
return False
return True
# Download taxonomy database
def download(self, test=False):
"""Download the most recent taxonomy database"""
logging.info('Downloading NCBI taxonomy')
download_start_time = time.time()
downloaded_files = []
files_download_failed = []
max_download_attempts = self.download_retry_num
file_name = self.download_file
readme_success = False
download_success = test
unzip_success = False
attempt = 0
completed = False
while attempt < max_download_attempts and not completed:
attempt += 1
try:
file_url = os.path.join(self.login_url, self.download_folder)
session_requests, connected = self.https_connect()
if not readme_success:
# download readme file:
file_name_readme = self.info_file_name
file_url_readme = os.path.join(file_url, file_name_readme)
readme_success = self.download_a_file(
file_name_readme, file_url_readme, session_requests)
if not download_success:
# download md5 file:
file_name_md5 = self.download_file+'.md5'
file_url_md5 = os.path.join(file_url, file_name_md5)
md5_success = self.download_a_file(
file_name_md5, file_url_md5, session_requests)
# download taxdump zipped file
file_name_taxon = self.download_file
file_url_taxon = os.path.join(file_url, file_name_taxon)
taxon_success = self.download_a_file(
file_name_taxon, file_url_taxon, session_requests)
# check md5
download_success = self.checksum(
file_name_md5, file_name_taxon)
if download_success and readme_success:
completed = True
session_requests.close()
except Exception as e:
logging.info('Failed to download taxonomy on attempt {}. \
\nError: {}'.format(attempt, e))
time.sleep(self.sleep_time)
if completed and not test:
unzip_success = self.unzip_file(file_name_taxon)
if not unzip_success and not test:
files_download_failed.append(file_name)
logging.error('Failed to download {} after {} attempts'
.format(file_name, max_download_attempts))
return False
# Write the README+ file
downloaded_files.append(file_name)
comment = 'Taxonomy reference databases that downloaded from NCBI.'
self.write_readme(
download_url='{}/{}/{}'.format(self.login_url,
self.download_folder,
self.download_file),
downloaded_files=downloaded_files,
download_failed_files=files_download_failed,
comment=comment,
execution_time=(time.time() - download_start_time))
return True
def checksum(self, md5_file, file_name):
"""Check the correctness of the downloaded file"""
try:
with open(md5_file, 'r') as f:
md5_file_contents = f.read()
md5_str = md5_file_contents.split(' ')[0]
os.remove(md5_file)
except Exception as e:
logging.exception('Could not read MD5 file {}. \
\nTry to download the file again'.format(file_name))
return False
if not self.check_md5(file_name, md5_str):
logging.warning('Failed in checking MD5. Download file again.')
return False
return True
# Write the taxonomy file in a specific format, redmine #12865-14
def format_taxonomy(self, filename):
"""Write the taxonomy file in a specific format"""
dmp_file = filename+'.dmp'
taxonomy_file = filename+'.txt'
try:
taxonomy = open(taxonomy_file, 'w')
taxonomy.write(
'taxon_id\ttaxon_name\td__domain; k__kingdom; p__phylum; '
+ 'c__class; o__order; f__family; g__genus; s__species\n')
with open(dmp_file) as fp:
content = fp.readlines()
for line in content:
line = line[:-3]
x = line.split('\t|\t')
tax_id, tax_name, species, genus, family, order, \
taxon_class, phylum, kingdom, superkingdom = x
taxonomy.write(tax_id + '\t' + tax_name + '\td__'
+ superkingdom + '; k__' + kingdom
+ '; p__' + phylum + '; c__'
+ taxon_class + '; o__' + order + '; f__'
+ family + '; g__' + genus + '; s__'
+ species + '\n')
taxonomy.close()
except Exception as e:
logging.exception('Failed to format taxonomy file')
return False
# remove unwanted file and change file mode
app_readme_file = self.config['readme_file']
ncbi_readme_file = self.info_file_name
taxonomy_file = self.taxonomy_file + '.txt'
try:
only_files = [f for f in os.listdir('.') if os.path.isfile(f)]
for f in only_files:
if not f == app_readme_file and not f == ncbi_readme_file \
and not f == taxonomy_file:
os.remove(f)
else:
os.chmod(f, self.file_mode)
except Exception as e:
logging.error('Failed to remove unwanted files:{}'.format(e))
return False
return True
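# Output format example (sketch): each data row of <taxonomy_file>.txt is tab-separated as
#   <tax_id>\t<tax_name>\td__<superkingdom>; k__<kingdom>; p__<phylum>; c__<class>;
#   o__<order>; f__<family>; g__<genus>; s__<species>
# mirroring the header line written above.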
def backup(self):
"""Backup the taxonomy information"""
logging.info('Executing NCBI taxonomy backup')
backup_folder = self.create_backup_dir()
if not backup_folder:
logging.error('NCBI taxonomy Backup did not succeed.')
return False
try:
src_files = [f for f in os.listdir('.') if os.path.isfile(f)]
for filename in src_files:
shutil.copy(filename, backup_folder)
except Exception as e:
logging.exception('Failed in NCBI taxonomy Backup: {}'.format(e))
return False
return True
def restore(self, proposed_folder_name, path_to_destination):
"""Restore old version of taxonomy database from backups
Args:
proposed_folder_name (string): in format yyyy-mm-dd; it is
the version of the database you want to restore
path_to_desination (string): The path to a place that you
want to store the restored database
Return:
True if the database restored successfully; False otherwise
"""
logging.info('Executing NCBI taxonomy restore {} to {}'
.format(proposed_folder_name, path_to_destination))
# check the restore folder, return false if not exist or empty folder
try:
restore_folder = self.check_restore_date(
self.backup_dir, proposed_folder_name)
if not restore_folder:
return False
restore_destination = self.check_restore_destination(
path_to_destination)
if not restore_destination:
return False
# create restore destination folder
if not os.path.isdir(restore_destination):
os.makedirs(restore_destination, mode=self.folder_mode)
# copy the all the files in backup_dir/folder_name to
# restore destination
os.chdir(restore_folder)
for filename in os.listdir(restore_folder):
shutil.copy2(filename, restore_destination)
except Exception as e:
logging.exception('Failed in NCBI taxonomy restore: {}'.format(e))
return False
print('The restored database is located at ' + restore_destination)
logging.info('The restored database is located at {}'
.format(restore_destination))
return True
```
#### File: reference-data-manager/tests/TestGreenGene.py
```python
import unittest
import os
from time import gmtime, strftime
from brdm.GreenGeneData import GreenGeneData
class TestGreenGene(unittest.TestCase):
@classmethod
def setUpClass(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.fixture = GreenGeneData('{}/test_config.yaml'
.format(dir_path))
'''
@classmethod
def tearDownClass(self):
if os.path.exists(self.fixture.destination_dir):
shutil.rmtree(self.fixture.destination_dir)
if os.path.exists(self.fixture.backup_dir):
shutil.rmtree(self.fixture.backup_dir)
pass
'''
def test_1_download(self):
print('Check method download...')
success = self.fixture.download(test=True)
self.assertTrue(success, 'Failed in GreenGene download.')
def test_2_update(self):
print('Update GreenGene...')
success = self.fixture.update()
self.assertTrue(success, 'Failed in GreenGene update.')
def test_3_readme(self):
print('Check readme files...')
gg_readme = os.path.join(self.fixture.destination_dir,
self.fixture.info_file_name)
self.assertTrue(os.path.isfile(gg_readme),
'Cannot find GreenGene README file.')
readme_file = os.path.join(self.fixture.destination_dir,
self.fixture.config['readme_file'])
self.assertTrue(os.path.isfile(readme_file),
'Cannot find RDM README+ file.')
def test_4_backup(self):
print('Check GreenGene backup ...')
backup_folder = os.path.join(self.fixture.backup_dir,
strftime('%Y-%m-%d', gmtime()))
gg_readme = os.path.join(backup_folder,
self.fixture.info_file_name)
self.assertTrue(os.path.isfile(gg_readme),
'Failed in backup: Cannot not find GreenGene README.')
readme_file = os.path.join(backup_folder,
self.fixture.config['readme_file'])
self.assertTrue(os.path.isfile(readme_file),
'Failed in backup: Cannot find RDM README+ file.')
if __name__ == '__main__':
unittest.main()
```
#### File: reference-data-manager/tests/TestNcbiBlastData.py
```python
import os
import re
import unittest
from brdm.NcbiBlastData import NcbiBlastData
class TestNcbiBlastData(unittest.TestCase):
@classmethod
def setUpClass(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.fixture = NcbiBlastData('{}/test_config.yaml'.format(dir_path))
'''
@classmethod
def tearDownClass(self):
if os.path.exists(self.fixture.destination_dir):
shutil.rmtree(self.fixture.destination_dir)
if os.path.exists(self.fixture.backup_dir):
shutil.rmtree(self.fixture.backup_dir)
pass
'''
def test_1_get_all_file(self):
print('Get ncbi nrnt blast file list...')
folder_url = os.path.join(self.fixture.login_url,
self.fixture.download_folder)
file_list = self.fixture.get_all_file(folder_url)
self.assertGreater(len(file_list), 0, 'File list is empty')
file_match = []
for i in file_list:
nr_nt_re = re.match("n[r|t]", i)
if nr_nt_re:
file_match.append(i)
self.assertEqual(len(file_list), len(file_match),
'Missing some nrnt files')
def test_2_update(self, files=2):
print('Update ncbi nrnt blast...')
success = self.fixture.update(file_number=files)
self.assertTrue(success, 'NCBI nrnt update did not return True.')
def test_3_unzip(self):
print('Unzip ncbi nrnt blast...')
success = self.fixture.unzip()
self.assertTrue(success, 'NCBI nrnt unzip did not return True.')
def test_4_readme(self):
print('Check readme files...')
ncbi_readme = os.path.join(self.fixture.destination_dir,
self.fixture.info_file_name)
self.assertTrue(os.path.isfile(ncbi_readme),
'Cannot find NCBI README file.')
readme_file = os.path.join(self.fixture.destination_dir,
self.fixture.config['readme_file'])
self.assertTrue(os.path.isfile(readme_file),
'Cannot find RDM README+ file.')
def test_5_download(self):
print("Check file download...")
folder_url = os.path.join(self.fixture.login_url,
self.fixture.download_folder)
file_list = self.fixture.get_all_file(folder_url)
start_time = os.path.getctime(self.fixture.destination_dir)
self.fixture.download(download_file_number=2)
end_time = os.path.getctime(self.fixture.destination_dir)
self.assertGreater(end_time, start_time, "No new files downloaded")
directory_list = os.listdir(self.fixture.destination_dir)
download_file_size = 0
self.assertFalse(set(directory_list).isdisjoint(set(file_list)),
'Expected download file not found')
for directory_file in directory_list:
if directory_file in file_list:
download_file_size = os.path.getsize(directory_file)
self.assertGreater(download_file_size, 0,
'Downloaded file is empty')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "AAFC-BICoE/seqdb-py",
"score": 2
} |
#### File: seqdb_py/api/FeatureApi.py
```python
import json
from api.BaseApiEntity import BaseApiEntity
from api.BaseSeqdbApi import UnexpectedContent
class FeatureApi(BaseApiEntity):
def __init__(self, api_key, base_url):
super(FeatureApi, self).__init__(api_key=api_key, base_url=base_url,
request_url='feature')
def get_param_str(self):
return ''
def create(self, name, feature_type_id, feature_locations, sequence_id,
description='', feature_default=False, parent_id=None):
""" Creates a Feature
Args:
name: name of the feature
feature_type_id: id of the FeatureType
feature_locations: list of location dicts for the feature
sequence_id: id of the sequence the feature is attached to
description: free-text description of the feature
feature_default: whether this feature is the default one
parent_id: id of the parent feature, if any
Returns:
'result' from the json response
Raises:
requests.exceptions.ConnectionError
requests.exceptions.HTTPError
UnexpectedContent
"""
post_data = {
'feature': {
'name': name,
'featureType': {'id': feature_type_id},
'featureLocations': feature_locations,
'description': description,
'featureDefault': feature_default
}
}
if parent_id is not None:
post_data['feature']['parentFeature'] = {'id': parent_id}
resp = super(FeatureApi, self)\
.create('{}sequence/{}/{}'
.format(self.base_url, sequence_id, self.request_url),
json.dumps(post_data))
jsn_resp = resp.json()
if 'result' not in jsn_resp and \
('statusCode' not in jsn_resp['metadata'] or
'message' not in jsn_resp['metadata']):
raise UnexpectedContent(response=jsn_resp)
return jsn_resp['result']
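# Usage note (sketch): feature_locations is a list of location dicts, e.g.
# [{'start': 3, 'end': 5, 'frame': 1, 'strand': 1}], matching the structure exercised
# in TestFeatureApi.test_create_get_delete_feature.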
```
#### File: seqdb_py/api/FeatureTypeApi.py
```python
import json
from api.BaseApiEntity import BaseApiEntity
from api.BaseSeqdbApi import UnexpectedContent
class FeatureTypeApi(BaseApiEntity):
def __init__(self, api_key, base_url):
super(FeatureTypeApi, self).__init__(
api_key=api_key, base_url=base_url, request_url='featureType'
)
def get_param_str(self):
return ''
def getFeatureTypesWithIds(self):
"""
Returns:
a dictionary of Feature types with feature name as keys and
featureIds as values
Raises:
requests.exceptions.ConnectionError
requests.exceptions.ReadTimeout
requests.exceptions.HTTPError
UnexpectedContent
"""
feature_types = ''
jsn_resp, result_offset = self.retrieve_json_with_offset(
request_url=self.request_url
)
if jsn_resp:
feature_type_ids = jsn_resp['result']
# Get the rest of the results, if all where not returned
# with the first query
while result_offset:
jsn_resp, result_offset = self.\
retrieve_json_with_offset(request_url=self.request_url,
offset=result_offset)
feature_type_ids.extend(jsn_resp['result'])
feature_types = {}
for feat_type_id in feature_type_ids:
jsn_resp = self.retrieve_json('{}/{}'.format(self.request_url,
feat_type_id))
if jsn_resp:
feature_name = jsn_resp['result']['name']
feature_types[feature_name] = feat_type_id
return feature_types
def create(self, feature_type_name, feature_type_description=''):
""" Creates a FeatureType
Returns:
'result' from the json response
Raises:
requests.exceptions.ConnectionError
requests.exceptions.ReadTimeout
requests.exceptions.HTTPError
UnexpectedContent
"""
post_data = {'featureType': {
'description': feature_type_description, 'name': feature_type_name}
}
resp = super(FeatureTypeApi, self).create(
'{}{}'.format(self.base_url, self.request_url),
json.dumps(post_data))
jsn_resp = resp.json()
if ('result' not in jsn_resp and
('statusCode' not in jsn_resp['metadata'] or
'message' not in jsn_resp['metadata'])):
raise UnexpectedContent(response=jsn_resp)
return jsn_resp['result']
```
#### File: seqdb_py/api/SeqSourceApi.py
```python
from api.BaseApiEntity import BaseApiEntity
class SeqSourceApi(BaseApiEntity):
def __init__(self, api_key, base_url, sequence_id):
# self.clear_all_filters()
super(SeqSourceApi, self).__init__(api_key=api_key,
base_url=base_url,
request_url="sequence/{}/seqSource"
.format(sequence_id))
def get_param_str(self):
return ''
```
#### File: api/test/TestConsensusSequenceApi.py
```python
import unittest
import yaml
from config import config_root
from api.ConsensusSequenceApi import ConsensusSequenceApi
class TestConsensusSequenceApi(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(config_root.path() +
'/config4tests.yaml', 'r') as config_file:
config = yaml.safe_load(config_file)
cls.fixture = ConsensusSequenceApi(
api_key=config['seqdb']['api_key'],
base_url=config['seqdb']['base_url'])
def setUp(self):
pass
def tearDown(self):
pass
def test_filters(self):
# specimenNumber - tested below, skipping
# sequenceName
self.fixture.sequence_name_filter = 'Pyt_arrhenomanes_BR0'
self.assertEqual([358381, 358485], self.fixture.get_ids(),
'Expecting 2 consensus sequences filtered by '
'sequenceName = Pyt_arrhenomanes_BR0')
self.fixture.clear_all_filters()
self.fixture.sequence_name_filter = 'Pyt_arrhenomanes_'
self.assertEqual(5, self.fixture.get_number(),
'Expecting 5 consensus sequences filtered by '
'sequenceName = Pyt_arrhenomanes_, but got {}'
.format(self.fixture.get_number()))
self.fixture.clear_all_filters()
# sample_name_filter
self.fixture.sample_name_filter = 'LEV5508'
self.assertEqual(1, self.fixture.get_number(),
'Expecting 1 consensus sequence filtered by '
'sampleName = LEV5508, but got {}'
.format(self.fixture.get_number()))
self.fixture.clear_all_filters()
self.fixture.sample_name_filter = 'LEV4183'
self.assertEqual(1, self.fixture.get_number(),
'Expecting 1 consensus sequence filtered by '
'sampleName = LEV4183, but got {}'
.format(self.fixture.get_number()))
self.fixture.clear_all_filters()
# pub_ref_seq_filter
# TODO
'''
self.fixture.pub_ref_seq_filter = True
#TODO: this fails, i.e. curl -H 'apikey: ***REMOVED***' '***REMOVED***?
filterName=sequence.submittedToInsdc&filterValue=true&filterWildcard=false'
# Investigate why
va = self.fixture.get_number()
self.assertEqual(15, self.fixture.get_number(),
'Expecting 15 raw sequences filtered by pubRefSeq = ,
but got {}'.format(self.fixture.get_number()))
self.fixture.clear_all_filters()
'''
# gen_bank_GI_filter
# TODO: fix this test
'''
self.fixture.gen_bank_GI_filter = 'gi_'
self.assertEqual(15, self.fixture.get_number(),
'Expecting 15 raw sequences filtered by
genBankGI = LEV4277, but got {}'
.format(self.fixture.get_number()))
self.fixture.clear_all_filters()
'''
# region_name_filter
self.fixture.region_name_filter = 'ITS2-28S'
self.assertEqual(4, self.fixture.get_number(),
'Expecting 4 consensus sequences filtered by '
'regionName = ITS2-28S, but got {}'
.format(self.fixture.get_number()))
self.fixture.clear_all_filters()
self.fixture.region_name_filter = '18s-28s'
self.assertEqual(1, self.fixture.get_number(),
'Expecting 1 consensus sequences filtered by '
'regionName = 18s-28s, but got {}'
.format(self.fixture.get_number()))
self.fixture.clear_all_filters()
# projectName
# Note: This test is failing
'''
self.fixture.project_name_filter = 'grdi'
self.assertEqual(5555, self.fixture.get_number(),
'Expecting 5,555 consensus sequences filtered by
projectName = grdi, but got {}'.format(self.fixture.get_number()))
self.fixture.clear_all_filters()
'''
# collection_code_filter
self.fixture.collection_code_filter = 'lev'
self.assertEqual(229, self.fixture.get_number(),
'Expecting 229 consensus sequences filtered by '
'collectionCode = lev, but got {}'
.format(self.fixture.get_number()))
self.fixture.clear_all_filters()
# taxonomy_rank_filter
self.fixture.taxonomy_rank_filter = 'species'
self.fixture.taxonomy_value_filter = 'megasperma'
self.assertEqual(3, self.fixture.get_number(),
'Expecting 3 consensus sequences filtered by '
'taxonomy, but got {}'
.format(self.fixture.get_number()))
self.fixture.clear_all_filters()
# TODO: test combinations of filters
def test_get_consensus_sequence_ids(self):
self.fixture.specimen_num_filter = 4405
actual = self.fixture.get_ids()
self.assertTrue(actual, 'No Sequence IDs returned.')
self.assertEqual(1, len(actual),
'Expecting 1 consensus sequence associated '
'with this specimen.')
self.assertIn(358301, actual,
'Sequence ID 358301 is expected to be in results, '
'since it is consensus.')
self.assertNotIn(27755, actual,
'Sequence ID 27755 is not expected to '
'be in results, since it is consensus.')
self.fixture.specimen_num_filter = 4264
actual = self.fixture.get_ids()
self.assertTrue(actual, 'No Sequence IDs returned.')
self.assertEqual(2, len(actual),
'Expecting 2 consensus sequences associated with '
'this specimen, but got {}'.format(len(actual)))
self.assertIn(358302, actual,
'Sequence ID 358302 is expected to be in results, '
'since it is consensus')
self.assertIn(4825628, actual,
'Sequence ID 4825628 is expected to be in results, '
'since it is consensus')
self.assertNotIn(27755, actual,
'Sequence ID 27755 is not expected to be in '
'results, since it is consensus.')
def test_create_get_delete_sequence(self):
# create
seq_id, err_cod, msg = self.fixture.create(name='Test',
sequence='ACGTCTGATCGATC')
self.assertTrue(seq_id,
'Creating consensus sequence did not return an id.')
self.assertEqual(err_cod, 201,
'Did not get successful exit code for '
'create consensus sequence.')
# get
self.fixture.sequence_name_filter = 'Test'
seq_ids = self.fixture.get_ids()
self.assertTrue(seq_ids,
'Creating consensus sequence did not return an id.')
self.assertIn(seq_id, seq_ids,
'Expected sequence id was not in the result.')
# delete
delete_jsn_resp = self.fixture.delete(seq_id)
self.assertEqual(200,
delete_jsn_resp['metadata']['statusCode'],
'Could not delete feature type.')
def test_get_fasta_sequences_with_offset(self):
self.fixture.specimen_num_filter = 4405
actual_fasta, result_offset = self.fixture.get_fasta_sequences_with_offset(offset=0)
self.assertTrue(actual_fasta, 'No Sequences returned.')
self.assertIn('>seqdb|358301',
actual_fasta,
'Fasta return is expected to have sequence 3'
'58301, since it is consensus.')
self.assertNotIn('>seqdb|27755',
actual_fasta,
'Fasta return is not expected to have '
'sequence id 27755, since it is raw.')
def test_get_fasta_seq(self):
actual = self.fixture.get_fasta_sequence('358301')
self.assertTrue(actual, 'Fasta sequence is empty.')
self.assertIn('>seqdb|358301', actual, 'Fasta does not contain >seqdb|358301.')
if __name__ == '__main__':
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: api/test/TestFeatureApi.py
```python
import unittest
import yaml
from config import config_root
from api.FeatureApi import FeatureApi
class TestFeatureApi(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(config_root.path() +
'/config4tests.yaml', 'r') as config_file:
config = yaml.safe_load(config_file)
cls.fixture = FeatureApi(
api_key=config['seqdb']['api_key'],
base_url=config['seqdb']['base_url'])
def setUp(self):
pass
def tearDown(self):
pass
def test_create_get_delete_feature(self):
# Create feature
sample_feature_locations1 = \
[{'start': 3, 'end': 5, 'frame': 1, 'strand': 1}]
sample_feature_locations2 = [
{'start': 3, 'end': 5, 'frame': 1, 'strand': 1},
{'start': 334, 'end': 454, 'frame': 2, 'strand': 1}
]
feature_id = self.fixture.create('testName', 1,
sample_feature_locations1, 1,
'sample description', True)
self.assertTrue(feature_id,
'Feature id was not returned after feature creation.')
# Get feature
retrieved_feature = self.fixture.get_entity(feature_id)
self.assertTrue(retrieved_feature, 'No feature was retrieved.')
self.assertEqual('testName',
retrieved_feature['name'],
'Name of the retrieved feature does not match.')
self.assertEqual('sample description',
retrieved_feature['description'],
'Feature description does not match')
self.assertEqual(sample_feature_locations1,
retrieved_feature['featureLocations'],
'')
self.assertEqual(1, retrieved_feature['featureType']['id'],
'Feature type id does not match.')
self.assertTrue(retrieved_feature['featureDefault'],
'Feature default does not match')
# Delete feature
delete_jsn_resp = self.fixture.delete(feature_id)
self.assertEqual(200,
delete_jsn_resp['metadata']['statusCode'],
'Could not delete feature type.')
# Get feature again
retrieved_feature = self.fixture.get_entity(feature_id)
self.assertFalse(retrieved_feature,
'Unexpected: Feature was found after being deleted.')
if __name__ == '__main__':
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: api/test/TestFeatureTypeApi.py
```python
import requests
import unittest
import yaml
from config import config_root
from api.FeatureTypeApi import FeatureTypeApi
class TestFeatureTypeApi(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(config_root.path() +
'/config4tests.yaml', 'r') as config_file:
config = yaml.safe_load(config_file)
cls.fixture = FeatureTypeApi(
api_key=config['seqdb']['api_key'],
base_url=config['seqdb']['base_url'])
def setUp(self):
pass
def tearDown(self):
pass
def test_get_feature_types(self):
# 'http:/***REMOVED***\/api/v1/featureType'
actual = self.fixture.getFeatureTypesWithIds()
self.assertTrue(actual, 'No feature types returned.')
self.assertIn('Quality Trim', actual,
"No feature type 'Quality Trim' found.")
def test_create_delete_feature_type(self):
# curl -X POST -H 'apikey:***REMOVED***' -H
# 'Content-Type: application/json' -d '{'featureType':
# {'featureDescription':'test description 1231q',
# 'featureName':'test type 123123'}}'
# '***REMOVED***/api/v1/featureType'
# self.fixture.deleteFeatureType('19')
feature_type_id = self.fixture.create('test type',
'test type description')
self.assertTrue(feature_type_id,
'Feature type ID was not returned after '
'feature type creation.')
# curl -X DELETE -H 'apikey: ***REMOVED***'
# '***REMOVED***/api/v1/featureType/6'
actual = self.fixture.delete(feature_type_id)
self.assertEqual(200, actual['metadata']['statusCode'],
'Could not delete feature type.')
def test_create_delete_feature_type_multiple_duplicate(self):
# select * from FeatureTypes where Name like 'Test%';
# self.fixture.delete('18')
# Create
feature_type_id1 = self.fixture.create('Test1', 'Test')
feature_type_id2 = self.fixture.create('Test2', 'Test')
self.assertTrue(feature_type_id2,
'Second feature type ID was not returned '
'after feature type creation.')
# Expected to fail: a feature type named 'Test1' already exists (duplicate name)
self.assertRaises(requests.exceptions.HTTPError,
self.fixture.create, 'Test1', 'Duplicate of Test1')
# Delete
actual = self.fixture.delete(feature_type_id1)
self.assertEqual(200, actual['metadata']['statusCode'],
'Could not delete feature type.')
actual = self.fixture.delete(feature_type_id2)
self.assertEqual(200, actual['metadata']['statusCode'],
'Could not delete feature type.')
if __name__ == '__main__':
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: api/test/TestSpecimenApi.py
```python
import requests
import unittest
import yaml
from api.SpecimenApi import SpecimenApi
from config import config_root
class TestSpecimenApi(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(config_root.path() +
'/config4tests.yaml', 'r') as config_file:
config = yaml.safe_load(config_file)
cls.fixture = SpecimenApi(
api_key=config['seqdb']['api_key'],
base_url=config['seqdb']['base_url'])
def test_retrieve(self):
# Test faulty connection
self.assertRaises(requests.exceptions.ConnectionError,
self.fixture.retrieve, 'http://jibberish')
def testGetEntity(self):
actual = self.fixture.get_entity(6601)
self.assertTrue(actual, 'No Specimen returned.')
self.assertEqual(6601, actual['id'], 'Expecting specimen 6601.')
def testGetIdsWithOffset(self):
# TODO: fix this test
# self.fixture.otherIds='|CV-F:CV547|'
self.fixture.other_ids_filter = 'M.'
actual_entity_ids = self.fixture.get_ids()
self.assertEqual(2, len(actual_entity_ids),
'Expecting 2 ids, but got {}.'
.format(len(actual_entity_ids)))
if __name__ == '__main__':
unittest.main()
```
#### File: seqdb_py/tools/seqdb_gb_insert.py
```python
from Bio import Entrez
import logging.config
import os
import re
import shelve
import sys
import urllib
import pdb
from config import config_root
import httplib as http_client
import tools_helper
from api.BaseSeqdbApi import UnexpectedContent
from api.ConsensusSequenceApi import ConsensusSequenceApi
from api.FeatureTypeApi import FeatureTypeApi
from api.FeatureApi import FeatureApi
from api.DeterminationApi import DeterminationApi
from api.GeneRegionApi import GeneRegionApi
from api.RawSequenceApi import RawSequenceApi
from api.SeqSourceApi import SeqSourceApi
from api.SpecimenApi import SpecimenApi
def merge(a, b, path=None):
"merges b into a"
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
else:
a[key] = b[key]
return a
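# Illustrative usage with hypothetical dicts (not part of the original script):
# nested keys from b are folded into a in place, e.g.
#   a = {'seqSource': {'region': {'id': 1}}}
#   b = {'seqSource': {'specimen': {'id': 7}}}
#   merge(a, b)  # a == {'seqSource': {'region': {'id': 1}, 'specimen': {'id': 7}}}
# Conflicting leaf values, e.g. merge({'x': 1}, {'x': 2}), raise an Exception.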
def format_sequence_name(genbank_id, record):
"""Return the value to be added to the SeqDB Sequence Name field.
Args:
genbank_id (str): GenBank Id of the record being processed
record (obj): GenBank record set retrieved from Entrez
Kargs:
None
Returns:
str. The formatted name
Raises:
None
"""
logging.info("Formating Sequence Name for GI: {}".format(genbank_id))
# name = "gi|" + str(genbank_id) + "|"
# name = name + "gb|" + record[0]["GBSeq_accession-version"] + "|"
# name = name + record[0]["GBSeq_definition"].replace(", ", "_").replace(
# "; ", "_").replace(" ", "_").replace(";", "").replace(",", "")
# return name
name = record[0]["GBSeq_definition"]
if len(name) > 255:
name = name[:252] + "..."
logging.info("Returning: {}".format(name))
return name
def format_tracefile_name(**keywds):
"""Return the value to be added to the SeqDB Sequence TracefileName field.
Args:
Any number of key = value pairs
Kargs:
None
Returns:
The urllib urlencoded string representing the key = value pairs
Raises:
None
Currently using the URL encoded query portion of the Entrez URL as this
field will be updated in SeqDB.
"""
logging.info("Formatting Tracefile Name")
result = "?" + urllib.urlencode(keywds)
logging.info("Returning: {}".format(result))
return result
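# Illustrative call with hypothetical values: the keyword pairs are URL-encoded
# into the query string stored as the SeqDB TracefileName, e.g.
#   format_tracefile_name(db="nucleotide", id="12345", rettype="gb", retmode="xml")
#   -> "?db=nucleotide&id=12345&rettype=gb&retmode=xml"  (key order may vary)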
def extract_gene_names(record):
"""Return an array of gene names from the Entrez record.
Args:
record (obj): Genbank record retrieved from Entrez
Kargs:
None
Returns:
str. A list of gene names contained in the record.
Raises:
None
"""
logging.info("Extracting gene names from record: {}".format(record["GBSeq_accession-version"]))
genes = {}
for feature in record["GBSeq_feature-table"]:
if feature["GBFeature_key"] == "gene":
for qualifier in feature["GBFeature_quals"]:
# TODO: Should only be looking at GBQualifier_name == "gene"
if qualifier["GBQualifier_name"] == "gene":
genes[qualifier["GBQualifier_value"]] = 1
logging.debug("Gene name: {}".format(qualifier["GBQualifier_value"]))
logging.debug("Found {} Gene Names".format(len(genes.keys())))
return genes.keys()
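# Illustrative return value for a hypothetical record: a feature table that
# annotates genes "COI" and "ND5" yields ['COI', 'ND5'] (order not guaranteed,
# since the names are collected as dictionary keys).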
def check_feature_type(api_key, url, ftn, create=False, lookup=None):
"""Check to see if SeqDB contains a Feature Type with desired name
Args:
ftn (str): name of feature type to lookup
Kargs:
create (bool): If True, create feature types which are not found
Returns:
int or None. SeqDB feature type id if found/created. None if not
found/created or there are multiple gene regions
with the same name.
Raises:
None
"""
logging.debug("Checking SeqDB for feature type: {}. Create == {}".format(ftn, create))
featureTypeApi = FeatureTypeApi(api_key=api_key, base_url=url)
feature_type_id = None
if lookup is not None and ftn in lookup:
feature_type_id = lookup[ftn]
if feature_type_id is None:
#feature_types = seqdb_ws.getFeatureTypesWithIds()
feature_types = featureTypeApi.getFeatureTypesWithIds()
if ftn in feature_types:
feature_type_id = feature_types[ftn]
elif create is True:
#feature_type_id = seqdb_ws.createFeatureType(ftn, "Genbank Feature Type: %s" % (ftn))
feature_type_id = featureTypeApi.create(ftn, "GenBank Feature Type: {}".format(ftn))
if lookup is not None:
lookup[ftn] = feature_type_id
logging.debug("Returning Feature Type ID: {}".format(feature_type_id))
return feature_type_id
# TODO filter by group
def check_region(api_key, url, gene, create=False):
"""Check to see if SeqDB contains a region with a name as per gene.
Args:
gene (str): name of region to lookup
Kargs:
create (bool): If True, create regions which are not found
Returns:
int or None. SeqDB Gene Region Id if found/created. None if not
found/created or there are multiple gene regions
with the same name.
Raises:
None
"""
logging.debug("Checking SeqDB for region: {}. Create == {}".format(gene, create))
region_id = None
geneRegionApi = GeneRegionApi(api_key=api_key, base_url=url)
geneRegionApi.name_filter = gene
#region_ids = seqdb_ws.getRegionIdsByName(gene)
region_ids = geneRegionApi.get_ids()
if len(region_ids) == 0 and create is True:
#region_id = seqdb_ws.createRegion(gene, "GenBank Gene: {}".format(gene))
region_id = geneRegionApi.create(gene, "GenBank Gene: {}".format(gene))
logging.debug("Created region: {} in SeqDB for {}".format(region_id, gene))
elif len(region_ids) == 1:
region_id = region_ids[0]
logging.debug("Found region: {} in SeqDB for {}".format(region_id, gene))
else:
logging.warn("Found multiple regions for '{}' in SeqDB."
"Currently unable to assign sequences to regions."
"with non-unique names.".format(gene))
logging.debug("Returning region ID: {}".format(region_id))
return region_id
def entrez_search(query, retmax=1, retstart=0, database="nucleotide"):
"""Return record set retrieved via an Entrez search.
Args:
query (str): Entrez query to execute.
Kargs:
retmax (int): Limit number of results returned from Entrez (default=1).
retstart (int): First result to return from Entrez (default=0).
database (str): The Entrez database to query (default="nucleotide")
Returns:
obj. The entrez record set.
Raises:
None
"""
handle = Entrez.esearch(db=database, retstart=retstart, retmax=retmax, term=query)
record = Entrez.read(handle)
handle.close()
return record
def entrez_fetch(genbank_id, rettype="gb", database="nucleotide", retmode=None, cache=None):
"""Retrieve record retrieved via an Entrez fetch.
Args:
genbank_id (str): The genbank id of the record to retrieve.
Kargs:
rettype (str): The Entrez rettype (default="gb")
retmode (str): The Entrez retmode (default="xml")
database (str): The Entrez database to query (default="nucleotide")
Returns:
obj. The Entrez record.
Raises:
None
"""
logging.info("Retrieving record for GI: {} from GenBank".format(genbank_id))
add_to_cache = False
record = None
if cache is not None:
if genbank_id in cache:
logging.debug("Using cached record for GI: {}".format(genbank_id))
record = cache[genbank_id]
else:
add_to_cache = True
if record is None:
handle_text = Entrez.efetch(db=database, id=genbank_id, rettype=rettype, retmode="text")
first_line = handle_text.readline().strip()
possible_error_keywords = ["Error", "Bad", "Cannot", "unavailable", "unable", "is empty"]
while first_line and any(error_substring in first_line for error_substring in possible_error_keywords):
print "Entrez Error:{}".format(first_line)
handle_text = Entrez.efetch(db=database, id=genbank_id, rettype=rettype, retmode="text")
first_line = handle_text.readline().strip()
handle_xml = Entrez.efetch(db=database, id=genbank_id, rettype=rettype, retmode="xml")
try:
record = Entrez.read(handle_xml)
except Entrez.Parser.ValidationError:
logging.error("Failed to parse GenBank record for GI:{}".format(genbank_id))
add_to_cache = False
handle_xml.close()
handle_text.close()
if add_to_cache:
logging.debug("Adding record for GI: {} to cache".format(genbank_id))
cache[genbank_id] = record
tools_helper.pretty_log_json(record[0], level="debug", message="Retrieved GenBank Record: ")
return record
def seqdb_ret_entrez_gene_region_id(api_key, url, record, products=None):
"""Retrieve the SeqDB gene region id corresponding to the gene name on this Entrez sequence.
Args:
genbank_id (str): The GenBank id of the record being processed
record (obj): GenBank record retrieved from Entrez
Kargs:
None
Returns:
int or None. SeqDB Gene Region Id if found.
None if not found or if the Entrez record contains
multiple gene annotations.
Raises:
None
Note, SeqDB only permits a single gene region to be associated with a
sequence, hence we warn on and ignore Entrez sequences with more than one
annotated gene.
"""
seqdb_gene_region_id = None
genes = extract_gene_names(record[0])
if len(genes) > 1:
#seqdb_gene_region_id = check_region(seqdb_ws, "Multigene Region", create=True)
seqdb_gene_region_id = check_region(api_key, url, "Multigene Region", create=True)
logging.debug("Adding multigene sequence, SeqDB region ID: {}".format(seqdb_gene_region_id))
elif len(genes) == 1:
#seqdb_gene_region_id = check_region(seqdb_ws, genes[0], create=True)
seqdb_gene_region_id = check_region(api_key, url, genes[0], create=True)
logging.debug("Found gene: {}, SeqDB region ID: {}".format(genes[0], seqdb_gene_region_id))
elif len(genes) == 0 and products is not None:
if "18S ribosomal RNA" or "internal transcribed spacer 1" or \
"5.8S ribosomal RNA" or "internal transcribed spacer 2" or \
"28S ribosomal RNA" in products:
#seqdb_gene_region_id = check_region(seqdb_ws, "Ribosomal Cistron", create=True)
seqdb_gene_region_id = check_region(api_key, url, "Ribosomal Cistron", create=True)
logging.debug("Identified Ribosomal Cistron based on features in "
"0 gene region, SeqDB region ID: {}".format(seqdb_gene_region_id))
else:
logging.debug("No gene region for 0 gene region, SeqDB region "
"ID: {}".format(seqdb_gene_region_id))
return seqdb_gene_region_id
def seqdb_insert_entrez_sequence(consensusSequenceEntity, genbank_id, record):
"""Insert the GenBank sequence into SeqDB.
Args:
genbank_id (str): The GenBank id of the record being inserted
record (obj): GenBank record retrieved from Entrez
Kargs:
None
Returns:
int. SeqDB id for the inserted consensus sequence.
Raises:
None
"""
logging.info("Adding sequence for GI: {} to SeqDB".format(genbank_id))
seq_name = format_sequence_name(genbank_id, record)
sequence = record[0]["GBSeq_sequence"]
sequence = sequence.upper()
tracefile_dir = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
tracefile_name = format_tracefile_name(db="nucleotide", id=genbank_id, rettype="gb", retmode="xml")
additional = {
'fileDir': tracefile_dir,
'fileName': tracefile_name,
'genBankGI': genbank_id,
'genBankAccession': record[0]["GBSeq_primary-accession"],
'genBankVersion': record[0]["GBSeq_accession-version"],
'submittedToInsdc': 'true'
}
dict_for_logging = {'name': seq_name, 'sequence': sequence}
dict_for_logging.update(additional)
tools_helper.pretty_log_json(dict_for_logging, level="debug", message="Creating consensus (non-default values): ")
#seqdb_id, code, message = seqdb_ws.createConsensusSequence(seq_name, sequence, additional=additional)
seqdb_id, code, message = consensusSequenceEntity.create(seq_name, sequence, additional=additional)
logging.info(
"Created Consensus Sequence (seqdbid: {}) "
"for GI: {} ({}), Status: {}, Message: {}".format(seqdb_id, genbank_id, record[0]["GBSeq_accession-version"], code, message))
return seqdb_id
def seqdb_link_to_specimen(api_key, url, seqdb_id, feature):
"""Associate the sequence with a Specimen in SeqDB based on
supported GBQualifier values. Currently supported values include:
* culture collection
* strain
* specimen voucher
* isolate
Currently looks for entries beginning with "DAOM" or "CBS" in the
above, or with the prefix "personal:". May need to look for additional
prefixes and check values for additional qualifier keys.
"""
logging.info("Linking sequence to available source Specimen")
specimenApi = SpecimenApi(api_key=api_key, base_url=url)
for supported in ['culture_collection', 'strain', 'specimen_voucher', 'isolate']:
if supported in feature['qualifiers']:
for source_entry in feature['qualifiers'][supported]:
code = None
identifier = None
specimenIds = None
#jsn_resp = None
# some source entries are a list split with a semi-colon
sources = source_entry.split(";")
for source in sources:
source = source.strip()
if source.startswith("DAOM") or \
source.startswith("CCFC") or \
source.startswith("CBS") or \
source.startswith("ATCC") or \
source.startswith("INVAM") or \
source.startswith("NISK") or \
source.startswith("BR"):
logging.info("\tPossible known source {}.".format(source))
matched = None
if source.startswith("DAOM"):
# pdb.set_trace()
# TODO Also search / instead based on DAOM field in
# FungalInfo
if source.startswith("DAOMC"):
matched = re.search(r"(?P<collection>DAOM)[C: ]+(?P<identifier>[\w.]+)", source)
else:
matched = re.search(r"(?P<collection>[a-zA-Z]+)[_: ]?(?P<identifier>[\w.]+)", source)
if matched.groupdict()['identifier'].startswith("BR"):
matched = re.search(r"(?P<collection>BR)(?P<identifier>[\d-]+)", matched.groupdict()['identifier'])
elif source.startswith("CCFC"):
# TODO Also search / instead based on CCFC field in
# FungalInfo
matched = re.search(r"CCFC:DAOM (?P<collection>BR) (?P<identifier>[\d.]+)", source)
elif source.startswith("BR"):
matched = re.search(r"(?P<collection>\w+)[: ]?(?P<identifier>[\d.]+) \(DAOM\)", source)
elif source.startswith("INVAM"):
matched = re.search(r"(?P<collection>\w+)[ ]?(?P<identifier>[\w\d]+)", source)
elif source.startswith("CBS") or \
source.startswith("ATCC"):
matched = re.search(r"(?P<collection>\w+)[: ]?(?P<identifier>[\d.]+)", source)
elif source.startswith("NISK"):
matched = re.search(r"(?P<collection>NISK) (?P<identifier>\d+-\d+/\d+)", source)
if matched:
code = matched.groupdict()['collection']
identifier = matched.groupdict()['identifier']
logging.debug("\tChecking SeqDB for Specimen with OtherIds containing "
"Code: {}, Identifier: {}".format(code, identifier))
try:
#jsn_resp = seqdb_ws.getJsonSpecimenIdsByOtherIds(code, identifier)
specimenApi.other_ids_filter = code + identifier
specimenIds = specimenApi.get_ids()
if (not specimenIds):
# pdb.set_trace()
specimenApi.other_ids_filter = code + " " + identifier
specimenIds = specimenApi.get_ids()
except UnexpectedContent, e:
logging.error("Exception querying Specimen using "
"Code: {}, Identifier: {}. {}".format(code, identifier, e))
elif source.startswith("personal:"):
logging.info("\tUnevaluated personal identifier {}.".format(source))
prefix, code, identifier = source.split(":")
# identifier comes from a string split, so test for digits rather than int type
if identifier.isdigit():
logging.debug("\tChecking SeqDB for Specimen "
"Code: {}, Identifier: {}".format(code, identifier))
#jsn_resp = seqdb_ws.getJsonSpecimenIdsBySpecimenId(code, identifier)
specimenApi.other_ids_filter = code + identifier
specimenIds = specimenApi.get_ids()
else:
logging.warn("\tReview non-numeric identifier: {}".format(source))
# the following prefixes don't appear in our otherIds
# column; this could change in the future, but ignoring for now
elif source.startswith("RGR"):
logging.debug("\tSkiping unused source {}".format(source))
else:
logging.error("\tUnevaluated source {}".format(source))
continue
if not specimenIds:
logging.info("\tNo specimen found for code: {} and id: {}".format(code, identifier))
elif len(specimenIds) > 1:
logging.warn("\tUnable to link Sequence to Specimen using "
"Code: {}, Identifier: {}. "
"Found multiple Specimen with OtherIds containing".format(code, identifier))
else:  # exactly one matching Specimen found
seqdb_update_seqsource_specimen(api_key, url, seqdb_id, specimenIds[0])
def seqdb_link_to_taxonomy(api_key, url, seqdb_id, taxon_id, organism, feature):
"""Associate the sequence with a Taxa in the SeqDB Taxonomy based on the
GBQualifier "organism" value"""
determinationApi = DeterminationApi(api_key=api_key, base_url=url)
taxon_id_value = taxon_id.split(":")
org_split = organism.split(" ")
taxonomy = {
"genus":org_split[0],
"species":org_split[1],
"notes":"Created from genbank script"
}
determinationId = determinationApi.create_sequence_determination(sequence_id=seqdb_id, taxonomy=taxonomy,
is_accepted="true",
ncbi_taxon_id=taxon_id_value[1],
notes="Created from genbank script")
logging.info("Sequence determination: {}".format(organism))
def seqdb_update_seqsource_region(api_key, url, seqdb_id, seqdb_region_id):
"""Associate the sequence with a gene region.
Args:
seqdb_id (int): SeqDB id of the sequence to associate with the Region
seqdb_region_id (int):
SeqDB id of the Region to associate with the Sequence
Kargs:
None
Returns:
int. SeqDB id of updated Sequence (Strikes me as odd, but this
is the result of the SeqDB WS API)
Raises:
None
"""
logging.info("Linking Sequence (seqdbid: {}) to Region (seqdbid: {})".format(seqdb_id, seqdb_region_id))
seqsource = {
"seqSource": {
"region": {
"id": seqdb_region_id,
}
}
}
seqSourceApi = SeqSourceApi(api_key=api_key, base_url=url, sequence_id=seqdb_id)
#existing = seqdb_ws.getJsonSeqSource(seqdb_id)
existing = seqSourceApi.retrieve_json(seqSourceApi.request_url)
region_id = None
if 'result' in existing:
# drop headers from response returned above by creating a new dict
# containing only response 'result' portion and then add additional
# properties to it
existing = {"seqSource": existing['result']}
tools_helper.pretty_log_json(seqsource, level="debug", message="Merging")
tools_helper.pretty_log_json(existing, level="debug", message="Into")
merge(existing, seqsource)
response = seqSourceApi.update(seqSourceApi.request_url, existing)
code = response.status_code
message = ""
if (code == 200):
region_id = seqdb_region_id
message = response.json()['metadata']['message']
logging.debug("Updated SeqSource for sequence region linking "
"seqdbid: {}, Status: {}, Message: {}".format(seqdb_id, code, message))
else:
tools_helper.pretty_log_json(existing, level="error", message="Failed to retrieve seqSource for Sequence (seqdbid: {}):".format(seqdb_id))
return region_id
def seqdb_update_seqsource_specimen(api_key, url, seqdb_id, seqdb_specimen_id):
"""Associate the sequence with a specimen.
Args:
seqdb_id (int): SeqDB id of the sequence to associate with the Region
seqdb_specimen_id (int):
SeqDB id of the Specimen to associate with the Sequence
Kargs:
None
Returns:
int. SeqDB id of updated Sequence (Strikes me as odd, but this
is the result of the SeqDB WS API)
Raises:
None
"""
logging.info("Linking Sequence (seqdbid: {}) to Specimen (seqdbid: {})".format(seqdb_id, seqdb_specimen_id))
seqSourceApi = SeqSourceApi(api_key=api_key, base_url=url, sequence_id=seqdb_id)
specimenApi = SpecimenApi(api_key=api_key, base_url=url, specimen_request_url="specimen/{}".format(seqdb_specimen_id))
specimenJson = specimenApi.retrieve_json(specimenApi.request_url)
seqsource = {
"seqSource": {
"specimen": {
"id": seqdb_specimen_id,
},
"group":{"id":specimenJson['result']['group']['id']}
}
}
#existing = seqdb_ws.getJsonSeqSource(seqdb_id)
existing = seqSourceApi.retrieve_json(seqSourceApi.request_url)
region_id = None
if 'result' in existing:
# drop headers from response returned above by
# creating a new dict containing only response 'result' portion
existing = {'seqSource': existing['result']}
merge(existing, seqsource)
# region_id, code, message = seqdb_ws.updateSeqSource(seqdb_id, existing)
# region_id, code, message = seqSourceApi.update(seqSourceApi.request_url, existing)
response = seqSourceApi.update(seqSourceApi.request_url, existing)
code = response.status_code
message = ""
if (code == 200):
region_id = 1
message = response.json()['metadata']['message']
logging.debug("Updated SeqSource for sequence specimen linking "
"seqdbid: {}, Status: {}, Message: {}".format(seqdb_id, code, message))
else:
tools_helper.pretty_log_json(existing, level="error", message="Failed to retrieve seqSource for Sequence (seqdbid: {}):".format(seqdb_id))
return region_id
def parse_locations(gb_feature):
"""Parse the GBFeature_intervals block and create an array of locations in
the local dictionary representation.
Args:
gb_feature: Reference to GBFeature block from Entrez XML
Kargs:
None
Returns:
Array of locations extracted from the Entrez GBFeature block.
Raises:
None
"""
locations = []
#print "******GBFeature_interval: {}\n".format(gb_feature['GBFeature_intervals'])
for interval in gb_feature['GBFeature_intervals']:
#According to Entrez documentation (example: https://github.com/biopython/biopython/blob/master/Tests/Entrez/nucleotide1.xml)
#'GBInterval_from' and 'GBInterval_to' may not exist for a record.
#if 'GBInterval_from' in interval and 'GBInterval_to' in interval:
# TODO determined frame and strand, don't just default to 1
# There is another spot where I adjust the frame
#while 'GBInterval_from' not in interval and 'GBInterval_to' not in interval:
# print "=======Need to wait for GBInterval_from / to"
# time.sleep(3)
locations.append(
{
"start": interval['GBInterval_from'],
"end": interval['GBInterval_to'], "frame": 1, "strand": 1
})
return locations
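# Illustrative output for a hypothetical GBFeature block spanning two intervals:
#   [{'start': '1', 'end': '150', 'frame': 1, 'strand': 1},
#    {'start': '200', 'end': '450', 'frame': 1, 'strand': 1}]
# start/end are kept as the strings Entrez returns; frame and strand default to 1.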
def parse_qualifiers(gb_feature):
"""Parse the GBFeature_quals block of the GBFeature entry and create a
dictionary of qualifiers using the GBQualifier_name as a key.
Args:
gb_feature: Reference to GBFeature block from Entrez XML
Kargs:
None
Raises:
None
"""
qualifiers = {}
#print "*******GBQualifier: {}\n".format(gb_feature['GBFeature_quals'])
for qual in gb_feature['GBFeature_quals']:
if qual['GBQualifier_name'] not in qualifiers:
qualifiers[qual['GBQualifier_name']] = []
qualifiers[qual['GBQualifier_name']].append(qual['GBQualifier_value'])
return qualifiers
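# Illustrative output for a hypothetical feature: qualifiers are grouped by
# GBQualifier_name, so repeated entries accumulate in a list, e.g.
#   {'gene': ['COI'], 'db_xref': ['taxon:5180', 'GeneID:12345']}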
def parse_feature(gb_feature, api_key, url, lookup=None):
"""Parse the GBFeature to create a dict from the record.
Args:
gb_feature: Reference to GBFeature block from Entrez XML
Kargs:
lookup (obj): Reference to a dict to locally cache feature type entries
Returns:
Array of locations extracted from the Entrez GBFeature block.
Raises:
None
"""
logging.debug("Parsing feature: \"{}\"".format(gb_feature['GBFeature_key']))
gb_feature_record = {}
gb_feature_record['location_description'] = gb_feature['GBFeature_location']
gb_feature_record['feature_key'] = gb_feature['GBFeature_key']
gb_feature_record['feature_type_id'] = check_feature_type(api_key, url, gb_feature['GBFeature_key'], create=True, lookup=lookup)
gb_feature_record['locations'] = parse_locations(gb_feature)
gb_feature_record['qualifiers'] = parse_qualifiers(gb_feature)
return gb_feature_record
def adjust_feature_for_codon_start(feature):
"""Adjust feature start if feature contains a "codon_start" annotation.
Args:
feature (dict): Feature entry from internal representation
Kargs:
None
Returns:
The (possibly updated) feature.
Raises:
None
"""
# adjust frame for feature based on codon_start qualifier
if 'codon_start' in feature['qualifiers']:
# doesn't make sense to me that there would be more than one
# codon_start value
if len(feature['qualifiers']['codon_start']) > 1:
logging.warn(
"Multiple codon_start feature qualifiers found. Using first.")
feature['locations'][0]['frame'] = feature[
'qualifiers']['codon_start'][0]
return feature
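# Illustrative effect on a hypothetical feature dict: a CDS with
# qualifiers {'codon_start': ['2']} has its first location's 'frame' set to '2'
# (the string Entrez provides); features without codon_start are returned unchanged.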
def process_features(seqdb_id, record, api_key, url, lookup=None):
"""Process features contained in Entrez GBSeq_feature-table and add them to
the sequence in seqdb.
Args:
seqdb_id (int): SeqDB id of the sequence
record (obj): reference to the GBSeq block of the Entrez record
Kargs:
lookup (obj): Reference to a dict to locally cache feature type entries
Returns:
dict containing all unique "product" GBFeature_quals observed on this record.
Raises:
None
"""
logging.info("Adding features from Entry: {} to Sequence (seqdbid: {})".format(record['GBSeq_accession-version'], seqdb_id))
features = []
for gb_feature in record['GBSeq_feature-table']:
#while 'GBFeature_intervals' not in gb_feature:
# print "=======Need to wait for GBFeature_intervals"
# time.sleep(3)
#while 'GBFeature_quals' not in gb_feature:
# print "======Need to wait for GBFeature_quals"
# time.sleep(5)
features.append(parse_feature(gb_feature, api_key, url, lookup=lookup))
products = {}
gene_id = None
mrna_id = None
cds_id = None
for feature in features:
# create hash of unique product names for future use (returned from
# function)
if 'product' in feature['qualifiers']:
for product in feature['qualifiers']['product']:
products[product] = 1
# default / initial name and description
name = "unnamed " + feature['feature_key']
description = feature['location_description']
# update name, based on one of the following keys in order of
# preference don't expect gene will ever have a 'product', but it
# doesn't hurt anything
name_keys = ['product', 'protein_id', 'gene', 'locus_tag']
for key in name_keys:
if key in feature['qualifiers']:
# if the feature appeared multiple times, use the first entry
# value for the name
name = feature['qualifiers'][key][0]
break
# supplement description (prepend key value pairs to base description)
taxon_id = None
organism_name = None
description_keys = ['db_xref', 'protein_id', 'locus_tag', 'note', 'rpt_type', 'gene', 'product', 'organism']
for key in description_keys:
if key in feature['qualifiers']:
for value in feature['qualifiers'][key]:
description = key + ": " + value + "; " + description
#logging.warn("description: %s" % description)
if key =='db_xref':
taxon_id = value
if key =='organism':
organism_name = value
# currently unsupported features that I've encountered
# 1 STS
# 276 assembly_gap
# 46 exon
# 1 gap
# 39 intron
# 1 mRNA
# 4 misc_RNA
# 98 misc_difference
# 2 misc_feature
# 2650 mobile_element
# 1 source
# 20 stem_loop
# 39 tmRNA
# Assumes another gene will be encountered before a feature that is not a
# child of the current gene.
# TODO check range of parent and null gene/cds/mrna ids once we are
# outside the range
if feature['feature_key'] == 'gene':
#gene_id = seqdb_ws.insertFeature(name, feature['feature_type_id'], feature['locations'], seqdb_id, description=description)
featureApi = FeatureApi(api_key=api_key, base_url=url)
gene_id = featureApi.create(name, feature['feature_type_id'], feature['locations'], seqdb_id, description=description)
elif feature['feature_key'] == 'mRNA':
feature = adjust_feature_for_codon_start(feature)
#mrna_id = seqdb_ws.insertFeature(name, feature['feature_type_id'], feature['locations'], seqdb_id, description=description, parentId=gene_id)
featureApi = FeatureApi(api_key=api_key, base_url=url)
mrna_id = featureApi.create(name, feature['feature_type_id'], feature['locations'], seqdb_id, description=description, parentId=gene_id)
elif feature['feature_key'] == 'CDS':
parent_id = mrna_id
if parent_id is None:
parent_id = gene_id
feature = adjust_feature_for_codon_start(feature)
#cds_id = seqdb_ws.insertFeature(name, feature['feature_type_id'], feature['locations'], seqdb_id, description=description, parentId=parent_id)
featureApi = FeatureApi(api_key=api_key, base_url=url)
cds_id = featureApi.create(name, feature['feature_type_id'], feature['locations'], seqdb_id, description=description, parentId=parent_id)
# TODO do these necessarily have a parent gene?
elif feature['feature_key'] in ['tRNA', 'rRNA', 'misc_RNA']:
#seqdb_ws.insertFeature(name, feature['feature_type_id'], feature['locations'], seqdb_id, description=description, parentId=gene_id)
featureApi = FeatureApi(api_key=api_key, base_url=url)
featureApi.create(name, feature['feature_type_id'], feature['locations'], seqdb_id, description=description, parentId=gene_id)
elif feature['feature_key'] in ['repeat_region', 'misc_feature', 'misc_difference']:
#seqdb_ws.insertFeature(name, feature['feature_type_id'], feature['locations'], seqdb_id, description=description)
featureApi = FeatureApi(api_key=api_key, base_url=url)
featureApi.create(name, feature['feature_type_id'], feature['locations'], seqdb_id, description=description)
elif feature['feature_key'] == 'source':
#seqdb_link_to_specimen(api_key, url, seqdb_id, feature)
seqdb_link_to_taxonomy(api_key, url, seqdb_id, taxon_id, organism_name, feature)
seqdb_link_to_specimen(api_key, url, seqdb_id, feature)
else:
logging.warn("Unsupported feature type: {}".format(feature['feature_key']))
return products
def process_entrez_entry(consensusSequenceEntity, api_key, url, genbank_id, cache=None, lookup=None, delete=False, update=False):
"""Process an Entrez entry.
Args:
genbank_id (str): The GenBank id of the record being processed
Kargs:
cache (obj): a "stash" in which to cache Entrez results returned from GenBank.
lookup (dict): a dict to hold SeqDB Features and save queries.
delete (bool): Default False. Delete existing SeqDB records and recreate them.
Returns:
None
Raises:
None
Overview:
Check if the entry already exists in SeqDB. If so, skip this entry.
Otherwise:
* Insert the sequence into SeqDB.
* If this GenBank entry contains a single gene region, and it is
present in SeqDB (or we have created the corresponding gene
region), associate the Sequence with the Gene Region.
* If this GenBank entry contains feature annotations, add them
to SeqDB
"""
logging.info("Processing GI: {}".format(genbank_id))
# Ensure the record doesn't already exist in SeqDB
# If it does, continue with the next record
logging.debug("Checking for GI: {} in SeqDB".format(genbank_id))
#result = seqdb_ws.getJsonConsensusSequenceIdsByGI(genbank_id)
#seq_ids = seqdb_ws.getConsensusSequenceIds(genBankGI=genbank_id)
consensusSequenceEntity.gen_bank_GI_filter = genbank_id
seq_ids = consensusSequenceEntity.get_ids()
if seq_ids and delete:
logging.info("Deleting existing Sequence (seqdbid: {})".format(seq_ids[0]))
#seqdb_ws.deleteConsensusSequence(seq_ids[0])
consensusSequenceEntity.delete(seq_ids[0])
record = None
if not seq_ids or update:
# retrieve GenBank record
record = entrez_fetch(genbank_id, cache=cache)
seqdb_id = None
if seq_ids and update:
seqdb_id = seq_ids[0]
elif seq_ids:
logging.info("Sequence for GI: {} already exists in SeqDB. Skipping.".format(genbank_id))
if not seq_ids:
if "GBSeq_sequence" in record[0]:
seqdb_id = seqdb_insert_entrez_sequence(consensusSequenceEntity, genbank_id, record)
else:
print("Skipping GI: {}, which does not contain a sequence.".format(genbank_id))
if record is not None and seqdb_id is not None:
features = process_features(seqdb_id, record[0], api_key, url, lookup=lookup)
seqdb_gene_region_id = seqdb_ret_entrez_gene_region_id(api_key, url, record, features)
if seqdb_gene_region_id is not None:
seqdb_update_seqsource_region(api_key, url, seqdb_id, seqdb_gene_region_id)
if logging.getLogger().isEnabledFor(logging.DEBUG):
# retrieve inserted record and display to users for validation
# purposes
#result = seqdb_ws.getJsonSequence(seqdb_id)
rawSequenceEntity = RawSequenceApi(api_key=api_key, base_url=url)
result = rawSequenceEntity.retrieve_json(rawSequenceEntity.request_url)
tools_helper.pretty_log_json(result, level="debug", message="Final Consensus Sequence:")
def main():
"""Load sequences matching Entrez query into SeqDB.
Args:
None
Kargs:
None
Returns:
None
Raises:
None
"""
print("Loading configuration file: {}".format(config_root.path()) + '/config.yaml')
print("Loading tools configuration file: {}".format(os.path.dirname(__file__)) + '/seqdb_gb_insert_config.yaml')
main_config = tools_helper.load_config(config_root.path() + '/config.yaml')
if not main_config:
logging.error(tools_helper.log_msg_noConfig)
sys.exit(tools_helper.log_msg_sysExit)
tool_config = tools_helper.load_config(os.path.dirname(__file__) + '/seqdb_gb_insert_config.yaml')
if not tool_config:
logging.error(tools_helper.log_msg_noConfig)
sys.exit(tools_helper.log_msg_sysExit)
url = main_config['seqdb']['url']
api_key = tool_config['seqdb']['api_key']
logging.config.dictConfig(main_config['logging'])
http_client.HTTPConnection.debuglevel = main_config['http_connect']['debug_level']
# caching the entrez records shaved 2 minutes off the time to load
# ~740 sequences from query: "(*DAOM*[source] and levesque and not
# 'unplaced genomics scaffold')"
# real 11m40.754s
# user 1m31.726s
# sys 0m14.760s
# - vs -
# real 9m21.112s
# user 1m27.726s
# sys 0m13.619s
entrez_cache = shelve.open(tool_config['entrez']['cache'])
# however, caching the lookup shaved an additional ~7 minutes off the total
# time to load above query
# real 2m35.773s
# user 0m16.539s
# sys 0m2.486s
# TODO May not be necessary any longer; instead use API lookup by feature
# type name
feature_type_lookup = {}
logging.info("Script executed with the following command and arguments: {}".format(sys.argv))
consensusSequenceEntity = ConsensusSequenceApi(api_key=api_key, base_url=url)
Entrez.email = tool_config['entrez']['email']
query = tool_config['entrez']['query']
logging.info("Querying GenBank: \'{}\'".format(tool_config['entrez']['query']))
# preliminary query to find out how many records there are
record = entrez_search(query)
# setup loop counters; retrieving records 50 at a time
count = int(record["Count"])
start = 0
retrieve = 50
logging.info("Query returned {} records. Retrieving them in batches of {}".format(count, retrieve))
# repeat until we have all records
while start < count:
print 'Count:' + str(count)
print 'Start:' + str(start)
# retrieve block of records
logging.debug("Retrieving {}..{}".format(start, start + retrieve))
record = entrez_search(query, retstart=start, retmax=retrieve)
# process each returned id in the batch
for genbank_id in record["IdList"]:
process_entrez_entry(
consensusSequenceEntity,
api_key,
url,
genbank_id,
cache=entrez_cache,
lookup=feature_type_lookup,
delete=tool_config['gb_insert']['delete'],
update=tool_config['gb_insert']['update'])
print ("\n >Seqid: {}".format(genbank_id))
start += retrieve
print "***Done***"
if __name__ == "__main__":
main()
```
#### File: tools/test/test_seqdb_config_maker.py
```python
import unittest, os
from tools.seqdb_config_maker import SeqdbConfigMaker
class TestSeqdbConfigMaker(unittest.TestCase):
def setUp(self):
self.config_file_name = "test_config.yaml"
def tearDown(self):
if os.path.isfile(self.config_file_name):
os.remove(self.config_file_name)
def testCreateConfigFile(self):
config_file_abs = SeqdbConfigMaker(api_url="***REMOVED***b", config_file_name=self.config_file_name).createConfigFile(api_key=***REMOVED***)
self.assertTrue(config_file_abs, "Config file was not created.")
self.assertTrue(self.config_file_name in config_file_abs, "Created config file is not named as expected.")
config_file_abs = open(self.config_file_name, 'r')
actual = config_file_abs.read()
config_file_abs.close()
actual_lines = actual.splitlines()
expected_lines = ['seqdb:',' api_key: "***REMOVED***"',' api_url: "***REMOVED***']
self.assertEqual(expected_lines, actual_lines, "User config file does not match expected content.")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "AAFC-BICoE/snakemake-barcoding-assembly-pipeline",
"score": 3
} |
#### File: pipeline_files/bold_retriever-master/engine.py
```python
import csv
from typing import List, Dict
import xml.etree.ElementTree as ET
from Bio.SeqIO import SeqRecord
HEADERS = [
"ID", "OtuID", "BIN", "tax_id", "sequencedescription", "database", "citation",
"taxonomicidentification", "similarity", "url", "country", "lat", "lon",
"phylum", "class", "order", "family", "subfamily", "tribe", "genus", "species",
]
def generate_output_content(all_ids: List[Dict[str, str]], output_filename: str,
seq_record: SeqRecord):
if all_ids:
with open(output_filename, "a") as handle:
csv_writer = csv.DictWriter(handle, fieldnames=HEADERS)
for item in all_ids:
try:
del item["seq_record"]
except KeyError:
pass
csv_writer.writerow(item)
else:
out = {"OtuID": seq_record.id}
for header in HEADERS:
if header not in out:
out[header] = "nohit"
with open(output_filename, "a") as handle:
csv_writer = csv.DictWriter(handle, fieldnames=HEADERS)
csv_writer.writerow(out)
def parse_id_engine_xml(xml: str) -> List[Dict[str, str]]:
try:
root = ET.fromstring(xml)
except (ET.ParseError, TypeError) as error:
print("\n>> Error got malformed XML from BOLD: " + str(error))
# Without a parsed root there is nothing to iterate over, so return early
return []
identifications = []
for match in root.findall('match'):
identification = dict()
for element in match:
if element.tag == "specimen":
for element_child in element:
if element_child.tag == "collectionlocation":
for collection in element_child:
if collection.tag == "coord":
for coord in collection:
identification[coord.tag] = coord.text
else:
identification[collection.tag] = collection.text
else:
identification[element_child.tag] = element_child.text
else:
identification[element.tag] = element.text
identifications.append(identification)
return identifications
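# Illustrative return value for a hypothetical BOLD response: each <match>
# element is flattened into a dict keyed by tag name, e.g.
#   {'ID': 'ABC123-10', 'similarity': '0.98', 'country': 'Canada',
#    'lat': '45.4', 'lon': '-75.7', 'taxonomicidentification': 'Genus species'}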
```
#### File: snakemake-barcoding-assembly-pipeline/pipeline_files/fastq_to_fasta.py
```python
import argparse
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import glob
import os
def main():
parser = argparse.ArgumentParser(description='Parses Fastq Files')
parser.add_argument('-d', type=str,
help='Directory of consensus fastqs', required=True)
parser.add_argument('-o', type=str,
help='Output File', required=True)
args = parser.parse_args()
parse_fastq(args.d, args.o)
def parse_fastq(directory, output):
"""
Takes a fastq generated by consensus calling and extracts the fasta sequence with a basic quality metric.
Quality is determined by counting the presence of Ns, uncapitalized letters and degenerate bases.
:param directory: directory containing the consensus *.fq files
:param output: path of the fasta file to write
:return:
"""
fastqs = glob.glob(os.path.join(directory, "*.fq"))
records = []
for fastq in fastqs:
with open(fastq) as f:
basename = os.path.basename(fastq).replace(".fq", "")
for seq in SeqIO.parse(f, "fastq"):
length = len(seq.seq)
good_bases = \
str(seq.seq).count("A") + str(seq.seq).count("C") \
+ str(seq.seq).count("T") + str(seq.seq).count("G")
rough_quality = length - good_bases
record = SeqRecord(Seq(str(seq.seq)), id=basename,
description="Low Quality Positions: {}".format(rough_quality))
records.append(record)
# Sort list based on quality of sequences
records.sort(key=lambda x: int(str(x.description).replace("Low Quality Positions: ", "")))
# Writes fasta files to sequence
with open(output, "w") as g:
for record in records:
SeqIO.write(record, g, "fasta")
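# Illustrative quality metric for a hypothetical sequence "ACGTnACGT": length is 9
# and good_bases counts only upper-case A/C/G/T (8 here), so rough_quality == 1;
# records with lower values sort first in the output fasta.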
if __name__ == "__main__":
main()
``` |
{
"source": "AAFC-MBB/annotation-scripts",
"score": 3
} |
#### File: AAFC-MBB/annotation-scripts/gff2genbank.py
```python
import sys
import os
from Bio import SeqIO
from Bio.Alphabet import generic_dna
from Bio import Seq
from BCBio import GFF
def main(gff_file, fasta_file = None):
# Use splitext to remove the extension of the original input file
out_file = "%s.gb" % os.path.splitext(gff_file)[0]
# Parser will differ slightly if fasta file is given
# os.stat() returns a stat_result, so compare st_size to detect empty inputs
if os.stat(gff_file).st_size == 0 or (fasta_file is not None and os.stat(fasta_file).st_size == 0):
print "ERROR: Empty file provided or cannot stat files"
exit(64);
elif fasta_file is None:
gff_iter = GFF.parse(gff_file) #Parser/generator object
else:
fasta_input = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta", generic_dna)) # Process fasta file
gff_iter = GFF.parse(gff_file, fasta_input) # Give fasta file to parser
# One line to call all the checking function and to write in genbank format
SeqIO.write(_check_gff(_fix_ncbi_id(gff_iter)), out_file, "genbank")
def _fix_ncbi_id(fasta_iter):
"""GenBank identifiers can only be 16 characters;
Use arbitrary naming system to ensure unique locus names.
Though SeqIO only uses rec.name to generate locus names, so we only need to change that.
Note that the contig # might not actually match the number of the actual contig. It depends on the file order.
"""
#Generate unique IDs based on this.
base = "Contig_"
count = 1
for rec in fasta_iter:
new_id = base + str(count)  # string concatenation with the running contig counter
rec.description = rec.id #Save the ID name so we know what it is.
# rec.id = new_id
rec.name = new_id
count += 1
yield rec
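# Illustrative renaming for hypothetical input records: successive records get
# rec.name Contig_1, Contig_2, ... (used for the GenBank LOCUS line), the original
# FASTA id is preserved in rec.description, and rec.id itself is left unchanged.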
def _check_gff(gff_iterator):
"""Check GFF files before feeding to SeqIO to be sure they have sequences.
"""
for rec in gff_iterator:
# We'd want to ensure that all contigs have the sequences attached to them properly.
if isinstance(rec.seq, Seq.UnknownSeq):
print "FATAL: FASTA sequence not found for '%s' in GFF file" % (
rec.id)
exit(63);
#Strangely, the seq alphabet is set to SingleLetterAlphabet by default.
rec.seq.alphabet = generic_dna
yield _flatten_features(rec)
def _flatten_features(rec):
"""Make sub_features in an input rec flat for output.
GenBank does not handle nested features, so we want to make
everything top level.
Iterates breadth-first over each feature's sub_features, appending every
feature encountered to a single flat top-level list.
"""
out = []
for f in rec.features:
cur = [f]
while len(cur) > 0:
nextf = []
for curf in cur:
out.append(curf)
if len(curf.sub_features) > 0:
nextf.extend(curf.sub_features)
cur = nextf
rec.features = out
return rec
if __name__ == "__main__":
main(*sys.argv[1:])
``` |
{
"source": "AAFC-MBB/blackbox-pipeline",
"score": 2
} |
#### File: blackbox-pipeline/blackbox/BuscoParser.py
```python
from accessoryFunctions import *
from glob import iglob
import os
import shutil
__author__ = 'mikeknowles,akoziol'
class Busco(object):
def buscoprocess(self):
from threading import Thread
os.chdir(self.path)
# Find the fasta files for each sample
# Only make as many threads are there are samples with fasta files
for i in range(len([sample.general for sample in self.metadata if sample.general.bestassemblyfile != "NA"])):
# Send the threads to the analyze method; args is empty as the work is
# passed through the queue
threads = Thread(target=self.analyze, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata:
# Save augustus, blast and BUSCO versions
sample.software.BUSCO, sample.software.Blastn, sample.software.Augustus, sample.software.python3 = \
self.version, self.blast, self.augustus, self.pyversion
if sample.general.bestassemblyfile != "NA":
sample.general.buscoresults = '{}/busco_results'.format(sample.general.outputdirectory)
buscotemp = os.path.join(sample.general.buscoresults, "run_{}".format(sample.name))
sample.commands.BUSCO = "python3 {} -in {} -o {} -l /accessoryfiles/{} -m genome". \
format(self.executable, sample.general.bestassemblyfile, sample.name, self.lineage)
self.qqueue.put((sample, buscotemp))
else:
sample.commands.BUSCO = "NA"
self.qqueue.join()
def analyze(self):
"""Run the quast command in a multi-threaded fashion"""
while True:
sample, temp = self.qqueue.get()
summary = 'short_summary_{}'.format(sample.name)
tempfile, moved = [os.path.join(x, summary) for x in [temp, sample.general.buscoresults]]
# Make sure assembled data exists and BUSCO results do not exist
if sample.general.bestassemblyfile != 'NA' and map(os.path.isfile, [tempfile, moved]) == [False] * 2:
if os.path.isdir(temp): # force incomplete BUSCO runs
sample.commands.BUSCO += " -f"
else:
make_path(sample.general.buscoresults)
execute(sample.commands.BUSCO, cwd=sample.general.buscoresults)
if os.path.isfile(tempfile):
for tempfolder in iglob(os.path.join(temp, '*')):
shutil.move(tempfolder, sample.general.buscoresults)
os.rmdir(temp)
if os.path.isfile(moved):
self.metaparse(sample, moved)
# Signal to the queue that the job is done
self.qqueue.task_done()
@staticmethod
def metaparse(sample, resfile):
pc = lambda x: x if x[0].isupper() else x.title()
if not os.path.isfile(resfile):
print "There was an issue getting the metadata from {0:s}".format(sample.name)
else:
busco = dict()
# Open BUSCO short_summary file and make list of key value pairs then add those the assembly metadata
with open(resfile) as report:
for line in report:
# necessary to split up the ifs to avoid an IndexError
if line.strip():
if line.strip()[0].isdigit():
v, k = [[n, "".join([pc(y) for y in k.split()])] for n, k in [line.strip().split('\t')]][0]
busco[k] = v
# TODO: Add support for list of missed BUSCOs
# This should probably update the datastore to include new busco key-value pairs
sample.assembly.datastore.update(busco)
# busco.update(sample.assembly.datastore)
# sample.assembly = GenObject(busco)
def __init__(self, inputobject):
from Queue import Queue
from Bio.Blast.Applications import NcbiblastnCommandline
from distutils import spawn
# Find blastn and augustus version
self.version = "v1.1b1"
self.augustus = " ".join(get_version(['augustus', '--version']).split()[:2])
self.blast = NcbiblastnCommandline(version=True)()[0].replace('\n', ' ').rstrip()
self.metadata = inputobject.runmetadata.samples
# Retrieve abspath of BUSCO executable using spawn
self.executable = os.path.abspath(spawn.find_executable("BUSCO_{}.py".format(self.version)))
self.pyversion = get_version(['python3', '-c', 'import sys; print(sys.version)']).rstrip()
self.start = inputobject.starttime
self.threads = inputobject.cpus
self.path = inputobject.path
self.qqueue = Queue()
printtime('Running BUSCO {} for gene discovery metrics'.format(self.version.split(",")[0]), self.start)
# Testing with bacterial HMMs
self.lineage = inputobject.clade
self.buscoprocess()
```
#### File: blackbox-pipeline/blackbox/spadesRun.py
```python
from accessoryFunctions import printtime, execute
import os
__author__ = 'adamkoziol,mikeknowles'
class Spades(object):
def spades(self):
from threading import Thread
import spades
# __file__ returns pyc!
spadespath = spades.__file__
if spadespath.endswith('.pyc') and os.path.exists(spadespath[:-1]):
spadespath = spadespath[:-1]
# Find the fastq files for each sample
# Only make as many threads are there are samples with fastq files
for i in range(len([sample.general for sample in self.metadata if type(sample.general.fastqfiles) is list])):
# Send the threads to the assemble method; args is empty as the work is
# passed through the queue
threads = Thread(target=self.assemble, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata:
# Initialise the spades command
spadescommand = ''
# Split the string of the provided kmer argument
kmerlist = self.kmers.split(',')
# Regenerate the list of kmers to use if the kmer is less than the readlength
sample.general.kmers = ','.join([kmer for kmer in kmerlist if int(kmer) <= sample.run.forwardlength])
# Initialise the fastqfiles variable - will store trimmed fastq file names if they exist, and raw fastq
# file names if trimmed fastq files were not created for whatever reason
if type(sample.general.trimmedfastqfiles) is list:
fastqfiles = sorted(sample.general.trimmedfastqfiles)
elif type(sample.general.fastqfiles) is list:
fastqfiles = sorted(sample.general.fastqfiles)
else:
fastqfiles = ''
# Only proceed if fastq files exists
if fastqfiles:
# Set the the forward fastq files
forward = fastqfiles[0]
# Set the output directory
sample.general.spadesoutput = '{}/spades_output'.format(sample.general.outputdirectory)
spadescommand = '-k {} --careful -o {} -t {} '.format(sample.general.kmers, sample.general.spadesoutput,
self.threads)
# If a previous assembly was partially completed, continue from the most recent checkpoint
if os.path.isdir(sample.general.spadesoutput):
spadescommand += ' --continue '
# If there are two fastq files
if self.yaml:
# TODO: implement complex yaml input for spades
yaml = os.path.join(self.path, sample.name + '.yml')
if os.path.isfile(yaml):
spadescommand += '--dataset {} '.format(yaml)
sample.general.dataset = yaml
if "dataset" not in dict(sample.general):
if len(fastqfiles) == 2:
if 'Mate Pair' in sample.run.Assay:
spadescommand += '--mp1-1 {} --mp2-2 {} '.format(forward, fastqfiles[1])
else:
spadescommand += '--pe1-1 {} --pe1-2 {} '.format(forward, fastqfiles[1])
else:
if 'Mate Pair' in sample.run.Assay:
spadescommand += '--mp1-12 {} --mp2-2 {} '.format(forward, fastqfiles[1])
else:
spadescommand += '--s1 {} '.format(forward)
# SPAdes 3.6.2 supports python 3.5
if self.version >= "3.6.2":
spadescommand = "python3 {} {}".format(spadespath, spadescommand.rstrip())
else:
spadescommand = "spades.py " + spadescommand.strip()
# If there are no fastq files, populate the metadata appropriately
else:
sample.general.spadesoutput = 'NA'
# Put the arguments to pass to the assemble method into the queue
self.assemblequeue.put((spadescommand, sample.general.spadesoutput))
# Add the command to the metadata
sample.commands.spadescall = spadescommand
# Record SPAdes version
sample.software.SPAdes = self.version
# Join the threads
self.assemblequeue.join()
# Filter contigs shorter than 1000 bp, and rename remaining contigs with sample.name
printtime('Filtering sequences', self.start)
self.filter()
self.insertsize()
self.parse()
def assemble(self):
"""Run the assembly command in a multi-threaded fashion"""
while True:
command, output = self.assemblequeue.get()
if command and not os.path.isfile('{}/contigs.fasta'.format(output)):
execute(command)
# Signal to the queue that the job is done
self.assemblequeue.task_done()
def filter(self):
"""Filter contigs greater than 1000 bp in length, and copy the filtered files to a common assemblies folder"""
from accessoryFunctions import make_path
from Bio import SeqIO
import shutil
for sample in self.metadata:
# Set the name of the unfiltered spades assembly output file
contigsfile = '{}/contigs.fasta'.format(sample.general.spadesoutput)
# Set the name of the filtered assembly file
filteredfile = '{}/{}.fasta'.format(sample.general.outputdirectory, sample.name)
# Only run on samples that have been processed with spades
if os.path.isfile(contigsfile) and not os.path.isfile(filteredfile):
# http://biopython.org/wiki/SeqIO#Input.2FOutput_Example_-_Filtering_by_sequence_length
over1000bp = []
for record in SeqIO.parse(open(contigsfile, "rU"), "fasta"):
# Include only contigs at least 1000 bp in length
if len(record.seq) >= 1000:
# Replace 'NODE' in the fasta header with the sample name
# >NODE_1_length_705814_cov_37.107_ID_4231
# newid = re.sub("NODE", sample.name, record.id)
record.id = record.id.replace('NODE', sample.name)
# record.id = newid
# Clear the name and description attributes of the record
record.name = ''
record.description = ''
# Add this record to our list
over1000bp.append(record)
# Open the filtered assembly file
with open(filteredfile, 'wb') as formatted:
# Write the records in the list to the file
SeqIO.write(over1000bp, formatted, 'fasta')
# If the filtered file was successfully created, copy it to the BestAssemblies folder
if os.path.isfile(filteredfile):
# Set the assemblies path
sample.general.bestassembliespath = '{}BestAssemblies'.format(self.path)
# Make the path (if necessary)
make_path(sample.general.bestassembliespath)
# Set the name of the file in the best assemblies folder
bestassemblyfile = '{}/{}.fasta'.format(sample.general.bestassembliespath, sample.name)
# Add the name and path of the best assembly file to the metadata
sample.general.bestassemblyfile = bestassemblyfile
# Copy the filtered file to the BestAssemblies folder
if not os.path.isfile(bestassemblyfile):
shutil.copyfile(filteredfile, bestassemblyfile)
else:
sample.general.bestassemblyfile = ''
def insertsize(self):
"""Extracts the insert size and its deviation from the spades.log file"""
for sample in self.metadata:
# Only look if the spades output folder exists, and if there are two fastq files (can't find the insert
# size of single reads)
if os.path.isdir(sample.general.spadesoutput) and len(sample.general.fastqfiles) == 2:
# Set the name of the log file
spadeslogfile = '{}/spades.log'.format(sample.general.spadesoutput)
# Open the log file
with open(spadeslogfile, 'rb') as spadeslog:
# Iterate through the file
for line in spadeslog:
# Find the line with the insert size on it. Will look something like this:
"""
0:02:07.605 144M / 9G INFO General (pair_info_count.cpp : 191) \
Insert size = 240.514, deviation = 105.257, left quantile = 142, right quantile = 384, \
read length = 301
"""
if 'Insert size =' in line:
# Extract the relevant data and add it to the metadata
sample.general.insertsize = line.split('= ')[1].split(',')[0]
sample.general.insertsizestandarddev = line.split('= ')[2].split(',')[0]
# Otherwise, populate with NA
else:
sample.general.insertsize = 'NA'
sample.general.insertsizestandarddev = 'NA'
def parse(self):
import yaml
for sample in self.metadata:
yamlfile = os.path.join(sample.general.spadesoutput, 'corrected', 'corrected.yaml')
if os.path.isfile(yamlfile):
with open(yamlfile) as spades:
for seq in yaml.load(spades):
for group in seq:
main = lambda x: getattr(sample.general, x).extend(seq[group]) \
if hasattr(sample.general, x) else setattr(sample.general, x, seq[group])
if group.startswith('interlaced'):
main('CorrectedSingleReads')
elif group.endswith('reads'):
main('Corrected' + group.title().replace(" ", ""))
def __init__(self, inputobject):
from Queue import Queue
self.metadata = inputobject.runmetadata.samples
self.start = inputobject.starttime
self.kmers = inputobject.kmers
self.threads = inputobject.cpus
self.path = inputobject.path
self.assemblequeue = Queue()
printtime('Assembling sequences', self.start)
import spades_init
spades_init.init()
self.version = spades_init.spades_version.rstrip()
self.yaml = inputobject.dataset
self.spades()
```
#### File: AAFC-MBB/blackbox-pipeline/setup.py
```python
try:
from setuptools import setup, Command
from setuptools.command.install import install
except ImportError:
from distutils.core import setup, Command
from distutils.command.install import install
import os
class RecordGit(install):
description = 'include git SHA-1 sum'
def run(self):
import sys
if not os.path.isfile(os.path.join(os.path.split(__file__)[0], 'blackbox', 'data')):
print 'recording git commit'
from blackbox.accessoryFunctions import make_path
make_path(os.path.join(os.path.split(__file__)[0], 'blackbox', 'data'))
with open(os.path.join(os.path.split(__file__)[0], 'blackbox', 'data', 'git.dat'), 'w') as git:
git.write(os.popen('git rev-parse --short HEAD').read().rstrip())
# Attempt to detect whether we were called from setup() or by another
# command. If we were called by setup(), our caller will be the
# 'run_command' method in 'distutils.dist', and *its* caller will be
# the 'run_commands' method. If we were called any other way, our
# immediate caller *might* be 'run_command', but it won't have been
# called by 'run_commands'. This is slightly kludgy, but seems to
# work.
#
caller = sys._getframe(2)
caller_module = caller.f_globals.get('__name__', '')
caller_name = caller.f_code.co_name
if caller_module != 'distutils.dist' or caller_name != 'run_commands':
# We weren't called from the command line or setup(), so we
# should run in backward-compatibility mode to support bdist_*
# commands.
install.run(self)
else:
self.do_egg_install()
setup(
name='docker-assembly',
version='0.0.dev1',
packages=['blackbox'],
url='https://github.com/MikeKnowles/docker-assembly',
package_data=dict(MBBspades=['blackbox/data/*.dat']),
include_package_data=True,
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Assembly pipeline using Docker and SPAdes',
long_description=open('README.md').read(),
install_requires=['biopython >= 1.65',
'argparse >= 1.4.0',
'pyyaml'],
scripts=['bin/MBBspades'],
cmdclass=dict(install=RecordGit)
)
``` |
{
"source": "AAFC-MBB/LicenseFixer",
"score": 2
} |
#### File: AAFC-MBB/LicenseFixer/licenseFixer.py
```python
import requests
import argparse
import sys
import getpass
import os
from git import Repo
from os import path
import datetime
correctCopyright = """Government of Canada
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
DefaultLicense = "The MIT License (MIT)\n"
correctCopyrightHolder = "Government of Canada"
def cloneRepos(overwrite, dryrun):
username = sys.argv[1]
password = getpass.getpass("Input password: ")
#TODO add in if /else to check for ORG vs user repo's
#TODO add in if/else when dealing with Github vs bitbucket
baseAPI = "https://api.bitbucket.org/2.0/repositories/"+username + "/"
sshBitBucket = "<EMAIL>:"+username + "/"
baseGithubAPIAAFC = "https://api.github.com/orgs/AAFC-MBB/repos"
baseGithubAPIUser = "https://api.github.com/user/" + username + "/repos"
sshGithub = "<EMAIL>:" + username +"/"
req = requests.get(baseAPI, auth=(username,password))
statCode = req.status_code
homePath = os.path.expanduser("~")
i = 0
copyrightDate = ""
if(statCode == 200):
jobj = req.json()
for value in jobj['values']:
i=i+1
# get the repository name; if it's seqdb, skip it in this case
name = value['name'].lower()
if name == "seqdb":
continue
print("RepoName: " + name)
req = requests.get(baseAPI+name, auth=(username,password))
print(sshBitBucket + name + ".git")
repoPathName = path.join(homePath, homePath+name)
repo = Repo.clone_from(sshBitBucket + name + ".git",repoPathName)
#check if license is correct
licensePath = path.join(repoPathName,repoPathName+"/LICENSE")
if path.exists(licensePath):
(equality,copyrightDate) = isLicenseEqual(licensePath, DefaultLicense)
if(not equality and overwrite):
editLicense(licensePath, copyrightDate)
else:
print("License doesn't exist")
editLicense(licensePath, "Copyright (c) " + str(datetime.datetime.now().year) + " ")
if(not dryrun):
#commit and push
index = repo.index
index.add(["LICENSE"])
index.commit("Updated license")
repo.remotes.origin.push()
else:
print("ERROR: " + str(statCode))
exit()
# commit with message "updated license", push
def isLicenseEqual(file1,myLicense):
oFile = open(file1,'r')
fileToStr = oFile.readline()
oFile.readline()
copyrightDate = oFile.readline()
copyrightHolder = copyrightDate[19:-1]
copyrightDate = copyrightDate[:19]
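# Note: the fixed-width slices above assume the copyright line has the form
# "Copyright (c) YYYY Holder" -- the first 19 characters are the date portion
# (as written by editLicense below) and the remainder, minus the trailing
# newline, is the copyright holder.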
print(copyrightHolder == correctCopyrightHolder)
print ("Copyrightholder: " + copyrightHolder)
if (fileToStr == myLicense and copyrightHolder == correctCopyrightHolder):
print("License is in fact equal")
return (True, copyrightDate)
else:
return (False, "Copyright (c) " + str(datetime.datetime.now().year) + " ")
def editLicense(repoPath,copyrightDate):
f = open(repoPath,'w')
print(f)
f.write(DefaultLicense+"\n"+copyrightDate+correctCopyright)
f.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("username")
parser.add_argument("-o", help="Overwrite option")
parser.add_argument("-d", help="Dryrun option, doesn't commit or push")
overwrite=False
dryrun=False
if(len(sys.argv) <= 1):
print("Usage: licenseFixer.py username [-o overwrite] [-d Dryrun]")
exit()
args = parser.parse_args()
if (args.d):
dryrun = True
if (args.o):
overwrite = True
cloneRepos(overwrite, dryrun)
``` |
{
"source": "AAFC-MBB/metAMOS",
"score": 2
} |
#### File: AAFC-MBB/metAMOS/INSTALL.py
```python
import os, sys, string, subprocess, distutils.util, site, glob, multiprocessing
def addEnvironmentVar(varName, newValue, sep = " "):
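# Prepends newValue to the environment variable varName (separated by 'sep') and
# returns the previous value so callers can restore the original flags later
# (see the oldCFlags/oldLDFlags handling below).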
oldVal = ""
if varName in os.environ:
oldVal = os.environ[varName]
os.environ[varName] = newValue + sep + oldVal
else:
os.environ[varName] = newValue
return oldVal
def updateMakeFileForDarwin(fileName, addedCFlags, addedLDFlags, addFlagsToCompile=False):
if OSTYPE == "Darwin":
os.system("cp %s %s.orig"%(fileName, fileName))
numCF=utils.getCommandOutput("grep -c \"CFLAGS*=\" %s.orig"%(fileName), False).strip()
numCX=utils.getCommandOutput("grep -c \"CXXFLAGS*=\" %s.orig"%(fileName), False).strip()
numLD=utils.getCommandOutput("grep -c \"LDFLAGS*=\" %s.orig"%(fileName), False).strip()
numD=utils.getCommandOutput("grep -c \"^DFLAGS*=\" %s.orig"%(fileName), False).strip()
addCF = False
addLD = False
if ((numCF == "" or int(numCF) == 0) and (numCX == "" or int(numCX) == 0)):
addCF = True
if ((numCF == "" or int(numCF) == 0) and (numD == "" or int(numD) == 0)):
addLD = True
os.system("cat %s.orig |awk '{if (match($0, \"^CFLAGS.*=\")) { print $0\" %s\"; } else if (match($0, \"^CXXFLAGS.*=\")) { print $0\" %s\"; } else if (match($0, \"^LDFLAGS.*=\")) { print $0\" %s\" } else if (match($0, \"^DFLAGS =\")) { print $0\" %s\"; } else { print $0; } }' >%s"%(fileName, addedCFlags, addedCFlags, addedLDFlags, addedLDFlags, fileName))
if addCF:
os.system("cp %s %s.orig"%(fileName, fileName))
os.system("cat %s.orig |awk '{if (NR == 1) { print \"CFLAGS=%s\\nCXXFLAGS=%s\\n\"$0; } else { print $0; } }' > %s"%(fileName, addedCFlags, addedCFlags, fileName))
if addLD:
os.system("cp %s %s.orig"%(fileName, fileName))
os.system("cat %s.orig |awk '{if (NR == 1) { print \"LDFLAGS=%s\\n\"$0; } else { print $0; } }' > %s"%(fileName, addedLDFlags, fileName))
if addFlagsToCompile:
os.system("cp %s %s.orig"%(fileName, fileName))
os.system("cat %s.orig |awk '{if (match($1, \"g++\")) { sub(/g\\+\\+/, \"g++ \\$(CXXFLAGS) \\$(LDFLAGS)\", $0) } print $0; }' > %s"%(fileName, fileName))
def copyPerlLib(pathToCopy, dest):
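# Merges a freshly built Perl module tree into 'dest'. The destination is created as
# needed and, when a subdirectory already exists there, only its contents are moved
# so previously installed modules are preserved.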
if pathToCopy != "":
pathsToCopy = pathToCopy.strip().split("\n")
for path in pathsToCopy:
pathToCopy = os.path.dirname(path)
os.system("mkdir -p %s"%(dest))
# copy one at a time in case of conflicts
for file in os.listdir("%s%s"%(pathToCopy, os.sep)):
toCopy = file
file = "%s%s%s"%(pathToCopy, os.sep, toCopy)
if os.path.exists("%s/%s"%(dest, toCopy)):
os.system("mv %s/* %s/%s/"%(file, dest, toCopy))
else:
os.system("mv %s %s/"%(file, dest))
user_home = os.environ["HOME"]
print "<<Welcome to metAMOS install>>"
#check for python version
if (sys.version_info[0] < 2) or (sys.version_info[0] == 2 and sys.version_info[1] < 6):
print "Python version is %s. metAMOS requires at least 2.6"%(sys.version)
sys.exit(1)
#add access to utils.py, for utils dir
METAMOS_ROOT = os.getcwd().strip()
INITIAL_SRC = "%s%ssrc"%(METAMOS_ROOT, os.sep)
sys.path.append(INITIAL_SRC)
import utils
import workflow
sys.path.append(utils.INITIAL_UTILS)
shellv = os.environ["SHELL"]
#add site dir
site.addsitedir(utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib"+os.sep+"python")
site.addsitedir(utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib64"+os.sep+"python")
if "PYTHONPATH" not in os.environ:
os.environ["PYTHONPATH"] = ""
os.environ["PYTHONPATH"]+=utils.INITIAL_UTILS+os.sep+"python"+os.pathsep
os.environ["PYTHONPATH"] += utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib"+os.pathsep
os.environ["PYTHONPATH"] += utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib"+os.sep+"python"+os.pathsep
os.environ["PYTHONPATH"] += utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib64"+os.pathsep
os.environ["PYTHONPATH"] += utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib64"+os.sep+"python"+os.pathsep
sys.path.append(utils.INITIAL_UTILS+os.sep+"python")
sys.path.append(utils.INITIAL_UTILS+os.sep+"python" + os.sep+"lib"+ os.sep+"python")
sys.path.append(utils.INITIAL_UTILS+os.sep+"python" + os.sep+"lib64"+ os.sep+"python")
if 'bash' in shellv or utils.cmdExists('export'):
os.system("export PYTHONPATH=%s:$PYTHONPATH"%(utils.INITIAL_UTILS+os.sep+"python"))
os.system("export PYTHONPATH=%s:$PYTHONPATH"%(utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib"+os.sep+"python"))
os.system("export PYTHONPATH=%s:$PYTHONPATH"%(utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib64"+os.sep+"python"))
elif utils.cmdExists('setenv'):
os.system("setenv PYTHONPATH %s:$PYTHONPATH"%(utils.INITIAL_UTILS+os.sep+"python"))
os.system("setenv PYTHONPATH %s:$PYTHONPATH"%(utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib"+os.sep+"python"))
os.system("setenv PYTHONPATH %s:$PYTHONPATH"%(utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib64"+os.sep+"python"))
else:
print "Cannot set PYTHONPATH variable, unknown shell %s\n"%(shellv)
if not os.path.exists("%s"%utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib"):
os.system("mkdir %s"%utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib")
if not os.path.exists("%s"%utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib64"):
os.system("mkdir %s"%utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib64")
if not os.path.exists("%s"%utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib"+os.sep+"python"):
os.system("mkdir %s"%utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib"+os.sep+"python")
if not os.path.exists("%s"%utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib64"+os.sep+"python"):
os.system("mkdir %s"%utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib64"+os.sep+"python")
ALLOW_FAST=True
HAVE_GCC42=False
HAVE_RT=False
HAVE_QUIET_HEAD=False
GCC_VERSION=0.0
try:
GCC_VERSION=float(utils.getCommandOutput("gcc --version|grep gcc|awk '{print $NF}' |awk -F \".\" '{print $1\".\"$2}'", False))
except:
try:
GCC_VERSION=float(utils.getCommandOutput("gcc --version|grep gcc|awk '{print $3}' |awk -F \".\" '{print $1\".\"$2}'", False))
except:
try:
GCC_VERSION=float(utils.getCommandOutput("gcc --version|grep gcc|awk '{print $4}' |awk -F \".\" '{print $1\".\"$2}'", False))
except:
print "Warning: cannot determine GCC version"
OSTYPE="Linux"
OSVERSION="1"
MACHINETYPE="x86_64"
kronaTools = "KronaTools-2.4"
#identify machine type
p = subprocess.Popen("echo `uname`", shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(checkStdout, checkStderr) = p.communicate()
if checkStderr != "":
print "Warning: Cannot determine OS, defaulting to %s"%(OSTYPE)
else:
OSTYPE = checkStdout.strip()
p = subprocess.Popen("echo `uname -r`", shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(checkStdout, checkStderr) = p.communicate()
if checkStderr != "":
print "Warning: Cannot determine OS version, defaulting to %s"%(OSVERSION)
else:
OSVERSION = checkStdout.strip()
p = subprocess.Popen("echo `uname -m`", shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(checkStdout, checkStderr) = p.communicate()
if checkStderr != "":
print "Warning: Cannot determine system type, defaulting to %s"%(MACHINETYPE)
else:
MACHINETYPE = checkStdout.strip()
addedCFlags=""
addedLDFlags=""
oldCFlags = ""
oldCPPFlags = ""
oldCXXFlags = ""
oldLDFlags = ""
if OSTYPE == "Darwin":
p = subprocess.Popen("echo `gcc --version`", shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(checkStdout, checkStderr) = p.communicate()
if "Apple" not in checkStdout:
ALLOW_FAST=False
gcc42 = utils.getCommandOutput("which g++-4.2", False)
if gcc42 == "":
HAVE_GCC42=False
else:
HAVE_GCC42=True
# global vars for building
libPath=""
clib=utils.getCommandOutput("g++ -print-file-name=libgcc.a", False)
if clib != "":
libPath="%s %s"%(libPath, clib)
cpplib=utils.getCommandOutput("g++ -print-file-name=libstdc++.a", False)
if cpplib != "":
libPath="%s %s"%(libPath, cpplib)
omplib=utils.getCommandOutput("g++ -print-file-name=libgomp.a", False)
if omplib != "":
libPath="%s %s"%(libPath, omplib)
commonFlags="-mmacosx-version-min=10.6 -static-libgcc -static-libstdc++ "
oldCFlags = addEnvironmentVar("CFLAGS", " %s "%(commonFlags))
oldCPPFlags = addEnvironmentVar("CPPFLAGS", " %s "%(commonFlags))
oldCXXFlags = addEnvironmentVar("CXXFLAGS", " %s "%(commonFlags))
oldLDFlags = addEnvironmentVar("LDFLAGS", " %s "%(libPath))
addedCFlags="%s %s"%(commonFlags, libPath)
addedLDFlags="-static-libgcc -static-libstdc++ %s"%(libPath)
libPaths = [ "/usr/lib", "/usr/lib64", "/usr/local/lib/", "/usr/local/lib64/", "/opt/local/lib/", "/opt/local/lib64/"]
for libPath in libPaths:
if os.path.exists(libPath + os.sep + "librt.a") or os.path.exists(libPath + os.sep + "librt.so"):
HAVE_RT=True
break
p = utils.getCommandOutput("head --help |grep \"\\-q\" |wc -l", False)
if int(p) >= 1:
HAVE_QUIET_HEAD=True
# get list of supported workflows
enabledWorkflows = set()
packagesToInstall = set()
knownPackages = set()
workflows = workflow.getAllWorkflows("%s/Utilities/workflows"%(METAMOS_ROOT))
for flow in workflows:
knownPackages.update(workflows[flow].programList)
manual = False
fail = False
nodbs = False
availableWf = workflow.getSupportedWorkflows("%s/Utilities/workflows"%(METAMOS_ROOT), False)
for wf in availableWf:
enabledWorkflows.update(wf.getDerivedName())
packagesToInstall.update(wf.programList)
if (len(sys.argv) > 1):
# should support tool list as well added
for i in range(1, len(sys.argv)):
arg = sys.argv[i]
if arg.lower() in workflows.keys():
packagesToInstall.update(workflows[arg.lower()].programList)
enabledWorkflows.update(workflows[arg.lower()].getDerivedName())
elif arg.lower() == "full":
for flow in workflows:
packagesToInstall.update(workflows[flow].programList)
enabledWorkflows.update(workflows[flow].getDerivedName())
print "Installing all available workflows"
elif arg.lower() == "manual":
manual = True
for flow in workflows:
enabledWorkflows.update(workflows[flow].getDerivedName())
elif arg.lower() == "nodbs":
nodbs = True
elif arg.lower() in knownPackages:
packagesToInstall.add(arg.lower())
for flow in workflows:
if arg.lower() in workflows[flow].programList:
enabledWorkflows.update(workflows[flow].getDerivedName())
break
else:
if arg != "help":
print "Unknown program or workflow %s specified."%(arg)
fail = True
if fail or "help" in sys.argv:
print "Available workflows: %s"%(" ".join(workflows.keys()))
print "Available packages: %s"%("\n\t".join(knownPackages))
exit(1)
if manual:
packagesToInstall = set()
for workflowName in enabledWorkflows:
print "Selected to install workflowName: %s."%(workflowName.upper())
print "Will automatically install:"
for p in packagesToInstall:
print "\t%s"%(p.title())
if not os.path.exists("./Utilities/config/usage.ok") and not os.path.exists("./Utilities/config/usage.no"):
print "MetAMOS would like to record anonymous usage statistics, is this ok ? "
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("echo ok > ./Utilities/config/usage.ok")
else:
os.system("echo no > ./Utilities/config/usage.no")
# first the needed python packages
# make sure we have setuptools available
if 1:
fail = 0
try:
import setuptools
except ImportError:
fail = 1
if "setuptools" in packagesToInstall:
dl = 'y'
elif fail:
print "setuptools not found, required for install, download now?"
dl = raw_input("Enter Y/N: ")
if fail and (dl == 'y' or dl == "Y"):
os.system("curl -L https://bitbucket.org/pypa/setuptools/raw/0.7.4/ez_setup.py -o ez_setup.py")
os.system("python ez_setup.py --user")
if 1:
fail = 0
try:
import psutil
except ImportError:
fail = 1
if "psutil" in packagesToInstall:
dl = 'y'
elif fail:
print "psutil not found, required for memory usage estimation, download now?"
dl = raw_input("Enter Y/N: ")
if fail and (dl == 'y' or dl == "Y"):
os.system("curl -L https://github.com/giampaolo/psutil/archive/release-0.6.1.tar.gz -o ./psutil.tar.gz")
os.system("tar -C ./Utilities/python -xvf psutil.tar.gz")
os.system("mv ./Utilities/python/psutil-release-0.6.1 ./Utilities/python/psutil")
os.chdir("./Utilities/python/psutil")
# don't set static build libs on OSX, seems to cause compile issues for jellyfish
os.environ["CFLAGS"] = oldCFlags
os.environ["CPPFLAGS"] = oldCPPFlags
os.environ["CXXFLAGS"] = oldCXXFlags
os.environ["LDFLAGS"] = oldLDFlags
os.system("python setup.py install --home=%spython"%(utils.INITIAL_UTILS+os.sep))
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf psutil.tar.gz")
if OSTYPE == "Darwin":
# reset env variables again
addEnvironmentVar("CFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CPPFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CXXFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("LDFLAGS", " %s "%(addedLDFlags))
if 1:
fail = 0
try:
import cython
except ImportError:
fail = 1
if "cython" in packagesToInstall:
dl = 'y'
elif fail:
print "cython modules not found, necessary for c-compiling python code, download now?"
dl = raw_input("Enter Y/N: ")
if fail and (dl == 'y' or dl == "Y"):
os.system("curl -L https://github.com/cython/cython/archive/master.zip -o ./cython.zip")
os.system("unzip ./cython.zip")
os.system("mv ./cython-master ./Utilities/python/cython")
os.chdir("./Utilities/python/cython")
# don't set static build libs on OSX, seems to cause compile issues for jellyfish
os.environ["CFLAGS"] = oldCFlags
os.environ["CPPFLAGS"] = oldCPPFlags
os.environ["CXXFLAGS"] = oldCXXFlags
os.environ["LDFLAGS"] = oldLDFlags
os.system("python setup.py install --home=%spython"%(utils.INITIAL_UTILS+os.sep))
if OSTYPE == "Darwin":
# reset env variables again
addEnvironmentVar("CFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CPPFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CXXFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("LDFLAGS", " %s "%(addedLDFlags))
os.chdir(METAMOS_ROOT)
os.system("rm -rf cython.zip")
if 1:
fail = 0
try:
import pysam
except ImportError:
fail = 1
if "pysam" in packagesToInstall:
dl = 'y'
elif fail:
print "pysam python modules not found, necessary for bowtie2 alignments, download now?"
dl = raw_input("Enter Y/N: ")
if fail and (dl == 'y' or dl == "Y"):
os.system("curl -L https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/pysam/pysam-0.6.tar.gz -o ./pysam.tar.gz")
os.system("tar -C ./Utilities/python -xvf pysam.tar.gz")
os.system("mv ./Utilities/python/pysam-0.6 ./Utilities/python/pysam")
doInstall = True
#for root install
#os.system("sudo python ./Utilities/python/pysam/setup.py install")
os.chdir("./Utilities/python/pysam")
if OSTYPE == "Darwin":
if utils.getFromPath("llvm-gcc-4.2", "LLVM GCC"):
os.system("export CC=llvm-gcc-4.2")
os.system("export CXX=llvm-g++-4.2")
os.environ["CC"]="llvm-gcc-4.2"
os.environ["CXX"]="llvm-g++-4.2"
else:
print "Warning: Cannot install pysam on your system. Please install LLVM compiler first."
doInstall=False
if doInstall:
# don't set static build libs on OSX, seems to cause compile issues for jellyfish
os.environ["CFLAGS"] = oldCFlags
os.environ["CPPFLAGS"] = oldCPPFlags
os.environ["CXXFLAGS"] = oldCXXFlags
os.environ["LDFLAGS"] = oldLDFlags
os.system("python setup.py build_ext --inplace")
os.system("python setup.py build")
os.system("python setup.py install --home=%spython"%(utils.INITIAL_UTILS+os.sep))
if OSTYPE == "Darwin":
# reset env variables again
addEnvironmentVar("CFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CPPFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CXXFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("LDFLAGS", " %s "%(addedLDFlags))
os.chdir(METAMOS_ROOT)
os.system("rm -rf pysam.tar.gz")
#WARNING: matplotlib causes install issues for multiple users
fail = 0
try:
import numpy
except ImportError:
fail = 1
if "numpy" in packagesToInstall:
dl = 'y'
elif fail:
print "numpy python modules not found, necessary for html report, download now?"
dl = raw_input("Enter Y/N: ")
if fail and (dl == 'y' or dl == "Y"):
os.system("curl -L http://downloads.sourceforge.net/project/numpy/NumPy/1.7.1/numpy-1.7.1.tar.gz -o ./numpy.tar.gz")
os.system("tar -C ./Utilities/python -xvf numpy.tar.gz")
os.system("mv ./Utilities/python/numpy-1.7.1 ./Utilities/python/numpy")
os.chdir("./Utilities/python/numpy")
os.environ["CFLAGS"] = oldCFlags
os.environ["CPPFLAGS"] = oldCPPFlags
os.environ["CXXFLAGS"] = oldCXXFlags
os.environ["LDFLAGS"] = oldLDFlags
os.system("python setup.py install --home=%spython"%(utils.INITIAL_UTILS+os.sep))
if OSTYPE == "Darwin":
# reset env variables again
addEnvironmentVar("CFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CPPFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CXXFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("LDFLAGS", " %s "%(addedLDFlags))
os.chdir(METAMOS_ROOT)
os.system("rm -rf numpy.tar.gz")
if 1:
fail = 0
try:
import matplotlib
if (matplotlib.__version__ < "1.1.0"):
fail = 1
except ImportError:
fail = 1
if "matplotlib" in packagesToInstall:
dl = 'y'
elif fail:
print "Current matplot lib version is incompatible with metAMOS or matplotlib is not installed. Need version 1.1.0+, download now?"
dl = raw_input("Enter Y/N: ")
if fail and (dl == 'y' or dl == "Y"):
os.system("curl -L http://downloads.sourceforge.net/project/matplotlib/matplotlib/matplotlib-1.1.0/matplotlib-1.1.0.tar.gz -o ./matplotlib.tar.gz")
os.system("tar -C ./Utilities/python -xvf matplotlib.tar.gz")
os.system("mv ./Utilities/python/matplotlib-1.1.0 ./Utilities/python/matplotlib")
os.chdir("./Utilities/python/matplotlib")
os.environ["CFLAGS"] = oldCFlags
os.environ["CPPFLAGS"] = oldCPPFlags
os.environ["CXXFLAGS"] = oldCXXFlags
os.environ["LDFLAGS"] = oldLDFlags
if OSTYPE == "Darwin":
# reset env variables again
addEnvironmentVar("CFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CPPFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CXXFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("LDFLAGS", " %s "%(addedLDFlags))
os.chdir(METAMOS_ROOT)
os.system("rm -rf matplotlib.tar.gz")
# now software
if not os.path.exists("./AMOS") or 0:
if "amos" in packagesToInstall:
dl = 'y'
else:
print "AMOS binaries not found, needed for all steps, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/amos-3.2-BETA-%s-%s.binaries.tar.gz -o ./amos-binaries.tar.gz"%(OSTYPE, MACHINETYPE))
os.system("tar -xvf amos-binaries.tar.gz")
os.system("rm -rf amos-binaries.tar.gz")
# descriptive perl module
stat = utils.getCommandOutput("perl -MStatistics::Descriptive -e 0 && echo $?", True)
if stat == "":
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/Statistics-Descriptive-3.0203.tar.gz -o stat.tar.gz")
os.system("tar -xvzf stat.tar.gz")
os.chdir("Statistics-Descriptive-3.0203")
os.system("perl Makefile.PL PREFIX=`pwd`/build")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
pathToCopy = utils.getCommandOutput("find Statistics-Descriptive-3.0203/build -type d -name \"Statistics\" |grep -v auto", False)
copyPerlLib(pathToCopy, "AMOS%s%s-%s%slib"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm -rf stat.tar.gz")
os.system("rm -rf Statistics-Descriptive-3.0203")
if not os.path.exists("./Utilities/cpp%s%s-%s%skraken"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "kraken" in packagesToInstall:
dl = 'y'
else:
print "Kraken not found, optional for Annotate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
archive = "kraken.tar.gz"
os.system("curl -L http://ccb.jhu.edu/software/kraken/dl/kraken-0.10.4-beta.tgz -o %s"%(archive))
os.system("rm -rf ./Utilities/cpp%s%s-%s%skraken"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("tar -xvzf %s"%(archive))
os.system("mv kraken-0.10.4-beta ./Utilities/cpp/%s%s-%s%skraken"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%skraken"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("./install_kraken.sh `pwd`/bin")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm %s"%archive)
os.system("rm -rf ./Utilities/DB/kraken")
if not os.path.exists("./Utilities/DB/kraken"):
if "kraken" in packagesToInstall:
dl = 'y'
else:
print "Kraken DB not found, required for Kraken, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
settings = utils.Settings(1, 1, "", "")
settings.OSTYPE = OSTYPE
mem = utils.getAvailableMemory(settings)
if (mem < 100) and not nodbs:
print "Insufficient memory to build full Kraken database. Requires at least 100GB of memory, using mini DB"
archive = "minikraken.tgz"
os.system("curl -L http://ccb.jhu.edu/software/kraken/dl/%s -o %s"%(archive, archive))
os.system("tar xvzf %s"%(archive))
os.system("mv minikraken_* ./Utilities/DB/kraken")
os.system("rm %s"%(archive))
elif not nodbs:
# first we need jellyfish which is used to build DB
# kraken needs jellyfish, if we don't find it build it and add to path
jellyfish = utils.getFromPath("jellyfish", "Jellyfish", False)
# check jellyfish version, kraken needs version 1
version=""
if jellyfish != "":
version = utils.getCommandOutput("%s/jellyfish --version |awk '{print substr($NF, 1, index($NF, \".\")-1)}'"%(jellyfish), False)
if int(version) > 1:
jellyfish=""
if jellyfish == "":
archive = "jellyfish.tar.gz"
os.system("curl -L http://www.cbcb.umd.edu/software/jellyfish/jellyfish-1.1.11.tar.gz -o %s"%(archive))
os.system("tar xvzf %s"%(archive))
os.system("mv jellyfish-1.1.11 ./Utilities/cpp%s%s-%s%s/jellyfish"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%s/jellyfish"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("./configure --prefix=`pwd`")
os.system("make")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
pathUpdate = "%s/Utilities/cpp%s%s-%s%sjellyfish/bin/"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep)
if "PATH" in os.environ:
pathUpdate = "%s%s%s"%(os.environ["PATH"], os.pathsep, pathUpdate)
os.environ["PATH"]=pathUpdate
os.chdir("./Utilities/cpp/%s%s-%s%skraken"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("./bin/kraken-build --standard --threads %d --db %s/Utilities/DB/kraken"%(multiprocessing.cpu_count() - 1, METAMOS_ROOT))
os.chdir("%s"%(METAMOS_ROOT))
if not os.path.exists("./LAP"):
if "lap" in packagesToInstall:
dl = 'y'
else:
print "LAP tool not found, needed for multiple assembly pipeline, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L http://www.cbcb.umd.edu/~cmhill/files/lap_release_1.1.zip -o lap_release_1.1.zip")
os.system("unzip lap_release_1.1.zip")
os.system("mv ./lap_release_1.1 ./LAP")
os.system("rm -rf lap_release_1.1.zip")
if not os.path.exists("KronaTools") or 0:
if "kronatools" in packagesToInstall:
dl = 'y'
else:
print "KronaTools not found, needed for Postprocess, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
# TODO: KronaTools should be on the FTP site for robustness to URL changes
os.system("curl -L 'ftp://ftp.cbcb.umd.edu/pub/data/metamos/" + kronaTools + ".tar' -o %s.tar"%(kronaTools))
os.system("tar -xvf %s.tar"%(kronaTools))
os.system("rm -rf %s.tar"%(kronaTools))
os.system("mv %s KronaTools"%(kronaTools))
os.system("cd KronaTools && ./install.pl --prefix=.")
if not os.path.exists("KronaTools/taxonomy/taxonomy.tab") or 0:
if "kronatools" in packagesToInstall:
dl = 'y'
else:
print "KronaTools taxonomy data not found, needed for Postprocess, download now (will take around 20 minutes)?"
dl = raw_input("Enter Y/N: ")
if (dl == 'y' or dl == 'Y') and not nodbs:
os.system("cd KronaTools && ./updateTaxonomy.sh")
os.chdir("%s"%(METAMOS_ROOT))
os.system("cat KronaTools/taxonomy/taxonomy.tab |awk -F \"\\t\" '{print $1\"\\t\"$NF}' > ./Utilities/DB/tax_key.tab")
if not os.path.exists("./FastQC"):
if "fastqc" in packagesToInstall:
dl = 'y'
else:
print "FastQC not found, optional for Preprocess, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
archive = "fastqc_v0.10.0.zip"
os.system("curl -L http://www.bioinformatics.babraham.ac.uk/projects/fastqc/%s -o %s" % (archive,archive))
os.system("unzip %s" % archive)
os.system("rm %s" % archive)
os.system("chmod a+rx FastQC/fastqc")
os.system("chmod -R a+rX FastQC/")
if not os.path.exists("./Utilities/DB/uniprot_sprot.fasta"):
if "uniprot" in packagesToInstall:
dl = 'y'
else:
print "Uniprot/Swissprot DB not found, optional for Functional Annotation, download now?"
dl = raw_input("Enter Y/N: ")
if (dl == 'y' or dl == 'Y') and not nodbs:
archive = "uniprot.tar.gz"
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/%s -o %s" %(archive, archive))
os.system("tar -C ./Utilities/DB/ -xvf %s" % archive)
os.system("rm %s"%archive)
# velvet
if not os.path.exists("./Utilities/cpp%s%s-%s%svelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "velvet" in packagesToInstall:
dl = 'y'
else:
print "Velvet not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
archive = "velvet.tar.gz"
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/velvet_1.2.10.tgz -o %s"%(archive))
os.system("rm -rf ./Utilities/cpp%s%s-%s%svelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("tar -xvzf %s"%(archive))
os.system("mv velvet_1.2.10 ./Utilities/cpp/%s%s-%s%svelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%svelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make clean")
os.system("make CATEGORIES=16 MAXKMERLENGTH=127 OPENMP=1 BUNDLEDZLIB=1")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm %s"%archive)
# velvet-sc
if not os.path.exists("./Utilities/cpp%s%s-%s%svelvet-sc"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "velvet-sc" in packagesToInstall:
dl = 'y'
else:
print "Velvet-SC not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
archive = "velvet-sc.tar.gz"
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/velvet-sc.tar.gz -o %s"%(archive))
os.system("rm -rf ./Utilities/cpp%s%s-%s%svelvet-sc"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("tar -xvzf %s"%(archive))
os.system("mv velvet-sc ./Utilities/cpp/%s%s-%s%svelvet-sc"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%svelvet-sc"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make clean")
os.system("make CATEGORIES=16 MAXKMERLENGTH=127 OPENMP=1")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm %s"%archive)
# metavelvet
if not os.path.exists("./Utilities/cpp%s%s-%s%sMetaVelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "metavelvet" in packagesToInstall:
dl = 'y'
else:
print "MetaVelvet not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
archive = "MetaVelvet-1.2.02.tgz"
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/MetaVelvet-1.2.02.tgz -o %s"%(archive))
os.system("rm -rf ./Utilities/cpp%s%s-%s%sMetaVelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("tar -xvzf %s"%(archive))
os.system("mv MetaVelvet-1.2.02 ./Utilities/cpp/%s%s-%s%sMetaVelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%sMetaVelvet"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
if OSTYPE == "Darwin":
os.system("cp Utils/Utils.hh Utils/Utils.hh.orig")
os.system("cat Utils/Utils.hh.orig |awk '{if (match($0, \"#define MAX_STRING_LENGTH\")) { print \"#include <sys/types.h>\\n\"$0; } else { print $0; }}' > Utils/Utils.hh")
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make clean")
os.system("make CATEGORIES=16 MAXKMERLENGTH=127")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm %s"%archive)
if "viritas" in enabledWorkflows or manual:
if not os.path.exists("./Utilities/cpp%s%s-%s%strnascan"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "trnascan" in packagesToInstall:
dl = 'y'
else:
print "tRNAscan not found, optional for Annotate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L http://lowelab.ucsc.edu/software/tRNAscan-SE.tar.gz -o trnascan.tar")
os.system("tar xvf trnascan.tar")
os.system("mv tRNAscan-SE-1.3.1 ./Utilities/cpp/%s%s-%s%strnascan"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%strnascan"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf trnascan.tar")
# now workflow specific tools
if "optional" in enabledWorkflows or manual:
if not os.path.exists("./Utilities/cpp/%s-%s/metaphylerClassify"%(OSTYPE, MACHINETYPE)) or not os.path.exists("./Utilities/perl/metaphyler/markers/markers.protein") or not os.path.exists("./Utilities/perl/metaphyler/markers/markers.dna"):
if "metaphyler" in packagesToInstall:
dl = 'y'
else:
print "Metaphyler (latest version) not found, optional for Annotate, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L http://metaphyler.cbcb.umd.edu/MetaPhylerV1.25.tar.gz -o metaphyler.tar.gz")
os.system("tar -C ./Utilities/perl/ -xvf metaphyler.tar.gz")
os.system("mv ./Utilities/perl/MetaPhylerV1.25 ./Utilities/perl/metaphyler")
os.system("mv ./Utilities/perl/metaphyler/installMetaphyler.pl ./Utilities/perl/metaphyler/installMetaphylerFORMATDB.pl");
os.system("cat ./Utilities/perl/metaphyler/installMetaphylerFORMATDB.pl |sed 's/formatdb/\.\/Utilities\/cpp\/%s-%s\/formatdb/g' > ./Utilities/perl/metaphyler/installMetaphyler.pl"%(OSTYPE, MACHINETYPE));
os.system("perl ./Utilities/perl/metaphyler/installMetaphyler.pl")
os.system("cp ./Utilities/perl/metaphyler/metaphylerClassify ./Utilities/cpp/%s-%s/metaphylerClassify"%(OSTYPE, MACHINETYPE))
if not os.path.exists("./Utilities/models") or not os.path.exists("./Utilities/DB/blast_data"):
if "fcp" in packagesToInstall:
dl = 'y'
else:
print "Genome models not found, optional for FCP/NB, download now?"
dl = raw_input("Enter Y/N: ")
if (dl == 'y' or dl == 'Y') and not nodbs:
archive = "fcp_models.tar.gz"
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/%s -o %s" %(archive, archive))
os.system("rm -rf ./Utilities/DB/blast_data")
os.system("rm -rf ./Utilities/models")
os.system("tar -C ./Utilities/ -xvf %s" % archive)
os.system("rm %s"%archive)
if not os.path.exists("./phylosift") or not os.path.exists("./phylosift/legacy/version.pm") or not os.path.exists("./phylosift/lib/Params"):
if "phylosift" in packagesToInstall:
dl = 'y'
else:
print "PhyloSift binaries not found, optional for Annotate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
if not os.path.exists("./phylosift"):
# phylosift OSX binaries are included inside the Linux x86_64 tarball
os.system("curl -L http://edhar.genomecenter.ucdavis.edu/~koadman/phylosift/devel/phylosift_20130829.tar.bz2 -o ./phylosift.tar.bz2")
os.system("tar -xvjf phylosift.tar.bz2")
os.system("rm -rf phylosift.tar.bz2")
os.system("mv phylosift_20130829 phylosift")
if not os.path.exists("./phylosift/legacy/version.pm"):
# phylosift needs the version.pm Perl module but doesn't include it
os.system("curl -L http://www.cpan.org/authors/id/J/JP/JPEACOCK/version-0.9903.tar.gz -o version.tar.gz")
os.system("tar xvzf version.tar.gz")
os.chdir("./version-0.9903/")
os.system("perl Makefile.PL")
os.system("make")
os.system("cp -r blib/lib/* ../phylosift/legacy")
os.chdir(METAMOS_ROOT)
os.system("rm -rf version.tar.gz")
os.system("rm -rf version-0.9903")
if not os.path.exists("./phylosift/lib/Params"):
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/params-validate.tar.gz -o ./params-validate.tar.gz")
os.system("tar xvzf params-validate.tar.gz")
os.system("rm -rf params-validate.tar.gz")
# download markers dbs
if not os.path.exists("./phylosift/share"):
markerUrl = utils.getCommandOutput("cat phylosift/phylosiftrc |grep marker_base |awk '{print $NF}' |sed s/\;//g", False)
ncbiUrl = utils.getCommandOutput("cat phylosift/phylosiftrc |grep ncbi_url |awk '{print $NF}' |sed s/\;//g", False)
os.system("mkdir -p ./phylosift/share/phylosift")
os.chdir("./phylosift/share/phylosift")
os.system("curl -L %s/markers.tgz -o marker.tgz"%(markerUrl))
os.system("tar xvzf marker.tgz")
os.system("rm marker.tgz")
os.system("curl -L %s -o ncbi.tgz"%(ncbiUrl))
os.system("tar xvzf ncbi.tgz")
os.system("rm ncbi.tgz")
os.chdir(METAMOS_ROOT)
# check how many partitions the refseq_protein DB currently has and see if we have the expected number
dbResult = ""
if not nodbs:
dbResult = utils.getCommandOutput("perl ./Utilities/perl/update_blastdb.pl refseq_protein --numpartitions", False)
if not nodbs and dbResult == "":
print "Error: could not connect to NCBI, will not be installing refseq protein DB"
elif not nodbs:
(dbName, numPartitions) = dbResult.split("\t", 1)
print "Checking whether %s is complete. Expecting %d partitions.\n"%(dbName, int(numPartitions))
numPartitions = int(numPartitions) - 1
if not os.path.exists("./Utilities/DB/refseq_protein.pal") or not os.path.exists("./Utilities/DB/refseq_protein.%02d.psq"%(int(numPartitions))) or not os.path.exists("./Utilities/DB/allprots.faa"):
if "phmmer" in packagesToInstall:
dl = 'y'
else:
print "refseq protein DB not found or incomplete, needed for Annotate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
print "Download and install refseq protein DB.."
os.system("perl ./Utilities/perl/update_blastdb.pl refseq_protein")
os.system("mv refseq_protein.*.tar.gz ./Utilities/DB/")
fileList = glob.glob("./Utilities/DB/refseq_protein.*.tar.gz")
for file in fileList:
os.system("tar -C ./Utilities/DB/ -xvf %s"%(file))
print " running fastacmd (might take a few min)..."
os.system(".%sUtilities%scpp%s%s-%s%sfastacmd -d ./Utilities/DB/refseq_protein -p T -a T -D 1 -o ./Utilities/DB/allprots.faa"%(os.sep, os.sep, os.sep, OSTYPE, MACHINETYPE, os.sep))
# sra toolkit
if not os.path.exists("./Utilities/cpp%s%s-%s%ssra"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
sra = utils.getFromPath("srapath", "SRA PATH", False)
if sra == "":
if "sra" in packagesToInstall:
dl = 'y'
else:
print "SRA binaries not found, optional for initPipeline step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
if OSTYPE == 'Linux' and MACHINETYPE == "x86_64":
os.system("curl -L http://ftp-trace.ncbi.nlm.nih.gov/sra/sdk/2.3.3-3/sratoolkit.2.3.3-3-centos_linux64.tar.gz -o sra.tar.gz")
elif OSTYPE == "Darwin" and MACHINETYPE == "x86_64":
os.system("curl -L http://ftp-trace.ncbi.nlm.nih.gov/sra/sdk/2.3.3-3/sratoolkit.2.3.3-3-mac64.tar.gz -o sra.tar.gz")
os.system("tar xvzf sra.tar.gz")
os.system("mv sratoolkit.2.3.3-3-* ./Utilities/cpp%s%s-%s%ssra"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm -rf sra.tar.gz")
if "isolate" in enabledWorkflows or "imetamos" in enabledWorkflows or manual:
# check for cmake
if not os.path.exists("./Utilities/cpp%s%s-%s%scmake"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
cmake = utils.getFromPath("cmake", "CMAKE", False)
if cmake == "":
if "cmake" in packagesToInstall:
dl = 'y'
else:
print "cmake binaries not found, optional for initPipeline step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L http://www.cmake.org/files/v2.8/cmake-2.8.12.tar.gz -o cmake.tar.gz")
os.system("tar xvzf cmake.tar.gz")
os.system("mv cmake-2.8.12 ./Utilities/cpp%s%s-%s%scmake"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%scmake"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("./bootstrap --prefix=`pwd`/build;make;make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm cmake.tar.gz")
if os.path.exists("./Utilities/cpp%s%s-%s%scmake"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
pathUpdate = "%s/Utilities/cpp%s%s-%s%scmake/build/bin"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep)
if "PATH" in os.environ:
pathUpdate = "%s%s%s"%(os.environ["PATH"], os.pathsep, pathUpdate)
os.environ["PATH"]=pathUpdate
os.chdir("%s"%(METAMOS_ROOT))
if not os.path.exists("./CA"):
if "ca" in packagesToInstall:
dl = 'y'
else:
print "Celera Assembler binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L https://downloads.sourceforge.net/project/wgs-assembler/wgs-assembler/wgs-8.1/wgs-8.1.tar.bz2 -o wgs-8.1.tar.bz2")
os.system("tar xvjf wgs-8.1.tar.bz2")
os.system("rm -rf wgs-8.1.tar.bz2")
os.system("mv wgs-8.1 CA")
# patch CA to support PacBio sequences and non-apple compilers on OSX
if not ALLOW_FAST:
os.system("cd CA/kmer/ && cp configure.sh configure.original")
os.system("cd CA/kmer/ && cat configure.original |sed s/\-fast//g > configure.sh")
os.system("cd CA/src/ && cp c_make.as c_make.original")
os.system("cd CA/src/ && cat c_make.original |sed s/\-fast//g > c_make.as")
if not HAVE_GCC42:
os.system("cd CA/src/ && cp c_make.as c_make.original")
os.system("cd CA/src/ && cat c_make.original |sed s/\-4.2//g > c_make.as")
if GCC_VERSION >= 4.7:
os.system("cd CA/src/ && cp c_make.as c_make.original")
os.system("cd CA/src/ && cat c_make.original |sed s/\-rdynamic//g > c_make.as")
updateMakeFileForDarwin("CA/kmer/Makefile", addedCFlags, addedLDFlags)
updateMakeFileForDarwin("CA/samtools/Makefile", addedCFlags, addedLDFlags)
updateMakeFileForDarwin("CA/src/c_make.as", addedCFlags, addedLDFlags)
os.system("cd CA/samtools && make")
os.system("cd CA/kmer && ./configure.sh && gmake install")
os.system("cd CA/src && gmake")
if not os.path.exists("./Utilities/cpp%s%s-%s%sRay"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "ray" in packagesToInstall:
dl = 'y'
else:
print "Ray binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
# check for mpi which is required
command="mpicxx"
mpi=utils.getFromPath(command, "MPI", False)
if not os.path.exists("%s%s%s"%(mpi, os.sep, command)):
command="openmpicxx"
mpi=utils.getFromPath(command, "MPI", False)
if not os.path.exists("%s%s%s"%(mpi, os.sep, command)):
mpi = command = ""
print "Error: cannot find MPI, required to build Ray. Please add it to your path."
if command != "":
os.system("curl -L http://downloads.sourceforge.net/project/denovoassembler/Ray-v2.2.0.tar.bz2 -o Ray-v2.2.0.tar.bz2")
os.system("tar xvjf Ray-v2.2.0.tar.bz2")
os.system("mv Ray-v2.2.0 ./Utilities/cpp/%s%s-%s%sRay"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%sRay"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("make PREFIX=bin MPICXX=%s%s%s MAXKMERLENGTH=128 MPI_IO=y DEBUG=n ASSERT=n EXTRA=\" -march=native\""%(mpi, os.sep, command))
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf Ray-v2.2.0.tar.bz2")
if not os.path.exists("./Utilities/cpp%s%s-%s%skmergenie"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
kmerGenie = utils.getFromPath("kmergenie", "Kmer Genie", False)
if kmerGenie == "":
if "kmergenie" in packagesToInstall:
dl = 'y'
else:
print "Kmer Genie was not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/kmergenie-1.5692.tar.gz -o kmer.tar.gz")
os.system("tar xvzf kmer.tar.gz")
os.system("mv kmergenie-1.5692 ./Utilities/cpp%s%s-%s%skmergenie"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%skmergenie"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("makefile", addedCFlags, addedLDFlags)
os.system("make k=300")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf kmer.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%sspades"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
spades = utils.getFromPath("spades.py", "SPAdes", False)
if spades == "":
if "spades" in packagesToInstall:
dl = 'y'
else:
print "SPAdes was not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
if OSTYPE == "Darwin":
if GCC_VERSION < 4.7:
print "Error: SPAdes requires gcc at least version 4.7, found version %s. Please update and try again"%(GCC_VERSION)
else:
os.system("curl -L http://spades.bioinf.spbau.ru/release3.0.0/SPAdes-3.0.0.tar.gz -o spades.tar.gz")
os.system("tar xvzf spades.tar.gz")
os.system("mv SPAdes-3.0.0 ./Utilities/cpp%s%s-%s%sspades"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%sspades"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("export CC=`which gcc` && bash spades_compile.sh")
os.chdir("%s"%(METAMOS_ROOT))
else:
os.system("curl -L http://spades.bioinf.spbau.ru/release3.0.0/SPAdes-3.0.0-Linux.tar.gz -o spades.tar.gz")
os.system("tar xvzf spades.tar.gz")
os.system("mv SPAdes-3.0.0-Linux ./Utilities/cpp%s%s-%s%sspades"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm -rf spades.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%sprokka"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
prokaBin = utils.getFromPath("prokka", "Prokka", False)
dl = 'n'
if prokaBin == "":
if "prokka" in packagesToInstall:
dl = 'y'
else:
print "Prokka binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
signalp = utils.getFromPath("signalp", "SignalP", False)
if signalp == "":
print "Warning: SignalP is not installed and is required for Prokka's gram option. Please download it and add it to your path."
os.system("curl -L http://www.vicbioinformatics.com/prokka-1.11.tar.gz -o prokka-1.11.tar.gz")
os.system("tar xvzf prokka-1.11.tar.gz")
os.system("mv prokka-1.11 ./Utilities/cpp%s%s-%s%sprokka"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm prokka-1.11.tar.gz")
bioperl = utils.getCommandOutput("perl -MBio::Seq -e 0 && echo $?", True)
perltime = utils.getCommandOutput("perl -MTime::Piece -e 0 && echo $?", True)
xmlsimple = utils.getCommandOutput("perl -MXML::Simple -e 0 && echo $?", True)
storable = utils.getCommandOutput("perl -MStorable -e 0 && echo $?", True)
xmlparser = utils.getCommandOutput("perl -MXML::Parser -e 0 && echo $?", True)
# always install bioperl, otherwise parts may be missing or it may be the wrong version
# phylosift comes with BioPerl, use it
os.system("curl -L http://edhar.genomecenter.ucdavis.edu/~koadman/phylosift/devel/phylosift_20130829.tar.bz2 -o ./phylosift.tar.bz2")
os.system("tar -xvjf phylosift.tar.bz2")
os.system("rm -rf phylosift.tar.bz2")
os.system("mv phylosift_20130829/lib ./Utilities/cpp%s%s-%s%sprokka"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm -rf phylosift_20130829")
if perltime == "":
os.system("curl -L http://search.cpan.org/CPAN/authors/id/M/MS/MSERGEANT/Time-Piece-1.08.tar.gz -o time.tar.gz")
os.system("tar -xvzf time.tar.gz")
os.chdir("Time-Piece-1.08")
os.system("perl Makefile.PL PREFIX=`pwd`/build")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
pathToCopy = utils.getCommandOutput("find Time-Piece-1.08/build -type d -name \"Time\" |grep -v auto", False)
copyPerlLib(pathToCopy, "./Utilities/cpp%s%s-%s%sprokka/lib/"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm -rf time.tar.gz")
os.system("rm -rf Time-Piece-1.08")
if xmlparser == "":
os.system("curl -L http://search.cpan.org/CPAN/authors/id/M/MS/MSERGEANT/XML-Parser-2.36.tar.gz -o parse.tar.gz")
os.system("tar -xvzf parse.tar.gz")
os.chdir("XML-Parser-2.36")
os.system("perl Makefile.PL PREFIX=`pwd`/build")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
pathToCopy = utils.getCommandOutput("find XML-Parser-2.36/build -type d -name \"XML\" |grep -v auto", False)
copyPerlLib(pathToCopy, "./Utilities/cpp%s%s-%s%sprokka/lib/"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
libUpdate = "%s/Utilities/cpp%s%s-%s%sprokka/lib/"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep)
if "PERL5LIB" in os.environ:
libUpdate = "%s%s%s"%(os.environ["PERL5LIB"], os.pathsep, libUpdate)
os.environ["PERL5LIB"]=libUpdate
os.system("rm -rf parse.tar.gz")
os.system("rm -rf XML-Parser-2.36")
if xmlsimple == "":
os.system("curl -L http://search.cpan.org/CPAN/authors/id/G/GR/GRANTM/XML-Simple-1.08.tar.gz -o xml.tar.gz")
os.system("tar -xvzf xml.tar.gz")
os.chdir("XML-Simple-1.08")
os.system("perl Makefile.PL PREFIX=`pwd`/build")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
pathToCopy = utils.getCommandOutput("find XML-Simple-1.08/build -type d -name \"XML\" |grep -v auto", False)
copyPerlLib(pathToCopy, "./Utilities/cpp%s%s-%s%sprokka/lib/"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm -rf xml.tar.gz")
os.system("rm -rf XML-Simple-1.08")
if os.path.exists("./Utilities/cpp%s%s-%s%sprokka/lib"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
os.chdir("./Utilities/cpp%s%s-%s%sprokka/bin"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("cp prokka prokka.original")
os.system("cat prokka.original |awk '{if (match($0, \"use strict\")) { print \"use lib \\\"%s/Utilities/cpp%s%s-%s%sprokka/lib\\\";\"; print $0; } else { print $0}}' |awk '{if (match($1, \"my\") && match($2, \"MAXCONTIGIDLEN\")) { print \"my $MAXCONTIGIDLEN = 250;\"; } else { print $0; }}' > prokka"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("%s"%(METAMOS_ROOT))
# prokka appends its binaries to the end of PATH rather than the beginning, so if PATH already contains the wrong version of a program it will crash. Patch prokka to prepend its bin directory instead.
os.chdir("./Utilities/cpp%s%s-%s%sprokka/bin"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("cp prokka prokka.original")
os.system("cat prokka.original |awk '{if (match($0, \"ENV{PATH}\")) { print \"$ENV{PATH} = $BINDIR . \\\":$FindBin::RealBin:\\\" . $ENV{PATH};\"; } else { print $0}}' > prokka")
os.chdir("%s"%(METAMOS_ROOT))
aragorn = utils.getFromPath("aragorn", "aragorn", False)
aragornVersion = ""
if aragorn != "":
aragornVersion = utils.getCommandOutput("%s/aragorn -h 2>&1 | grep -i '^ARAGORN v' |sed s/v//g |awk '{printf(\"%%2.2f\\n\", $2)}'"%(aragorn), True)
if float(aragornVersion) < 1.2:
aragorn = ""
if aragorn == "":
print "Aragorn missing, will install"
os.system("curl -L http://130.235.46.10/ARAGORN/Downloads/aragorn1.2.36.tgz -o aragorn.tar.gz")
os.system("tar xvzf aragorn.tar.gz")
os.chdir("aragorn1.2.36")
os.system("gcc -O3 -ffast-math -finline-functions %s %s -o aragorn aragorn1.2.36.c"%(addedCFlags, addedLDFlags))
os.chdir("%s"%(METAMOS_ROOT))
os.system("mv aragorn1.2.36/aragorn ./Utilities/cpp%s%s-%s%sprokka/binaries%s%s"%(os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep, OSTYPE.lower()))
os.system("rm -rf aragorn1.2.36")
os.system("rm aragorn.tar.gz")
infernal = utils.getFromPath("cmscan", "Infernal", False)
if infernal == "" and not os.path.exists("./Utilities/cpp%s%s-%s%sprokka/binaries/%s/infernal"%(os.sep, OSTYPE, MACHINETYPE, os.sep, OSTYPE.lower())):
print "Infernal missing, will install"
if OSTYPE == "Darwin":
os.system("curl -L http://eddylab.org/infernal/infernal-1.1.2-macosx-intel.tar.gz -o infernal.tar.gz")
else:
os.system("curl -L http://eddylab.org/infernal/infernal-1.1.2-linux-intel-gcc.tar.gz -o infernal.tar.gz")
os.system("tar xvzf infernal.tar.gz")
os.system("mv infernal*/binaries/* ./Utilities/cpp%s%s-%s%sprokka/binaries%s%s"%(os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep, OSTYPE.lower()))
os.system("rm -rf infernal*")
barrnap = utils.getFromPath("barrnap", "barrnap", False)
if barrnap == "" and not os.path.exists("./Utilities/cpp%s%s-%s%sprokka/binaries/%s/barrnap"%(os.sep, OSTYPE, MACHINETYPE, os.sep, OSTYPE.lower())):
print "Barrnap missing, will install"
os.system("curl -L http://www.vicbioinformatics.com/barrnap-0.4.tar.gz -o barrnap.tar.gz")
os.system("tar xvzf barrnap.tar.gz")
os.system("mv barrnap-0.4/bin/barrnap ./Utilities/cpp%s%s-%s%sprokka/binaries%s%s"%(os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep, OSTYPE.lower()))
os.system("mv barrnap-0.4/db ./Utilities/cpp%s%s-%s%sprokka/binaries%s%s"%(os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep, OSTYPE.lower()))
if os.path.exists("./Utilities/cpp%s%s-%s%sprokka/lib"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
os.chdir("./Utilities/cpp%s%s-%s%sprokka/binaries%s%s"%(os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep, OSTYPE.lower()))
os.system("cp barrnap barrnap.original")
os.system("cat barrnap.original |awk '{if (match($0, \"use strict\")) { print \"use lib \\\"%s/Utilities/cpp%s%s-%s%sprokka/lib\\\";\"; print $0; } else { print $0}}' > barrnap"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf barrnap-0.4")
os.system("rm barrnap.tar.gz")
hmmscan = utils.getFromPath("hmmscan", "HMMER3", False)
hmmscanVersion = ""
if hmmscan != "":
hmmscanVersion = utils.getCommandOutput("%s/hmmscan -h | grep '^# HMMER' |awk '{printf(\"%%2.2f\\n\", $3)}'"%(hmmscan), True)
print "Found HMM SCAN %s %s"%(hmmscan, hmmscanVersion)
if float(hmmscanVersion) < 3.1:
hmmscan = ""
if hmmscan == "" and not os.path.exists("./Utilities/cpp%s%s-%s%sprokka/binaries/%s/hmmscan"%(os.sep, OSTYPE, MACHINETYPE, os.sep, OSTYPE.lower())):
print "HMMER3 is missing, will install"
if OSTYPE == "Darwin":
os.system("curl -L ftp://selab.janelia.org/pub/software/hmmer3/3.1b1/hmmer-3.1b1-macosx-intel.tar.gz -o hmmer.tar.gz")
elif OSTYPE == "Linux" and MACHINETYPE == "x86_64":
os.system("curl -L ftp://selab.janelia.org/pub/software/hmmer3/3.1b1/hmmer-3.1b1-linux-intel-x86_64.tar.gz -o hmmer.tar.gz")
elif OSTYPE == "Linux":
os.system("curl -L ftp://selab.janelia.org/pub/software/hmmer3/3.1b1/hmmer-3.1b1-linux-intel-ia32.tar.gz -o hmmer.tar.gz")
os.system("tar xvzf hmmer.tar.gz")
os.system("mv hmmer*/binaries/* ./Utilities/cpp%s%s-%s%sprokka/binaries%s%s"%(os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep, OSTYPE.lower()))
os.system("rm -rf hmmer*")
gnuparallel = utils.getFromPath("parallel", "GNU Parallel", False)
if gnuparallel == "" and not os.path.exists("./Utilities/cpp%s%s-%s%sprokka/binaries/%s/parallel"%(os.sep, OSTYPE, MACHINETYPE, os.sep, OSTYPE.lower())):
print "GNU Parallel is missing, will install"
os.system("curl -L http://ftp.gnu.org/gnu/parallel/parallel-20130822.tar.bz2 -o parallel.tar.gz")
os.system("tar xvjf parallel.tar.gz")
os.chdir("parallel-20130822")
os.system("./configure --prefix=`pwd`")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("mv parallel-20130822/bin/parallel ./Utilities/cpp%s%s-%s%sprokka/binaries%s%s"%(os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep, OSTYPE.lower()))
os.system("rm -rf parallel-20130822")
os.system("rm parallel.tar.gz")
blastp = utils.getFromPath("blastp", "BLAST+", False)
if blastp == "" and not os.path.exists("./Utilities/cpp%s%s-%s%sprokka/binaries/%s/blastp"%(os.sep, OSTYPE, MACHINETYPE, os.sep, OSTYPE.lower())):
os.system("ln %s/Utilities/cpp%s%s-%s%sblastp %s/Utilities/cpp%s%s-%s%sprokka/binaries%s%s%sblastp"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep, METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep, OSTYPE.lower(), os.sep))
prodigal = utils.getFromPath("prodigal", "PRODIGAL", False)
if prodigal != "":
prodigalVersion = utils.getCommandOutput("%s/prodigal -v 2>&1 | grep -i '^Prodigal V' |sed s/V//g |awk '{printf(\"%%2.2f\\n\", $2)}'"%(prodigal), True)
print "Found prodigal %s %s"%(prodigal, prodigalVersion)
if float(prodigalVersion) < 2.6:
prodigal = ""
if prodigal == "":
os.system("curl -L https://github.com/hyattpd/Prodigal/archive/v2.60.tar.gz -o prodigal.tar.gz")
os.system("tar xvzf prodigal.tar.gz")
os.system("mv Prodigal-2.60 prodigal.v2_60")
os.system("rm -rf prodigal.tar.gz")
os.chdir("prodigal.v2_60")
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.system("mv prodigal.v2_60/prodigal ./Utilities/cpp%s%s-%s%sprokka/binaries%s%s"%(os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep, OSTYPE.lower()))
os.system("rm -rf prodigal.tar.gz")
os.system("rm -rf prodigal.v2_60")
tbl2asn = utils.getFromPath("tbl2asn", "NCBI Tbl2Asn", False)
if tbl2asn == "":
print "NCBI Tbl2Asn is missing, will install"
if OSTYPE == "Darwin":
os.system("curl -L ftp://ftp.ncbi.nih.gov/toolbox/ncbi_tools/converters/by_program/tbl2asn/mac.tbl2asn.gz -o tbl2asn.gz")
elif OSTYPE == "Linux" and MACHINETYPE == "x86_64":
os.system("curl -L ftp://ftp.ncbi.nih.gov/toolbox/ncbi_tools/converters/by_program/tbl2asn/linux64.tbl2asn.gz -o tbl2asn.gz")
elif OSTYPE == "Linux":
os.system("curl -L ftp://ftp.ncbi.nih.gov/toolbox/ncbi_tools/converters/by_program/tbl2asn/linux.tbl2asn.gz -o tbl2asn.gz")
os.system("gunzip tbl2asn.gz")
os.system("chmod a+rx tbl2asn")
os.system("mv tbl2asn ./Utilities/cpp%s%s-%s%sprokka/binaries%s%s"%(os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep, OSTYPE.lower()))
os.system("rm tbl2asn.gz")
# finally set up prokka DBs
os.system("./Utilities/cpp%s%s-%s%s/prokka/bin/prokka --setupdb"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
if not os.path.exists("./Utilities/cpp%s%s-%s%ssoap2"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "soap2" in packagesToInstall:
if "soap2" in packagesToInstall:
dl = 'y'
else:
print "SOAPdenovo2 binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
if OSTYPE == "Darwin":
os.system("curl -L http://sourceforge.net/projects/soapdenovo2/files/SOAPdenovo2/src/r240/SOAPdenovo2-src-r240-mac.tgz -o soap2.tar.gz")
os.system("tar xvzf soap2.tar.gz")
os.system("mv r240_noAIOinPregraph ./Utilities/cpp%s%s-%s%ssoap2"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%ssoap2"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make clean")
os.system("make")
os.system("mkdir bin")
os.system("mv SOAPdenovo-* bin/")
os.chdir("%s"%(METAMOS_ROOT))
else:
os.system("curl -L http://sourceforge.net/projects/soapdenovo2/files/SOAPdenovo2/bin/r240/SOAPdenovo2-bin-LINUX-generic-r240.tgz -o soap2.tar.gz")
os.system("tar xvzf soap2.tar.gz")
os.system("mkdir ./Utilities/cpp%s%s-%s%ssoap2"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mv SOAPdenovo2-bin-LINUX-generic-r240 ./Utilities/cpp%s%s-%s%ssoap2/bin"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("curl -L http://downloads.sourceforge.net/project/soapdenovo2/GapCloser/src/r6/GapCloser-src-v1.12-r6.tgz -o gapcloser.tar.gz")
os.system("tar xvzf gapcloser.tar.gz")
os.chdir("v1.12-r6")
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.system("mv v1.12-r6/Release/* ./Utilities/cpp%s%s-%s%ssoap2/bin"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm -rf soap2.tar.gz")
os.system("rm -rf v1.12-r6")
os.system("rm -rf gapcloser.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%sMaSuRCA"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
masurca = utils.getFromPath("masurca", "MaSuRCA", False)
if masurca == "":
if "masurca" in packagesToInstall:
dl = 'y'
else:
print "MaSuRCA binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
if GCC_VERSION < 4.4:
print "Error: MaSuRCA requires gcc at least version 4.4, found version %s. Please update and try again"%(GCC_VERSION)
else:
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/MaSuRCA-2.2.0.tar.gz -o msrca.tar.gz")
os.system("tar xvzf msrca.tar.gz")
os.system("mv ./MaSuRCA-2.2.0 ./Utilities/cpp%s%s-%s%sMaSuRCA"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%sMaSuRCA"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("cp install.sh install.orig")
os.system("cat install.orig |sed s/\-\-prefix/\-\-disable\-shared\ \-\-prefix/g > install.sh")
# patch CA
if not ALLOW_FAST:
os.system("cd CA/kmer/ && cp configure.sh configure.original")
os.system("cd CA/kmer/ && cat configure.original |sed s/\-fast//g > configure.sh")
if not HAVE_RT:
os.system("cd SuperReads/ && cp Makefile.am Makefile.am.original")
os.system("cd SuperReads/ && cat Makefile.am.original |sed s/\-lrt//g > Makefile.am")
os.system("cd SuperReads/ && cp Makefile.in Makefile.in.original")
os.system("cd SuperReads/ && cat Makefile.in.original |sed s/\-lrt//g > Makefile.in")
if not HAVE_QUIET_HEAD:
os.system("cd SuperReads/src && cp masurca masurca.original")
os.system("cd SuperReads/src && cat masurca.original |sed s/head\ \-q/head/g > masurca")
os.system("rm -f CA/kmer/makepath")
# fix compilation on OSX
if OSTYPE == "Darwin":
os.system("cp SuperReads/include/reallocators.hpp SuperReads/include/reallocators.hpp.orig")
testIn = open("SuperReads/include/reallocators.hpp.orig", 'r')
testOut = open("SuperReads/include/reallocators.hpp", 'w')
for line in testIn.xreadlines():
if "T* res = reallocator<T>::operator()(" in line:
testOut.write("T* res = reallocator<T>::realloc(ptr, osize, nsize);\n")
else:
testOut.write(line.strip() + "\n")
testIn.close()
testOut.close()
# don't set static build libs on OSX, seems to cause compile issues for jellyfish
os.environ["CFLAGS"] = oldCFlags
os.environ["CPPFLAGS"] = oldCPPFlags
os.environ["CXXFLAGS"] = oldCXXFlags
os.environ["LDFLAGS"] = oldLDFlags
updateMakeFileForDarwin("CA/src/c_make.as", addedCFlags, addedLDFlags)
os.system("bash install.sh")
fileOptions = utils.getCommandOutput("file -b --mime-type INSTALL.py", False)
if fileOptions == "":
fileOptions = utils.getCommandOutput("file -b --mime INSTALL.py", False)
if fileOptions != "":
# fix file command used by MaSuRCA, it's not compatible with the system
if os.path.exists("bin/expand_fastq"):
os.system("cp bin/expand_fastq bin/expand_fastq.orig")
testIn = open("bin/expand_fastq.orig", 'r')
testOut = open("bin/expand_fastq", 'w')
for line in testIn.xreadlines():
if "case $(file" in line:
testOut.write("case $(file -b --mime \"$FILE\" |awk '{print $1}'|sed s/\\;//g) in\n")
else:
testOut.write(line.strip() + "\n")
testIn.close()
testOut.close()
else:
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf ./Utilities/cpp%s%s-%s%sMaSuRCA"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
# update path to CA which is always hardcoded to Linux-amd64
os.system("cp bin/masurca bin/masurca.orig")
os.system("cat bin/masurca.orig | sed s/Linux-amd64/%s-%s/g |sed s/check_exec\\(\\\"jellyfish\\\"/check_exec\\(\\\"jellyfish-2.0\\\"/g > bin/masurca"%(OSTYPE, MACHINETYPE.replace("x86_64", "amd64")))
if OSTYPE == "Darwin":
os.system("cp bin/masurca bin/masurca.orig")
os.system("cat bin/masurca.orig | awk '{if (match($0, \"save NUM_SUPER_READS\")) { print $0\"\\n\\tprint FILE \\\"export NUM_SUPER_READS=\\\\$NUM_SUPER_READS\\\\n\\\";\"; } else { print $0}}' | sed s/\\(\\'..TOTAL_READS\\'/\\(\\\\\\\\\\$ENV{\\'TOTAL_READS\\'}/g| sed s/'<..$NUM_SUPER_READS.'/\"<ENVIRON[\\\\\\\\\\\"NUM_SUPER_READS\\\\\\\\\\\"]\"/g | sed s/'>=..$NUM_SUPER_READS.'/\">=ENVIRON[\\\\\\\\\\\"NUM_SUPER_READS\\\\\\\\\\\"]\"/g > bin/masurca")
# reset env variables again
addEnvironmentVar("CFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CPPFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CXXFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("LDFLAGS", " %s "%(addedLDFlags))
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf ./MaSuRCA-2.2.0")
os.system("rm msrca.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%smira"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
mira = utils.getFromPath("mira", "MIRA", False)
if mira == "":
if "mira" in packagesToInstall:
dl = 'y'
else:
print "MIRA binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
if OSTYPE == "Darwin":
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/mira_4.0rc5_darwin13.0.0_x86_64_static.tar.bz2 -o mira.tar.bz2")
else:
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/mira_4.0rc5_linux-gnu_x86_64_static.tar.bz2 -o mira.tar.bz2")
os.system("tar xvjf mira.tar.bz2")
os.system("rm -f mira.tar.bz2")
os.system("mv `ls -d mira*` ./Utilities/cpp%s%s-%s%smira"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
if not os.path.exists("./Utilities/cpp%s%s-%s%sidba"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
idba = utils.getFromPath("idba", "IDBA-UD", False)
if idba == "":
if "idba" in packagesToInstall:
dl = 'y'
else:
print "IDBA-UD binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L https://github.com/loneknightpy/idba/releases/download/1.1.3/idba-1.1.3.tar.gz -o idba.tar.gz")
os.system("tar xvzf idba.tar.gz")
os.system("mv idba-1.1.3 ./Utilities/cpp%s%s-%s%sidba"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%sidba"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mv src/sequence/short_sequence.h src/sequence/short_sequence.orig")
os.system("cat src/sequence/short_sequence.orig |awk '{if (match($0, \"kMaxShortSequence = 128\")) print \"static const uint32_t kMaxShortSequence = 32768;\"; else print $0}' > src/sequence/short_sequence.h")
os.system("mv src/basic/kmer.h src/basic/kmer.orig")
os.system("cat src/basic/kmer.orig |awk '{if (match($0, \"kNumUint64 = 4\")) print \" static const uint32_t kNumUint64 = 16;\"; else print $0}' > src/basic/kmer.h")
os.system("./configure")
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf idba.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%seautils"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
eautils = utils.getFromPath("fastq-mcf", "EA-UTILS", False)
if eautils == "":
if "eautils" in packagesToInstall:
dl = 'y'
else:
print "EA-UTILS binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L https://github.com/ExpressionAnalysis/ea-utils/tarball/master -o eautils.tar.gz")
os.system("curl -L ftp://ftp.gnu.org/gnu/gsl/gsl-1.16.tar.gz -o gsl.tar.gz")
os.system("tar xvzf eautils.tar.gz")
os.system("tar xvzf gsl.tar.gz")
os.system("mv ExpressionAnalysis-ea-utils* ./Utilities/cpp%s%s-%s%seautils"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mv gsl-1.16 ./Utilities/cpp%s%s-%s%seautils/clipper/gsl"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%seautils/clipper/gsl"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("./configure --prefix=`pwd`/build")
os.system("make")
os.system("make install")
os.chdir("..")
os.system("mv Makefile Makefile.orig")
os.system("cat Makefile.orig |sed s/CFLAGS?=/CFLAGS+=/g |sed s/CPPFLAGS?=/CPPFLAGS+=/g > Makefile")
addEnvironmentVar("CFLAGS", "-I. -L%s/Utilities/cpp%s%s-%s%seautils/gsl/build/lib/"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep))
addEnvironmentVar("CPPFLAGS", "-I. -L%s/Utilities/cpp%s%s-%s%seautils/gsl/build/lib/"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("make")
os.system("cp fastq-mcf ../")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf eautils.tar.gz")
os.system("rm -rf gsl.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%sabyss"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
abyss = utils.getFromPath("ABYSS", "ABySS", False)
if abyss == "":
if "abyss" in packagesToInstall:
dl = 'y'
else:
print "ABySS binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L https://github.com/sparsehash/sparsehash/archive/sparsehash-2.0.2.tar.gz -o sparse.tar.gz")
os.system("tar xvzf sparse.tar.gz")
os.chdir("sparsehash-sparsehash-2.0.2")
os.system("./configure --prefix=`pwd`")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("curl -L http://sourceforge.net/projects/boost/files/boost/1.54.0/boost_1_54_0.tar.gz -o boost.tar.gz")
os.system("tar xvzf boost.tar.gz")
os.system("curl -L http://www.bcgsc.ca/platform/bioinfo/software/abyss/releases/1.3.6/abyss-1.3.6.tar.gz -o abyss.tar.gz")
os.system("tar xvzf abyss.tar.gz")
os.chdir("abyss-1.3.6")
os.system("ln -s %s/boost_1_54_0/boost boost"%(METAMOS_ROOT))
addEnvironmentVar("CFLAGS", "-I%s/sparsehash-sparsehash-2.0.2/include"%(METAMOS_ROOT))
addEnvironmentVar("CPPFLAGS", "-I%s/sparsehash-sparsehash-2.0.2/include"%(METAMOS_ROOT))
addEnvironmentVar("CXXFLAGS", "-I%s/sparsehash-sparsehash-2.0.2/include"%(METAMOS_ROOT))
# sparse hash library has unused variables which cause warnings with gcc 4.8 so disable -Werror
if GCC_VERSION >= 4.8:
os.system("mv configure configure.original")
os.system("cat configure.original |sed s/\-Werror//g > configure")
os.system("chmod a+rx configure")
os.system("./configure --enable-maxk=96 --prefix=`pwd`/build")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("mkdir ./Utilities/cpp%s%s-%s%sabyss"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mv abyss-1.3.6/build/* ./Utilities/cpp%s%s-%s%sabyss/"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
# update abysss to use installed mpi
command="mpirun"
mpi=utils.getFromPath(command, "MPI", False)
if not os.path.exists("%s%s%s"%(mpi, os.sep, command)):
command="openmpirun"
mpi=utils.getFromPath(command, "MPI", False)
if not os.path.exists("%s%s%s"%(mpi, os.sep, command)):
mpi = command = ""
os.chdir("./Utilities/cpp%s%s-%s%sabyss/bin/"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("cp abyss-pe abyss-pe-orig")
if mpi != "" and os.path.exists("ABYSS-P"):
testIn = open("abyss-pe-orig", 'r')
testOut = open("abyss-pe", 'w')
for line in testIn.xreadlines():
if "which mpirun" in line:
testOut.write("mpirun?=$(shell which %s)\n"%(command))
elif "ifdef np" in line:
testOut.write(line)
testOut.write("ifneq ($(mpirun),mpirun)\n")
elif "ABYSS-P" in line:
testOut.write(line)
testOut.write("else\n")
testOut.write("\tABYSS $(abyssopt) $(ABYSS_OPTIONS) -o $@ $(in) $(se)\n")
testOut.write("endif\n")
else:
testOut.write(line)
testIn.close()
testOut.close()
else:
print "Error: cannot find MPI in your path. Disabling ABySS threading."
os.system("cat abyss-pe-orig |awk -v found=0 -v skipping=0 '{if (match($0, \"ifdef np\")) {skipping=1; } if (skipping && match($1, \"ABYSS\")) {print $0; skipping=1; found=1} if (found && match($1, \"endif\")) {skipping=0;found = 0;} else if (skipping == 0) { print $0; } }' > abyss-pe")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf sparsehash-sparsehash-2.0.2")
os.system("rm -rf sparse.tar.gz")
os.system("rm -rf abyss-1.3.6")
os.system("rm -rf abyss.tar.gz")
os.system("rm -rf boost_1_54_0")
os.system("rm -rf boost.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%ssga"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
sga = utils.getFromPath("sga", "SGA", False)
if sga == "":
if "sga" in packagesToInstall:
dl = 'y'
else:
print "SGA binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L https://github.com/sparsehash/sparsehash/archive/sparsehash-2.0.2.tar.gz -o sparse.tar.gz")
os.system("tar xvzf sparse.tar.gz")
os.chdir("sparsehash-sparsehash-2.0.2")
os.system("./configure --prefix=`pwd`")
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("curl -L https://github.com/pezmaster31/bamtools/archive/v2.3.0.tar.gz -o bamtools.tar.gz")
os.system("tar xvzf bamtools.tar.gz")
os.system("curl -L http://sourceforge.net/projects/bio-bwa/files/bwa-0.7.5a.tar.bz2 -o bwa.tar.bz2")
os.system("tar xvjf bwa.tar.bz2")
os.chdir("bwa-0.7.5a")
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.system("curl -L https://github.com/jts/sga/archive/v0.10.10.tar.gz -o sga.tar.gz")
os.system("tar xvzf sga.tar.gz")
os.system("mv sga-0.10.10 ./Utilities/cpp%s%s-%s%ssga"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mv bamtools-2.3.0 ./Utilities/cpp%s%s-%s%ssga/bamtools"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mv sparsehash-sparsehash-2.0.2 ./Utilities/cpp%s%s-%s%ssga/sparsehash"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%ssga/bamtools"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mkdir build")
os.chdir("build")
os.system("export CC=`which gcc` && cmake ..")
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.chdir("./Utilities/cpp%s%s-%s%ssga/src"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
# sparse hash library has unused variables which cause warnings with gcc 4.8 so disable -Werror
if GCC_VERSION >= 4.8:
os.system("mv configure.ac configure.original")
os.system("cat configure.original |sed s/\-Werror//g > configure.ac")
os.system("sh ./autogen.sh")
os.system("./configure --with-sparsehash=`pwd`/../sparsehash --with-bamtools=`pwd`/../bamtools --prefix=`pwd`/../")
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("mv bwa-0.7.5a/bwa ./Utilities/cpp%s%s-%s%ssga/bin/"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("cp %s/Utilities/cpp%s%s-%s%ssamtools %s/Utilities/cpp%s%s-%s%ssga/bin%ssamtools"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep, METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep))
os.system("rm -rf sparsehash-sparsehash-2.0.2")
os.system("rm -rf sparse.tar.gz")
os.system("rm -rf bamtools-2.3.0")
os.system("rm -rf bamtools.tar.gz")
os.system("rm -rf sga-0.10.10")
os.system("rm -rf sga.tar.gz")
os.system("rm -rf bwa.tar.bz2")
os.system("rm -rf bwa-0.7.5a")
if not os.path.exists("./Utilities/cpp%s%s-%s%sedena"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
edena = utils.getFromPath("edena", "EDENA", False)
if "edena" in packagesToInstall:
dl = 'y'
else:
print "Edena binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/EdenaV3_130110.tar.gz -o edena.tar.gz")
os.system("tar xvzf edena.tar.gz")
os.system("mv EdenaV3.130110 ./Utilities/cpp%s%s-%s%sedena"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%sedena"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("src/Makefile", addedCFlags, addedLDFlags)
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf edena.tar.gz")
if not os.path.exists("./quast"):
if "quast" in packagesToInstall:
dl = 'y'
else:
print "QUAST tool not found, optional for Validate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L http://downloads.sourceforge.net/project/quast/quast-2.2.tar.gz -o quast.tar.gz")
os.system("tar xvzf quast.tar.gz")
os.system("mv ./quast-2.2 ./quast")
os.system("rm -rf quast.tar.gz")
# since quast requires a reference, also download refseq
ftpSite = "ftp://ftp.ncbi.nih.gov/genomes/"
file = "all.fna.tar.gz"
if not os.path.exists("./Utilities/DB/refseq/") and not nodbs:
print "Downloading refseq genomes (Bacteria/%s, Viruses/%s)..."%(file,file)
print "\tThis file is large and may take time to download"
os.system("curl -L %s/archive/old_refseq/Bacteria/%s -o bacteria.tar.gz"%(ftpSite, file))
os.system("curl -L %s/Viruses/%s -o viruses.tar.gz"%(ftpSite, file))
os.system("mkdir -p ./Utilities/DB/refseq/temp")
os.system("mv bacteria.tar.gz ./Utilities/DB/refseq/temp")
os.system("mv viruses.tar.gz ./Utilities/DB/refseq/temp")
os.chdir("./Utilities/DB/refseq/temp")
os.system("tar xvzf bacteria.tar.gz")
os.system("tar xvzf viruses.tar.gz")
os.chdir("..")
print "Current directory is %s"%(os.getcwd())
for file in os.listdir("%s/temp"%(os.getcwd())):
file = "%s%stemp%s%s"%(os.getcwd(), os.sep, os.sep, file)
if os.path.isdir(file):
prefix = os.path.splitext(os.path.basename(file))[0]
os.system("cat %s/*.fna > %s.fna"%(file, prefix))
os.system("rm -rf temp")
os.chdir("%s"%(METAMOS_ROOT))
if not os.path.exists("./Utilities/cpp%s%s-%s%sfreebayes"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "freebayes" in packagesToInstall:
dl = 'y'
else:
print "FreeBayes tool not found, optional for Validate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("git clone --recursive git://github.com/ekg/freebayes.git freebayes")
os.system("mv ./freebayes ./Utilities/cpp/%s%s-%s%sfreebayes"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%sfreebayes"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("src/makefile", addedCFlags, addedLDFlags)
# don't set static build libs on OSX, seems to cause compile issues for jellyfish
os.environ["CFLAGS"] = oldCFlags
os.environ["CPPFLAGS"] = oldCPPFlags
os.environ["CXXFLAGS"] = oldCXXFlags
os.environ["LDFLAGS"] = oldLDFlags
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
if OSTYPE == "Darwin":
# reset env variables again
addEnvironmentVar("CFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CPPFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CXXFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("LDFLAGS", " %s "%(addedLDFlags))
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
if not os.path.exists("./Utilities/cpp%s%s-%s%scgal"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "cgal" in packagesToInstall:
dl = 'y'
else:
print "CGAL tool not found, optional for Validate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L http://bio.math.berkeley.edu/cgal/cgal-0.9.6-beta.tar -o cgal.tar")
os.system("tar xvf cgal.tar")
os.system("mv cgal-0.9.6-beta ./Utilities/cpp/%s%s-%s%scgal"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%scgal"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("makefile", addedCFlags, addedLDFlags, True)
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf cgal.tar")
if not os.path.exists("./Utilities/cpp%s%s-%s%sREAPR"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "reapr" in packagesToInstall:
dl = 'y'
else:
print "REAPR tool not found, optional for Validate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/Reapr_1.0.16.tar.gz -o reapr.tar.gz")
os.system("tar xvzf reapr.tar.gz")
os.system("mv Reapr_1.0.16 ./Utilities/cpp/%s%s-%s%sREAPR"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
# find cmake we installed anyway
if not os.path.exists("./Utilities/cpp%s%s-%s%scmake"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
cmake = utils.getFromPath("cmake", "CMAKE", False) + os.sep + "cmake"
else:
cmake="%s/Utilities/cpp%s%s-%s%scmake/bin/cmake"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep)
filespec = utils.getCommandOutput("perl -MFile::Spec::Link -e 0 && echo $?", True)
if filespec == "":
os.system("curl -L http://search.cpan.org/CPAN/authors/id/R/RM/RMBARKER/File-Copy-Link-0.113.tar.gz -o file.tar.gz")
os.system("tar xvzf file.tar.gz")
os.chdir("File-Copy-Link-0.113")
os.system("perl Makefile.PL PREFIX=`pwd`/build")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
pathToCopy = utils.getCommandOutput("find File-Copy-Link-0.113/build -type d -name \"File\" |grep -v auto", False)
copyPerlLib(pathToCopy, "./Utilities/cpp%s%s-%s%sREAPR/lib/"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm -rf file.tar.gz")
os.system("rm -rf File-Copy-Link-0.113")
libUpdate = "%s/Utilities/cpp%s%s-%s%sREAPR/lib/"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep)
if "PERL5LIB" in os.environ:
libUpdate = "%s%s%s"%(os.environ["PERL5LIB"], os.pathsep, libUpdate)
os.environ["PERL5LIB"]=libUpdate
if OSTYPE == "Darwin":
os.chdir("./Utilities/cpp/%s%s-%s%sREAPR/third_party/snpomatic/src"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("cp snpomatic.h snpomatic.original")
os.system("cat snpomatic.original |awk '{if (match($0, \"#include <algorithm>\")) { print $0; print \"#define ulong u_long\"; } else { print $0} }' > snpomatic.h")
os.chdir("%s"%(METAMOS_ROOT))
# also need smalt, the reapr distro comes with linux 64 bit only
os.chdir("./Utilities/cpp/%s%s-%s%sREAPR/third_party"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("tabix/Makefile", addedCFlags, addedLDFlags)
updateMakeFileForDarwin("snpomatic/Makefile", addedCFlags, addedLDFlags)
os.system("curl -L http://sourceforge.net/projects/smalt/files/smalt-0.7.5.tar.gz -o smalt.tar.gz")
os.system("tar xvzf smalt.tar.gz")
os.chdir("./smalt-0.7.5")
os.system("./configure --prefix=`pwd`/build")
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make install")
os.chdir("..")
os.system("rm smalt_x86_64")
os.system("rm -rf smalt.tar.gz")
os.system("ln -s smalt-0.7.5/build/bin/smalt smalt_x86_64")
os.chdir("%s"%(METAMOS_ROOT))
os.chdir("./Utilities/cpp/%s%s-%s%sREAPR"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
# samtools which reapr includes uses curses lib which is optional so disable it if not found
os.system("echo \"#include <curses.h>\" > .test.h")
HAVE_CURSES=utils.getCommandOutput("gcc .test.h && echo $?", True)
if HAVE_CURSES == "":
os.chdir("third_party/samtools")
os.system("mv Makefile Makefile.original")
os.system("cat Makefile.original | sed s/\-lcurses//g |sed s/\-D_CURSES_LIB=1//g > Makefile")
os.chdir("../../")
# reapr comes with its own cmake which has issues building on recent gcc
# kill it and use our own
os.system("cp install.sh install.sh.orig")
testIn = open("install.sh.orig", 'r')
testOut = open("install.sh", 'w')
isSkip = 0;
for line in testIn.xreadlines():
if "cmake/bin/cmake" in line:
testOut.write("%s ..\n"%(cmake))
elif "cd cmake" in line:
# skip some lines
isSkip = 3
elif isSkip > 0:
isSkip -= 1
else:
testOut.write(line.strip() + "\n")
testIn.close()
testOut.close()
os.system("export CC=`which gcc` && sh install.sh force")
os.system("chmod ugo+rx third_party/smalt_x86_64")
os.chdir("%s"%(METAMOS_ROOT))
if os.path.exists("./Utilities/cpp%s%s-%s%sREAPR/lib"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
os.chdir("./Utilities/cpp%s%s-%s%sREAPR/"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("cp reapr reapr.original")
os.system("cat reapr.original |awk '{if (match($0, \"use strict\")) { print \"use lib \\\"%s/Utilities/cpp%s%s-%s%sREAPR/lib\\\";\"; print $0; } else { print $0}}' > reapr"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("%s"%(METAMOS_ROOT))
os.chdir("./Utilities/cpp%s%s-%s%sREAPR/src"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
# fix samtools link
os.system("rm samtools")
os.system("cp ../third_party/samtools/samtools ./")
# REAPR has a bug where fasta headers with commas are not properly fixed, patch the bug
os.system("cp task_facheck.pl task_facheck.pl.original")
os.system("cat task_facheck.pl.original |awk -v quote=\"'\" '{if (match($0, \"new_id =~\")) { print \" $new_id =~ s/[;\"quote\"|:,\\\\+\\\\-\\\\s\\\\(\\\\)\\\\{\\\\}\\\\[\\\\]]/_/g;\"; } else { print $0}}' > task_facheck.pl")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf reapr.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%sFRCbam"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "frcbam" in packagesToInstall:
dl = 'y'
else:
print "FRCbam tool not found, optional for Validate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/FRC_align-master.zip -o frcbam.zip")
os.system("unzip frcbam.zip")
os.system("mv FRC_align-3398ca469b2077d6672b85317eee6fea171b6a27 ./Utilities/cpp/%s%s-%s%sFRCbam"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%sFRCbam/src/samtools"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
# samtools which frcbam includes uses curses lib which is optional so disable it if not found
os.system("echo \"#include <curses.h>\" > .test.h")
HAVE_CURSES=utils.getCommandOutput("gcc .test.h && echo $?", True)
if HAVE_CURSES == "":
os.system("mv Makefile Makefile.original")
os.system("cat Makefile.original | sed s/\-lcurses//g |sed s/\-D_CURSES_LIB=1//g > Makefile")
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make")
os.chdir("%s/Utilities/cpp/%s%s-%s%sFRCbam"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep))
boostFlags = ""
if os.path.exists("/opt/local/lib/libboost_system-mt.a"):
os.environ["LDFLAGS"]="-L/opt/local/lib -lboost_system-mt"
elif os.path.exists("/opt/local/lib/libboost_system.a"):
os.environ["LDFLAGS"]="-L/opt/local/lib -lboost_system"
elif os.path.exists("/usr/lib/libboost_system-mt.a"):
os.environ["LDFLAGS"]="-L/usr/lib -lboost_system-mt"
elif os.path.exists("/usr/lib/libboost_system.a"):
os.environ["LDFLAGS"]="-L/usr/lib -lboost_system"
else:
# install boost ourselves
os.system("curl -L http://sourceforge.net/projects/boost/files/boost/1.54.0/boost_1_54_0.tar.gz -o boost.tar.gz")
os.system("tar xvzf boost.tar.gz")
os.chdir("boost_1_54_0")
os.system("sh bootstrap.sh")
os.system("./b2 install --prefix=`pwd`/build threading=multi")
ldflags = "-L%s/build/lib -lboost_system"%(os.getcwd())
if os.path.exists("%s/build/lib/libboost_system-mt.a"%(os.getcwd())):
ldflags = "-L%s/build/lib -lboost_system-mt"%(os.getcwd())
os.environ["LDFLAGS"]=ldflags
try:
os.environ["LD_LIBRARY_PATH"] = os.environ["LD_LIBRARY_PATH"] + os.pathsep + "%s/build/lib"%(os.getcwd())
except KeyError:
os.environ["LD_LIBRARY_PATH"] = "%s/build/lib"%(os.getcwd())
boostFlags = "--with-boost=%s/build/ --disable-shared --enable-static-boost --enable-static-FRC"%(os.getcwd())
os.chdir("..")
os.system("rm -rf boost.tar.gz")
os.system("./configure --prefix=%s/Utilities/cpp/%s%s-%s%sFRCbam/ %s"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep, boostFlags))
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make install")
if boostFlags != "":
os.system("cp boost_1_54_0/build/lib/* ./bin")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf frcbam.zip")
if not os.path.exists("./Utilities/cpp/%s%s-%s%sALE"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "ale" in packagesToInstall:
dl = 'y'
else:
print "ALE tool not found, optional for Validate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/ale.tar.gz -o ale.tar.gz")
os.system("tar xvzf ale.tar.gz")
os.system("mv ALE ./Utilities/cpp/%s%s-%s%sALE"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%sALE/src"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("makefile", addedCFlags, addedLDFlags)
os.system("make all")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf ale.tar.gz")
if "deprecated" in enabledWorkflows or manual:
if not os.path.exists("./Utilities/glimmer-mg"):
if "glimmer-mg" in packagesToInstall:
dl = 'y'
else:
print "Glimmer-MG not found, optional for FindORFS step. Caution, this will take approx. 24 hours to complete, including Phymm download & install. download & install now?"
dl = raw_input("Enter Y/N: ")
if (dl == 'y' or dl == 'Y') and not nodbs:
archive = "glimmer-mg-0.3.1.tar.gz"
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/%s -o %s" %(archive, archive))
os.system("tar -C ./Utilities/ -xvf %s" % archive)
os.system("rm %s"%archive)
os.system("python ./Utilities/glimmer-mg/install_glimmer.py")
# should check for success of installation
workflow.updateSupportedWorkflows(enabledWorkflows)
os.environ["CFLAGS"] = oldCFlags
os.environ["CPPFLAGS"] = oldCPPFlags
os.environ["CXXFLAGS"] = oldCXXFlags
os.environ["LDFLAGS"] = oldLDFlags
sys.path.append(METAMOS_ROOT + os.sep + "Utilities" + os.sep + "python")
from get_setuptools import use_setuptools
use_setuptools()
print "Run setup.py.."
os.system("python setup.py install_scripts --install-dir=`pwd` build_ext")
#print "Compile & optimize"
#distutils.util.byte_compile(['./runPipeline.py'],optimize=2,force=True)
#os.system("chmod a+wrx runPipeline.pyo")
os.system("mv runPipeline.py runPipeline")
os.system("mv initPipeline.py initPipeline")
#remove imports from the pth file, if it exists
nf = []
try:
dir1 = utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib"+os.sep+"python"
if not os.path.exists(dir1+os.sep+"easy-install.pth"):
dir1 = utils.INITIAL_UTILS+os.sep+"python"+os.sep+"lib64"+os.sep+"python"
nf = open(dir1+os.sep+"easy-install.pth",'r')
ndata = []
for line in nf.xreadlines():
if "import" in line:
continue
ndata.append(line)
nf.close()
nfo = open(dir1+os.sep+"easy-install.pth",'w')
for line in ndata:
nfo.write(line)
nfo.close()
except IOError:
pass
validate_install = 0
if validate_install:
import check_install
rt = check_install.validate_dir(METAMOS_ROOT,'required_file_list.txt')
if rt == -1:
print "MetAMOS not properly installed, please reinstall or contact development team for assistance"
sys.exit(1)
```
#### File: metAMOS/src/abundance.py
```python
import os, sys, string, time, BaseHTTPServer, getopt, re, subprocess, webbrowser
from operator import itemgetter
from utils import *
from findscforfs import FindScaffoldORFS
sys.path.append(INITIAL_UTILS)
from ruffus import *
_readlibs = []
_skipsteps = []
_forcesteps = []
_cls = None
_settings = Settings()
def init(reads, skipsteps, forcesteps, cls):
global _readlibs
global _skipsteps
global _forcesteps
global _cls
_readlibs = reads
_skipsteps = skipsteps
_forcesteps = forcesteps
_cls = cls
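# Translate the subject IDs in a MetaPhyler blast report to GI numbers using the
# two-column mapping file (marker ID <tab> GI), writing the rewritten copy to `output`.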
def parse_metaphyler(giMapping, toTranslate, output):
giDictionary = {};
try:
GIs = open(giMapping, 'r')
except IOError as e:
return
for line in GIs:
line = line.replace("\n","")
splitLine = line.split("\t")
giDictionary[splitLine[0]] = splitLine[1]
GIs.close();
try:
GIs = open(toTranslate, 'r')
except IOError as e:
print "Exception opening file %s"%(e)
return
outf = open(output, 'w')
for line in GIs:
line = line.replace("\n","")
splitLine = line.split("\t")
if splitLine[1] in giDictionary:
outf.write(line.replace(splitLine[1], giDictionary[splitLine[1]]) + "\n")
GIs.close()
outf.close()
@follows(FindScaffoldORFS)
@posttask(touch_file("%s/Logs/abundance.ok"%(_settings.rundir)))
@files("%s/Assemble/out/%s.asm.contig"%(_settings.rundir,_settings.PREFIX),"%s/Abundance/out/%s.taxprof.pct.txt"%(_settings.rundir,_settings.PREFIX))
def Abundance(input,output):
if "Abundance" not in _forcesteps and ("FindORFS" in _skipsteps or "Abundance" in _skipsteps):
# can this be done automatically by ruffus pipeline?
run_process(_settings, "touch %s/Logs/abundance.skip"%(_settings.rundir), "Abundance")
run_process(_settings, "touch %s/Abundance/out/%s.taxprof.pct.txt"%(_settings.rundir, _settings.PREFIX), "Abundance")
run_process(_settings, "touch %s/Abundance/out/%s.classify.txt"%(_settings.rundir, _settings.PREFIX), "Abundance");
return 0
blastfile = _settings.PREFIX+".blastx"
blastc = _settings.BLAST + os.sep + "blastall"
formatc = _settings.BLAST + os.sep + "formatdb"
run_process(_settings, "ln -s %s/markers.pfasta %s/Abundance/out/markers.pfasta"%(_settings.DB_DIR, _settings.rundir), "Abundance")
run_process(_settings, "%s -p T -i %s/Abundance/out/markers.pfasta"%(formatc,_settings.rundir),"Abundance")
#update to MetaPhyler 1.25
run_process(_settings, "%s -p blastp -i %s/FindORFS/out/%s.faa -d %s/Abundance/out/markers.pfasta -m 8 -b 10 -v 10 -a %s -o %s/Abundance/out/%s.blastp"%(blastc, _settings.rundir,_settings.PREFIX,_settings.rundir,_settings.threads,_settings.rundir,_settings.PREFIX),"Abundance")
run_process(_settings, "perl %s/perl/metaphyler_contigs.pl %s/Abundance/out/%s.blastp %s %s/FindORFS/out/%s.gene.cvg %s/Abundance/out %s"%(_settings.METAMOS_UTILS,_settings.rundir,_settings.PREFIX,_settings.PREFIX,_settings.rundir,_settings.PREFIX,_settings.rundir,_settings.METAMOS_UTILS),"Abundance")
#run_process(_settings, "./installMetaphyler.pl")
#run_process(_settings, "$Bin/metaphylerClassify $Bin/markers/markers.$blast.classifier $Bin/markers/markers.taxonomy $prefix.$blast > $prefix.classification")
# finally add the GI numbers to the results where we can
parse_metaphyler("%s/markers.toGI.txt"%(_settings.DB_DIR), "%s/Abundance/out/%s.blastp"%(_settings.rundir, _settings.PREFIX), "%s/Abundance/out/%s.gi.blastp"%(_settings.rundir, _settings.PREFIX))
# generate Krona output
importMetaPhyler = "%s%sperl%sImportMetaPhyler.pl"%(_settings.METAMOS_UTILS, os.sep, os.sep)
if not os.path.exists(importMetaPhyler):
print "Error: Krona importer for MetaPhyler not found in %s. Please check your path and try again.\n"%(importMetaPhyler)
raise(JobSignalledBreak)
run_process(_settings, "perl %s %s -c -v -i -p %s/Abundance/out/%s.taxprof.pct.txt:%s"%(importMetaPhyler,"-l" if _settings.local_krona else "",_settings.rundir,_settings.PREFIX, _settings.taxa_level),"Abundance")
if _cls == 'metaphyler' or _cls == None:
print "!!No classification selected, using MetaPhyler for taxonomic composition classification\n"
run_process(_settings, "cp %s/Abundance/out/%s.gi.blastp %s/Postprocess/in/%s.hits"%(_settings.rundir, _settings.PREFIX,_settings.rundir,_settings.PREFIX),"Abundance")
```
#### File: metAMOS/src/calcdist.py
```python
#!python
import os, sys, string, time, BaseHTTPServer, getopt, re, subprocess, webbrowser
from operator import itemgetter
from utils import *
from preprocess import Preprocess
from assemble import Assemble
sys.path.append(INITIAL_UTILS)
from ruffus import *
_readlibs = []
_skipsteps = []
_settings = Settings()
_asm = None
_mapper = "bowtie"
def init(reads, skipsteps, asm,mapper):
global _readlibs
global _asm
global _skipsteps
_mapper = mapper
_readlibs = reads
_skipsteps = skipsteps
_asm = asm
@files("%s/Assemble/out/%s.bout"%(_settings.rundir,_settings.PREFIX))
#@posttask(create_symlink,touch_file("completed.flag"))
@follows(MapReads)
def CalcDist(input,output):
if "CalcDist" in _skipsteps or "calcdist" in _skipsteps:
return 0
#given read pairs mapped to contigs, calc insert length
```
#### File: metAMOS/src/fannotate.py
```python
import os, sys, math, string, time, BaseHTTPServer, getopt, re, subprocess, webbrowser
from operator import itemgetter
from utils import *
from preprocess import Preprocess
from annotate import Annotate
sys.path.append(INITIAL_UTILS)
from ruffus import *
import pysam
_skipsteps = []
_settings = Settings()
def init(skipsteps):
global _skipsteps
_skipsteps = skipsteps
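# FunctionalAnnotation: blastp the predicted ORFs against SwissProt, keep hits with
# >80% identity over alignment length >50, map subject IDs to EC numbers and feed the
# resulting table to Krona's ktImportEC.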
@follows(Annotate)
@posttask(touch_file("%s/Logs/functionalannotation.ok"%(_settings.rundir)))
@files("%s/FindORFS/out/%s.faa"%(_settings.rundir,_settings.PREFIX),["%s/FunctionalAnnotation/out/blast.out"%(_settings.rundir),"%s/FunctionalAnnotation/out/krona.ec.input"%(_settings.rundir)])
def FunctionalAnnotation(input,output):
if "FunctionalAnnotation" in _skipsteps:
run_process(_settings, "touch %s/Logs/functionalannotation.skip"%(_settings.rundir), "FunctionalAnnotation")
run_process(_settings, "touch %s/FunctionalAnnotation/out/blast.out"%(_settings.rundir), "FunctionalAnnotation")
return 0
# uniprot_sprot_enz_set
if os.path.exists("%s/uniprot_sprot.fasta"%(_settings.BLASTDB_DIR)):
run_process(_settings,"%s/blastall -p blastp -i %s/FindORFS/out/proba.faa -d %s/uniprot_sprot.fasta -a %s -e 0.001 -m 8 -b 1 > %s/FunctionalAnnotation/out/blast.out"%(_settings.BLAST,_settings.rundir,_settings.BLASTDB_DIR,_settings.threads,_settings.rundir),"FunctionalAnnotation")
#run_process(_settings,"%s/blastall -p blastx -a %d -m 8 -b 1 -e 1e-2 -i %s -d %s/perl/metaphyler/test/test.ref.protein > %s/Annotate/out/%s.query.blastx"%(_settings.BLAST,_settings.threads,orfFA,_settings.METAMOS_UTILS,_settings.rundir,_settings.PREFIX))
#create index of EC codes
eclines = []
if os.path.exists("%s/uniprot_sprot_enz_set"%(_settings.BLASTDB_DIR)):
ecdata = open("%s/uniprot_sprot_enz_set"%(_settings.BLASTDB_DIR),'r')
eclines = ecdata.readlines()
ecdict = {}
for line in eclines:
line = line.replace("\n","")
data = line.split(" ")
#print data
data2 = []
for item in data:
if len(item) <= 1:
continue
else:
data2.append(item)
seqid = data2[0]
ecid = data2[-1]
ecdict[seqid] = ecid
blastout = ""
blastdict = {}
#process blast output
if os.path.exists("%s/FunctionalAnnotation/out/blast.out"%(_settings.rundir)):
blastout = open("%s/FunctionalAnnotation/out/blast.out"%(_settings.rundir),'r')
else:
print "blastall in FunctionalAnnotation failed.."
run_process(_settings, "touch %s/FunctionalAnnotation/out/blast.out"%(_settings.rundir), "FunctionalAnnotation")
return 0
blastdata = blastout.readlines()
foutput = open("%s/FunctionalAnnotation/out/krona.ec.input"%(_settings.rundir),'w')
for line in blastdata:
line = line.replace("\n","")
items = line.split("\t")
if len(items) < 10:
continue
seqid = items[1]
pid = float(items[2])
hitlen = float(items[3])
evalue = items[10]
if pid > 80 and hitlen > 50:
try:
foutput.write("%s\t%s\t%s\n"%(seqid,ecdict[seqid],evalue))
except KeyError:
continue
foutput.close()
#for top hit for each seq, report id, e-value and EC value
#create krona plot
run_process(_settings,"%s/KronaTools/bin/ktImportEC %s %s/FunctionalAnnotation/out/krona.ec.input"%(_settings.METAMOSDIR,"-l" if _settings.local_krona else "",_settings.rundir), "FunctionalAnnotation")
```
#### File: metAMOS/src/multialign.py
```python
import os, sys, string, time, BaseHTTPServer, getopt, re, subprocess, webbrowser
from operator import itemgetter
from utils import *
from assemble import Assemble
from findscforfs import FindScaffoldORFS
sys.path.append(INITIAL_UTILS)
from ruffus import *
_readlibs = []
_skipsteps = []
_forcesteps = []
_aln = None
_settings = Settings()
_refgenomes = ""
def init(reads, skipsteps, forcesteps, aln, refgenomes):
global _readlibs
global _skipsteps
global _forcesteps
global _refgenomes
global _aln
_readlibs = reads
_skipsteps = skipsteps
_forcesteps = forcesteps
_aln = aln
_refgenomes = refgenomes
@follows(Assemble)
@posttask(touch_file("%s/Logs/multialign.ok"%(_settings.rundir)))
@files("%s/Assemble/out/%s.asm.contig"%(_settings.rundir,_settings.PREFIX),"%s/MultiAlign/out/%s.tree"%(_settings.rundir,_settings.PREFIX))
def MultiAlign(input,output):
if "MultiAlign" in _skipsteps:
# can this be done automatically by ruffus pipeline?
run_process(_settings, "touch %s/Logs/multialign.skip"%(_settings.rundir), "MultiAlign")
run_process(_settings, "touch %s/MultiAlign/out/%s.tree"%(_settings.rundir, _settings.PREFIX), "MultiAlign")
return 0
#run_process(_settings, "%s -p blastp -i %s/FindORFS/out/%s.faa -d %s/Abundance/out/markers.pfasta -m 8 -b 10 -v 10 -a %s -o %s/Abundance/out/%s.blastp"%(blastc, _settings.rundir,_settings.PREFIX,_settings.rundir,_settings.threads,_settings.rundir,_settings.PREFIX),"Abundance")
pass
```
#### File: metAMOS/src/propagate.py
```python
import os, sys, string, time, BaseHTTPServer, getopt, re, subprocess, webbrowser
from operator import itemgetter
from utils import *
from annotate import Annotate
from scaffold import Scaffold
from findscforfs import FindScaffoldORFS
from abundance import Abundance
sys.path.append(INITIAL_UTILS)
from ruffus import *
from create_mapping import *
_readlibs = []
_skipsteps = []
_cls = None
_mated = False
_settings = Settings()
def init(reads, skipsteps, cls):
global _readlibs
global _skipsteps
global _cls
global _mated
_readlibs = reads
_skipsteps = skipsteps
_cls = cls
for lib in _readlibs:
if lib.mated == True:
_mated = True
break
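# Propagate: push contig-level taxonomic assignments through the AMOS scaffold graph
# (FilterEdgesByCluster) and then down to the individual reads within each contig.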
@follows(Abundance)
@posttask(touch_file("%s/Logs/propagate.ok"%(_settings.rundir)))
@files("%s/Annotate/out/%s.annots"%(_settings.rundir, _settings.PREFIX),"%s/Logs/propagate.ok"%(_settings.rundir))
def Propagate(input,output):
if _cls == "metaphyler":
#run_process(_settings, "python %s/python/create_mapping.py %s/class_key.tab %s/Abundance/out/%s.classify.txt %s/Propagate/in/%s.annots"%(_settings.METAMOS_UTILS,_settings.DB_DIR,_settings.rundir,_settings.PREFIX,_settings.rundir,_settings.PREFIX),"Propagate")
create_mapping("%s/class_key.tab"%(_settings.DB_DIR),"%s/Abundance/out/%s.classify.txt"%(_settings.rundir,_settings.PREFIX),"%s/Propagate/in/%s.annots"%(_settings.rundir,_settings.PREFIX))
else:
run_process(_settings, "ln %s/Annotate/out/%s.annots %s/Propagate/in/%s.annots"%(_settings.rundir,_settings.PREFIX,_settings.rundir,_settings.PREFIX),"Propagate")
# some classifiers (for example PhyloSift) output multiple contigs with the same classification on one line
# the line looks like ctg1","ctg2 class so we don't know which is right and we skip it in the classification below
if "Scaffold" in _skipsteps or "Propagate" in _skipsteps or "Assemble" in _skipsteps:
run_process(_settings, "cat %s/Propagate/in/%s.annots | grep -v \"\\\"\" | grep -v contigID > %s/Propagate/in/%s.clusters"%(_settings.rundir,_settings.PREFIX,_settings.rundir,_settings.PREFIX),"Propagate")
else:
run_process(_settings, "cat %s/Propagate/in/%s.annots | grep -v \"\\\"\" | grep -v contigID |sed s/utg//g |sed s/ctg//g > %s/Propagate/in/%s.clusters"%(_settings.rundir,_settings.PREFIX,_settings.rundir,_settings.PREFIX),"Propagate")
numMates = 0
if os.path.exists("%s/Assemble/out/%s.graph.cte"%(_settings.rundir, _settings.PREFIX)):
p = subprocess.Popen("cat %s/Assemble/out/%s.graph.cte |grep \"{CTL\" |wc -l"%(_settings.rundir, _settings.PREFIX), stdin=None, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(checkStdout, checkStderr) = p.communicate()
numMates = int(checkStdout.strip())
if "Propagate" in _skipsteps or "propagate" in _skipsteps or "Assemble" in _skipsteps or _cls == None or (_mated == False and numMates == 0):
run_process(_settings, "touch %s/Logs/propagate.skip"%(_settings.rundir), "Propagate")
run_process(_settings, "ln %s/Propagate/in/%s.clusters %s/Propagate/out/%s.clusters"%(_settings.rundir, _settings.PREFIX, _settings.rundir, _settings.PREFIX), "Propagate")
run_process(_settings, "ln %s/Annotate/out/%s.reads.annots %s/Propagate/out/%s.reads.clusters"%(_settings.rundir, _settings.PREFIX, _settings.rundir, _settings.PREFIX), "Propagate")
return
else:
run_process(_settings, "%s/FilterEdgesByCluster -b %s/Scaffold/in/%s.bnk -clusters %s/Propagate/in/%s.clusters -noRemoveEdges > %s/Propagate/out/%s.clusters"%(_settings.AMOS,_settings.rundir,_settings.PREFIX,_settings.rundir,_settings.PREFIX,_settings.rundir,_settings.PREFIX),"Propagate")
# here we also propagate to the reads within contigs
readctg_dict = {}
for lib in _readlibs:
ctgfile = open("%s/Assemble/out/%s.lib%dcontig.reads"%(_settings.rundir, _settings.PREFIX, lib.id), 'r')
for line in ctgfile.xreadlines():
line = line.replace("\n","")
read, ctg = line.split()
if ctg in readctg_dict:
readctg_dict[ctg].append(read)
else:
readctg_dict[ctg] = [read,]
ctgfile.close()
read_annots = {}
known_annots = {}
annotsfile = open("%s/Propagate/out/%s.clusters"%(_settings.rundir, _settings.PREFIX), 'r')
for line in annotsfile.xreadlines():
line = line.replace("\n", "")
ctg, annot = line.split()
known_annots[ctg] = annot
annotsfile.close()
annotsfile = open("%s/Propagate/in/%s.annots"%(_settings.rundir, _settings.PREFIX), "r")
for line in annotsfile.xreadlines():
line = line.replace("\n", "")
ctg, annot = line.split()
if ctg not in readctg_dict.keys() and ctg not in known_annots.keys():
read_annots[ctg] = annot
annotsfile.close()
if "Propagate" not in _skipsteps:
annotsfile = open("%s/Propagate/out/%s.clusters"%(_settings.rundir, _settings.PREFIX), 'a')
for ctg in read_annots:
annotsfile.write("%s\t%s\n"%(ctg, read_annots[ctg]))
annotsfile.close()
annotsfile = open("%s/Propagate/out/%s.clusters"%(_settings.rundir, _settings.PREFIX), 'r')
annotreads = open("%s/Propagate/out/%s.reads.clusters"%(_settings.rundir, _settings.PREFIX), 'w')
for line in annotsfile.xreadlines():
line = line.replace("\n", "")
ctg, annot = line.split()
if ctg in readctg_dict:
for x in readctg_dict[ctg]:
annotreads.write("%s\t%s\n"%(x, annot))
else:
annotreads.write("%s\t%s\n"%(ctg, annot))
annotsfile.close()
annotreads.close()
readctg_dict.clear()
```
#### File: Utilities/python/extract_mates_from_fasta.py
```python
import string, sys
#if __name__ == "__main__":
def extract_mates_from_fasta(infile):
f1 = open(infile,'r')
f2 = open("%s.mates"%(infile),'w')
first = 1
second = 0
firstmate = ""
linecnt = 0
for line in f1.xreadlines():
#if linecnt % 2 == 0:#">" not in line:
if ">" in line:
line = line.replace(">","")
line = line.replace("\n","")
data = line.split(" ")
mate= data[0]
mate = mate.strip()
else:
linecnt +=1
continue
if first:
firstmate = mate
first = 0
second = 1
elif second:
f2.write(firstmate+"\t"+mate+"\n")
f2.flush()
first = 1
second = 0
else:
linecnt+=1
continue
linecnt +=1
f1.close()
f2.close()
```
#### File: Utilities/python/extract_mates_from_fastq.py
```python
import string, sys
#if __name__ == "__main__":
def extract_mates_from_fastq(infile):
f1 = open(infile,'r')
f2 = open("%s.mates"%(infile),'w')
while 1:
s1 = f1.readline()
s2 = f1.readline()
s3 = f1.readline()
s4 = f1.readline()
l1 = f1.readline()
l2 = f1.readline()
l3 = f1.readline()
l4 = f1.readline()
if l4 == "":
break
m1 = s1.split(" ")[0]
m2 = l1.split(" ")[0]
m1 = m1.replace("\n","")
m2 = m2.replace("\n","")
f2.write(m1+"\t"+m2+"\n")
f2.flush()
f1.close()
f2.close()
```
#### File: Utilities/python/get_classify_stats.py
```python
import sys, os, string
ROOT = os.path.dirname(os.path.abspath(__file__))
#sys.path.insert(0, os.path.join(ROOT, '..'))
#sys.path.append(ROOT+"/lib")
import markup, datetime
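# Builds two small HTML tables via markup.py: per-class contig counts before (outfo)
# and after (outf) propagation, using the class-id-to-name key file.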
def get_classify_stats(ocf,cf,ck,out_dir,outf,outfo,taxa_level):
contigs_by_class = { }
origContigsByClass = { }
origClassifiedCount = 0
classifiedCount = 0
id_class = { }
id_class["0"] = "UNKNOWN"
orig_class_file = open(ocf)
class_file = open(cf)
class_key = open(ck)
#pass outdir as argument
out = open(out_dir + os.sep + outf, 'w')
orig_out = open(out_dir + os.sep + outfo, 'w')
# parse in key file
for line in class_key:
line = line.strip()
fields = line.split("\t")
# f1 is id, f2 is class name
if len(fields) != 2:
print "Error in file format\n"
else:
id_class[fields[0]] = fields[1]
# parse original file to identify ambiguous assignments (which is one more than max previous ID)
maxClassID = 0;
for line in orig_class_file:
line = line.strip()
fields = line.split()
# f1 is contig, f2 is class
if len(fields) != 2:
print "Error in file format\n"
elif maxClassID < int(fields[1]):
maxClassID = int(fields[1])
if origContigsByClass.has_key(fields[1]):
origContigsByClass[fields[1]]+=1
else:
origContigsByClass[fields[1]] = 1
origClassifiedCount += 1
id_class[str(maxClassID+1)] = "AMBIGUOUS"
# parse contig class file
for line in class_file:
line = line.strip()
fields = line.split()
# f1 is contig, f2 is class
if len(fields) != 2:
print "Error in file format\n"
elif contigs_by_class.has_key(fields[1]):
contigs_by_class[fields[1]] += 1
else:
contigs_by_class[fields[1]] = 1
if int(fields[1]) > 0:
classifiedCount += 1
# output stats
# todo: add info on ORFs and read counts
summary = markup.page()
summary.init(bodyattrs={'style':"margin:0px"})
summary.p("Originally classified contigs:")
summary.table(border="1")
for key in origContigsByClass:
try:
class_name = id_class[key]
except KeyError:
continue
summary.tr()
summary.add("<td align=\"left\">%s</td><td align=\"right\">%d</td><td align=\"right\">%3.2f%%</td>"%(class_name, origContigsByClass[key], origContigsByClass[key]/float(origClassifiedCount)*100))
summary.tr.close()
summary.tr()
summary.add("<td align=\"left\"Total classified:</td><td align=\"right\">%d</td>"%(origClassifiedCount))
summary.tr.close()
summary.table.close()
classify = markup.page()
classify.init(bodyattrs={'style':"margin:0px"})
classify.p("Classified contigs:")
classify.table(border="1")
for key in contigs_by_class:
try:
class_name = id_class[key]
except KeyError:
continue
classify.tr()
classify.add("<td align=\"left\"><a target=\"_blank\" href=\"../%s.classified/%s/\">%s</a></td><td align=\"right\">%d</td><td align=\"right\">%3.2f%%</td>"%(taxa_level, class_name, class_name, contigs_by_class[key], contigs_by_class[key]/float(classifiedCount)*100))
classify.tr.close()
classify.tr()
classify.add("<td align=\"left\"Total classified:</td><td align=\"right\">%d</td>"%(classifiedCount))
classify.tr.close()
classify.table.close()
additional = classifiedCount - origClassifiedCount
if additional >= 0:
summary.p("Total additional classified contigs: %d"%(additional))
else:
summary.p("Total contigs classified as unknown from known: %d"%(abs(additional)))
summary.p.close();
orig_out.write(summary.__str__())
out.write(classify.__str__())
orig_out.close()
out.close()
```
#### File: Utilities/python/getContigRepeats.py
```python
import sys, string, os
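# Concatenate every contig into a single pseudo-sequence (wrapped at 60 columns) so
# repeatoire can be run on one FASTA record; positions are mapped back to contigs below.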
def concatContig(ctgfile):
if len(sys.argv) < 3:
print "usage: contig_file out_file"
contig_file = open(ctgfile,'r')
out_file = open(ctgfile+".merged",'w')
out_data = ""
for line in contig_file.xreadlines():
if ">" not in line:
out_data += line.replace("\n","")
width = 60
pp = 0
out_file.write(">seq\n")
while pp+60 < len(out_data):
out_file.write(out_data[pp:pp+60]+"\n")
pp +=60
out_file.write(out_data[pp:]+"\n")
out_file.close()
contig_file.close()
if __name__ == "__main__":
contig_repeats = ""
contig_file = ""
if len(sys.argv) < 3:
print "usage: getContigRepeats.py <contig_file> <out_file>"
sys.exit(1)
# contig_repeats = open("myreps.out",'w')
try:
contig_repeats = open(sys.argv[2],'w')
except IOError, errmsg:
print "Error creating output file %s "%(sys.argv[2]), errmsg
sys.exit(1)
try:
contig_file = open(sys.argv[1],'r')
except IOError, errmsg:
print "Error openinig input file %s "%(sys.argv[1]), errmsg
sys.exit(1)
contig_file.close()
contig_file = open(sys.argv[1],'r')
concatContig(sys.argv[1])
if 1:
os.system("/fs/szdevel/metAMOS/Utilities/cpp/repeatoire --minreplen=200 --z=17 --sequence=%s.merged --xmfa=%s.xmfa"%(sys.argv[1],sys.argv[1]))
# repeat_file = open(sys.argv[2],'r')
repeat_file = open(sys.argv[1]+".xmfa",'r')
ctg_dict = {}
seq_map = {}
contig_data = contig_file.read()
num_contigs = contig_data.count(">")
contig_data = contig_data.split(">")[1:]
prev_pos = 0
eid = ""
iid = 1
for contig in contig_data:
hdr,seq = contig.split("\n",1)
id = hdr.split(" ")[0]
hdr = hdr.replace(">","").replace("\n","")
start = prev_pos
clen = len(seq.replace("\n",""))
end = prev_pos+clen
ctg_dict[iid] = [start, end, seq]
i = 0
while i < clen:
seq_map[prev_pos+i] = hdr#iid
i+=1
prev_pos = end+1
iid +=1
repeat_data = repeat_file.readlines()
repfam = 1
reppos = []
clc = 1
for line in repeat_data:
if "=" in line:
repfam +=1
ctg_list = []
for copy in reppos:
try:
#print seq_map[int(copy[0])]
if seq_map[int(copy[0])] == seq_map[int(copy[1])]:
ctg_list.append(seq_map[int(copy[0])])
#ctg_list.append(seq_map[copy[1]])
except KeyError:
continue
#print ctg_list
if len(ctg_list) > 1 and ctg_list.count(ctg_list[0]) != len(ctg_list):
for item in ctg_list:
contig_repeats.write("%d:"%repfam+str(item)+"\n")
clc +=1
reppos = []
if ">" not in line:
continue
gg, info = line.split(":",1)
spos,info = info.split("-",1)
epos,info = info.split(" ",1)
orient, info = info.split(" ",1)
# print spos, epos, orient
reppos.append([spos,epos])
```
#### File: Utilities/python/NB_install.py
```python
import os
import sys
import platform
import tarfile
import urllib
import fileinput
from ftplib import FTP
# Write a file to disk obtained via FTP
class FtpWriter:
def __init__(self, file):
self.f = open(file, 'wb')
self.count = 0
def __call__(self, block):
self.f.write(block)
if self.count % 100 == 0:
print '.',
self.count += 1
def close(self):
self.f.close()
# Download bacterial and archaeal genomes in NCBI RefSeq
def DownloadGenomes(genomeFile):
bDownload = True
if os.path.exists('./' + genomeFile):
bValidResponse = False
while not bValidResponse:
response = raw_input('NCBI genome file ' + genomeFile + ' already exists. Would you like to download the latest version [Y/N]? ')
if response[0] == 'Y' or response[0] == 'y':
bDownload = True
bValidResponse = True
elif response[0] == 'N' or response[0] == 'n':
bDownload = False
bValidResponse = True
if bDownload:
ncbiFTP = 'ftp.ncbi.nih.gov'
genomeDir = '/genomes/Bacteria'
# Connect to NBCI's FTP site using an anonymous account
print 'Connecting to NCBI FTP site (' + ncbiFTP + ')...'
ftp = FTP(ncbiFTP)
print ftp.login()
print '\n'
# Change to directory containing bacterial and archaeal genomes
print 'Changing to directory ' + genomeDir
print ftp.cwd(genomeDir)
print '\n'
# Download bacterial and archaeal genomes
print 'Downloading bacterial and archaeal genomes (' + genomeFile + ')...'
print '  This file is over 3GB and may take a while to download.'
ftpWriter = FtpWriter(genomeFile)
msg = ftp.retrbinary('RETR ' + genomeFile, ftpWriter, 32*1024*1024)
print '\n'
print msg
ftpWriter.close()
ftp.quit()
# Download NCBI taxonomy database
def DownloadTaxonomy(taxonomyDump):
ncbiFTP = 'ftp.ncbi.nih.gov'
taxonomyDir = '/pub/taxonomy'
# Connect to NBCI's FTP site using an anonymous account
print 'Connecting to NCBI FTP site (' + ncbiFTP + ')...'
ftp = FTP(ncbiFTP)
print ftp.login()
print '\n'
# Change to directory containing taxonomy files
print 'Changing to directory ' + taxonomyDir
print ftp.cwd(taxonomyDir)
print '\n'
# Download taxonomy files
print 'Downloading taxonomy database files...'
print ' It may take a few minutes to download these files.'
ftpWriter = FtpWriter(taxonomyDump)
msg = ftp.retrbinary('RETR ' + taxonomyDump, ftpWriter, 32*1024*1024)
print '\n'
print msg
ftpWriter.close()
ftp.quit()
# Decompress genome file
def DecompressGenomes(genomeFile):
tar = tarfile.open(genomeFile, 'r:gz')
tar.extractall('./ncbi_genomes/')
tar.close()
# Decompress taxonomy files
def DecompressTaxonomy(taxonomyDump):
tar = tarfile.open(taxonomyDump, 'r:gz')
tar.extractall('./taxonomy/')
tar.close()
# Get full taxonomy of all prokaryotes
def BuildTaxonomyFile():
# read taxon Id number of all contigs
print 'Extracting taxon Id from each contig...'
assessionToTaxonId = {}
accessionToSource = {}
genomeDirs = os.listdir('./ncbi_genomes/')
for dir in genomeDirs:
for filename in os.listdir('./ncbi_genomes/' + dir):
accession = filename.split('.')[0]
for line in fileinput.input(['./ncbi_genomes/' + dir + '/' + filename]):
if 'SOURCE' in line:
source = line[len('SOURCE'):].strip()
accessionToSource[accession] = source.replace('/', '_')
if '/db_xref="taxon:' in line:
taxonId = line.split(':')[1]
taxonId = int(taxonId[0:taxonId.rfind('\"')])
assessionToTaxonId[accession] = taxonId
fileinput.close()
break
print 'Number of contigs: ' + str(len(assessionToTaxonId))
# extract taxonomy of each contig
print 'Extracting taxonomy of each contig...'
nodeIdToName = {}
for line in fileinput.input(['./taxonomy/names.dmp']):
lineSplit = line.split('|')
id = int(lineSplit[0])
name = lineSplit[1].strip()
type = lineSplit[3].strip()
if type == 'scientific name':
nodeIdToName[id] = name
taxonIdToNode = {}
for line in fileinput.input(['./taxonomy/nodes.dmp']):
lineSplit = line.split('|')
taxonId = int(lineSplit[0])
parentId = int(lineSplit[1])
rank = lineSplit[2].strip()
taxonIdToNode[taxonId] = [rank, parentId]
ranks = ['strain', 'species', 'genus', 'family', 'order', 'class', 'phylum', 'superkingdom']
fout = open('taxonomy.txt', 'w')
for assession in assessionToTaxonId:
taxonId = assessionToTaxonId[assession]
source = accessionToSource[assession]
fout.write(assession + '\t')
taxonomy = ['','','','','','','','']
rankIndex = 0
while nodeIdToName[taxonId] != 'root':
node = taxonIdToNode[taxonId]
if node[0] in ranks:
while rankIndex < ranks.index(node[0]):
if rankIndex != 0:
taxonomy[rankIndex] = nodeIdToName[taxonId] + ' (' + ranks[rankIndex] + ')'
else:
taxonomy[rankIndex] = source
rankIndex += 1
taxonomy[ranks.index(node[0])] = nodeIdToName[taxonId]
rankIndex += 1
taxonId = node[1]
for r in xrange(7, -1, -1):
fout.write(taxonomy[r] + ';')
fout.write('\n')
fout.close()
# create genome-level input files
def CreateStrainSeqFiles():
# determine genome of each sequence
assessionToGenome = {}
for line in fileinput.input(['taxonomy.txt']):
lineSplit = line.split('\t')
seqId = lineSplit[0]
category = lineSplit[1].split(';')[7]
assessionToGenome[seqId] = category
    # create required directories
if not os.path.exists('./training'):
os.makedirs('./training')
os.makedirs('./training/sequences')
os.makedirs('./training/custom')
# remove any previously created models
for assession in assessionToGenome:
genome = assessionToGenome[assession]
genomeFile = genome.replace(' ', '_')
genomeFile = genomeFile.replace(':', '_')
genomeFile += '.fasta'
if os.path.exists('./training/sequences/' + genomeFile):
os.remove('./training/sequences/' + genomeFile)
# convert genbank files to fasta files
genomeDirs = os.listdir('./ncbi_genomes/')
for dir in genomeDirs:
for filename in os.listdir('./ncbi_genomes/' + dir):
fullFilename = './ncbi_genomes/' + dir + '/' + filename
# read sequence data from genbank file
data = open(fullFilename).read()
origin = data.rfind('ORIGIN')
start = data.find('1', origin)
end = data.find('//', origin)
seqLines = data[start:end].split('\n')
seq = ''
for line in seqLines:
subseq = line.split()
seq += ''.join(subseq[1:])
# write fasta file
assession = filename.split('.')[0]
genome = assessionToGenome[assession]
print assession
genomeFile = genome.replace(' ', '_')
genomeFile = genomeFile.replace(':', '_')
fout = open('./training/sequences/' + genomeFile + '.fasta', 'a')
fout.write('>' + assession + '\n')
index = 0
while index+60 < len(seq):
fout.write(seq[index:index+60] + '\n')
index += 60
fout.write(seq[index:] + '\n')
fout.close()
# create training file for genome models
trainingSet = open('./training/sequences.txt', 'w')
for filename in os.listdir('./training/sequences/'):
trainingSet.write('./training/sequences/' + filename + '\n')
trainingSet.close()
# Build Naive Bayes models
def BuildNaiveBayesModels():
    # create required directories
if not os.path.exists('./models'):
os.makedirs('./models')
os.makedirs('./models/genomes')
if not os.path.exists('./nb-temp-results'):
os.makedirs('./nb-temp-results')
    # build strain-level models
if platform.system() == 'Windows':
print 'Building genome-level models...'
os.system('nb-train-windows.exe -n 10 -t ./taxonomy.txt -s ./training/sequences.txt -m ./models/genomes/')
else: # assume the system can build the executable from source
print 'Compiling nb-train...'
os.chdir('./nb-train-src')
os.system('make')
os.chdir('..')
os.system('cp ./nb-train-src/nb-train .')
print 'Compiling nb-classify...'
os.chdir('./nb-classify-src')
os.system('make')
os.chdir('..')
os.system('cp ./nb-classify-src/nb-classify .')
print 'Building genome-level models...'
os.system('./nb-train -n 10 -t ./taxonomy.txt -s ./training/sequences.txt -m ./models/genomes/')
# create model file for classifying query fragments
modelFile = open('./models/models.txt', 'w')
for line in fileinput.input(['./training/sequences.txt']):
genome = line[line.rfind('/')+1:line.rfind('.')]
modelFile.write('./models/genomes/' + genome + '.txt' + '\n')
modelFile.close()
genomeFile = 'all.gbk.tar.gz'
taxonomyDump = 'taxdump.tar.gz'
print 'This script is maintained by <NAME>, <NAME>, and <NAME> (<EMAIL>).'
print ''
print 'Changes to the NCBI FTP site or NCBI file formats may break this script.'
print 'Please contact us if this script is broken and we will try to resolve the issue.'
print '\n'
print 'Downloading bacterial and archaeal genomes from NCBI:'
DownloadGenomes(genomeFile)
print '\n----------------------------------------------------------\n'
print 'Decompressing genomes:'
DecompressGenomes(genomeFile)
print '\n----------------------------------------------------------\n'
print 'Downloading NCBI taxonomy database:'
DownloadTaxonomy(taxonomyDump)
print '\n----------------------------------------------------------\n'
print 'Decompressing taxonomy files:'
DecompressTaxonomy(taxonomyDump)
print '\n----------------------------------------------------------\n'
print 'Building taxonomy file for genomes:'
BuildTaxonomyFile()
print '\n----------------------------------------------------------\n'
print 'Creating input sequence file for each genome:'
CreateStrainSeqFiles()
print '\n----------------------------------------------------------\n'
print 'Building Naive Bayes models for each genome:'
BuildNaiveBayesModels()
print '\n----------------------------------------------------------\n'
print 'Installation complete. '
```
#### File: Utilities/python/test_server.py
```python
import sys
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
HandlerClass = SimpleHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
Protocol = "HTTP/1.0"
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8000
server_address = ('127.0.0.1', port)
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
def run_while_true(server_class=BaseHTTPServer.HTTPServer,
handler_class=BaseHTTPServer.BaseHTTPRequestHandler):
"""
This assumes that keep_running() is a function of no arguments which
is tested initially and after each request. If its return value
is true, the server continues.
"""
server_address = ('', 8000)
httpd = server_class(server_address, handler_class)
while keep_running():
httpd.handle_request()
```
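`run_while_true()` above expects a `keep_running()` predicate that is never defined in this file; a minimal sketch of one possible predicate (the stop-file convention is an assumption, not part of the original):
```python
# Hypothetical keep_running() for run_while_true() above; the stop-file
# convention is illustrative only.
import os.path

def keep_running():
    # keep serving until a file named "stop" appears next to the script
    return not os.path.exists("stop")

# Example wiring, matching the Python 2 style of the file above:
# run_while_true(handler_class=SimpleHTTPRequestHandler)
```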
#### File: ruffus/test/test_filesre_split_and_combine.py
```python
from optparse import OptionParser
import sys, os
import os.path
import StringIO
import re,time
# add self to search path for testing
exe_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
sys.path.insert(0,os.path.abspath(os.path.join(exe_path,"..", "..")))
if __name__ == '__main__':
module_name = os.path.split(sys.argv[0])[1]
module_name = os.path.splitext(module_name)[0];
else:
module_name = __name__
import ruffus
parser = OptionParser(version="%%prog v1.0, ruffus v%s" % ruffus.ruffus_version.__version)
parser.add_option("-D", "--debug", dest="debug",
action="store_true", default=False,
help="Make sure output is correct and clean up.")
parser.add_option("-s", "--start_again", dest="start_again",
action="store_true", default=False,
help="Make a new 'original.fa' file to simulate having to restart "
"pipeline from scratch.")
parser.add_option("--jobs_per_task", dest="jobs_per_task",
default=50,
metavar="N",
type="int",
help="Simulates tasks with N numbers of files per task.")
parser.add_option("-t", "--target_tasks", dest="target_tasks",
action="append",
default = list(),
metavar="JOBNAME",
type="string",
help="Target task(s) of pipeline.")
parser.add_option("-f", "--forced_tasks", dest="forced_tasks",
action="append",
default = list(),
metavar="JOBNAME",
type="string",
help="Pipeline task(s) which will be included even if they are up to date.")
parser.add_option("-j", "--jobs", dest="jobs",
default=1,
metavar="jobs",
type="int",
help="Specifies the number of jobs (commands) to run simultaneously.")
parser.add_option("-v", "--verbose", dest = "verbose",
action="count", default=0,
help="Print more verbose messages for each additional verbose level.")
parser.add_option("-d", "--dependency", dest="dependency_file",
#default="simple.svg",
metavar="FILE",
type="string",
help="Print a dependency graph of the pipeline that would be executed "
"to FILE, but do not execute it.")
parser.add_option("-F", "--dependency_graph_format", dest="dependency_graph_format",
metavar="FORMAT",
type="string",
default = 'svg',
help="format of dependency graph file. Can be 'ps' (PostScript), "+
"'svg' 'svgz' (Structured Vector Graphics), " +
"'png' 'gif' (bitmap graphics) etc ")
parser.add_option("-n", "--just_print", dest="just_print",
action="store_true", default=False,
help="Print a description of the jobs that would be executed, "
"but do not execute them.")
parser.add_option("-M", "--minimal_rebuild_mode", dest="minimal_rebuild_mode",
action="store_true", default=False,
help="Rebuild a minimum of tasks necessary for the target. "
"Ignore upstream out of date tasks if intervening tasks are fine.")
parser.add_option("-K", "--no_key_legend_in_graph", dest="no_key_legend_in_graph",
action="store_true", default=False,
help="Do not print out legend and key for dependency graph.")
parser.add_option("-H", "--draw_graph_horizontally", dest="draw_horizontally",
action="store_true", default=False,
help="Draw horizontal dependency graph.")
parameters = [
]
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import StringIO
import re
import operator
import sys,os
from collections import defaultdict
import random
sys.path.append(os.path.abspath(os.path.join(exe_path,"..", "..")))
from ruffus import *
# use simplejson in place of json for python < 2.6
try:
import json
except ImportError:
import simplejson
json = simplejson
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Main logic
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# get help string
f =StringIO.StringIO()
parser.print_help(f)
helpstr = f.getvalue()
(options, remaining_args) = parser.parse_args()
tempdir = "temp_filesre_split_and_combine/"
def sleep_a_while ():
time.sleep(1)
if options.verbose:
verbose_output = sys.stderr
else:
verbose_output =open("/dev/null", "w")
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# split_fasta_file
#
@posttask(sleep_a_while)
@posttask(lambda: verbose_output.write("Split into %d files\n" % options.jobs_per_task))
@files(tempdir + "original.fa", tempdir + "files.split.success")
def split_fasta_file (input_file, success_flag):
#
# remove existing fasta files
#
import glob
filenames = sorted(glob.glob(tempdir + "files.split.*.fa"))
for f in filenames:
os.unlink(f)
import random
random.seed()
for i in range(options.jobs_per_task):
open(tempdir + "files.split.%03d.fa" % i, "w")
open(success_flag, "w")
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# align_sequences
#
@posttask(sleep_a_while)
@posttask(lambda: verbose_output.write("Sequences aligned\n"))
@follows(split_fasta_file)
@files_re(tempdir + "files.split.*.fa", # find all .fa files
".fa$", ".aln") # fa -> aln
def align_sequences (input_file, output_filename):
open(output_filename, "w").write("%s\n" % output_filename)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# percentage_identity
#
@posttask(sleep_a_while)
@posttask(lambda: verbose_output.write("%Identity calculated\n"))
@files_re(align_sequences, # find all results from align_sequences
r"(.*\.)(.+).aln$", # match file name root and substitute
r'\g<0>', # the original file
[r"\1\2.pcid", # .pcid suffix for the result
r"\1\2.pcid_success"], # .pcid_success to indicate job completed
r"\2") # extra parameter to remember the file index
def percentage_identity (input_file, output_files, split_index):
(output_filename, success_flag_filename) = output_files
open(output_filename, "w").write("%s\n" % split_index)
open(success_flag_filename, "w")
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# combine_results
#
@posttask(lambda: verbose_output.write("Results recombined\n"))
@posttask(sleep_a_while)
@files_re(percentage_identity, combine(r".*.pcid$"),
[tempdir + "all.combine_results",
tempdir + "all.combine_results_success"])
def combine_results (input_files, output_files):
"""
Combine all
"""
(output_filename, success_flag_filename) = output_files
out = open(output_filename, "w")
for inp, flag in input_files:
out.write(open(inp).read())
open(success_flag_filename, "w")
def start_pipeline_afresh ():
"""
Recreate directory and starting file
"""
print >>verbose_output, "Start again"
import os
os.system("rm -rf %s" % tempdir)
os.makedirs(tempdir)
open(tempdir + "original.fa", "w").close()
sleep_a_while ()
if __name__ == '__main__':
if options.start_again:
start_pipeline_afresh()
if options.just_print:
pipeline_printout(sys.stdout, options.target_tasks, options.forced_tasks,
verbose = options.verbose,
gnu_make_maximal_rebuild_mode = not options.minimal_rebuild_mode)
elif options.dependency_file:
pipeline_printout_graph ( open(options.dependency_file, "w"),
options.dependency_graph_format,
options.target_tasks,
options.forced_tasks,
draw_vertically = not options.draw_horizontally,
gnu_make_maximal_rebuild_mode = not options.minimal_rebuild_mode,
no_key_legend = options.no_key_legend_in_graph)
elif options.debug:
start_pipeline_afresh()
pipeline_run(options.target_tasks, options.forced_tasks, multiprocess = options.jobs,
logger = stderr_logger if options.verbose else black_hole_logger,
gnu_make_maximal_rebuild_mode = not options.minimal_rebuild_mode,
verbose = options.verbose)
os.system("rm -rf %s" % tempdir)
print "OK"
else:
pipeline_run(options.target_tasks, options.forced_tasks, multiprocess = options.jobs,
logger = stderr_logger if options.verbose else black_hole_logger,
gnu_make_maximal_rebuild_mode = not options.minimal_rebuild_mode,
verbose = options.verbose)
```
#### File: ruffus/test/test_tutorial7.py
```python
import sys, os
exe_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
sys.path.insert(0, os.path.abspath(os.path.join(exe_path,"..", "..")))
NUMBER_OF_RANDOMS = 10000
CHUNK_SIZE = 1000
working_dir = "temp_tutorial7/"
import time, sys, os
from ruffus import *
import random
import glob
#---------------------------------------------------------------
#
# make sure tasks take long enough to register as separate
# entries in the file system
#
def sleep_a_while ():
time.sleep(1)
#---------------------------------------------------------------
#
# Create random numbers
#
@posttask(sleep_a_while)
@follows(mkdir(working_dir))
@files(None, working_dir + "random_numbers.list")
def create_random_numbers(input_file_name, output_file_name):
f = open(output_file_name, "w")
for i in range(NUMBER_OF_RANDOMS):
f.write("%g\n" % (random.random() * 100.0))
#---------------------------------------------------------------
#
# Split initial file
#
@follows(create_random_numbers)
@posttask(sleep_a_while)
@split(working_dir + "random_numbers.list", working_dir + "*.chunks")
def step_4_split_numbers_into_chunks (input_file_name, output_files):
"""
Splits random numbers file into XXX files of CHUNK_SIZE each
"""
#
# clean up files from previous runs
#
for f in glob.glob("*.chunks"):
os.unlink(f)
#
# create new file every CHUNK_SIZE lines and
# copy each line into current file
#
output_file = None
cnt_files = 0
for i, line in enumerate(open(input_file_name)):
if i % CHUNK_SIZE == 0:
cnt_files += 1
output_file = open(working_dir + "%d.chunks" % cnt_files, "w")
output_file.write(line)
#---------------------------------------------------------------
#
# Calculate sum and sum of squares for each chunk file
#
@posttask(sleep_a_while)
@transform(step_4_split_numbers_into_chunks, suffix(".chunks"), ".sums")
def step_5_calculate_sum_of_squares (input_file_name, output_file_name):
output = open(output_file_name, "w")
sum_squared, sum = [0.0, 0.0]
cnt_values = 0
for line in open(input_file_name):
cnt_values += 1
val = float(line.rstrip())
sum_squared += val * val
sum += val
output.write("%s\n%s\n%d\n" % (repr(sum_squared), repr(sum), cnt_values))
def print_hooray_again():
print "hooray again"
def print_whoppee_again():
print "whoppee again"
#---------------------------------------------------------------
#
# Calculate sum and sum of squares for each chunk
#
@posttask(lambda: sys.stdout.write("hooray\n"))
@posttask(print_hooray_again, print_whoppee_again, touch_file("done"))
@merge(step_5_calculate_sum_of_squares, "variance.result")
@posttask(sleep_a_while)
def step_6_calculate_variance (input_file_names, output_file_name):
"""
Calculate variance naively
"""
output = open(output_file_name, "w")
#
# initialise variables
#
all_sum_squared = 0.0
all_sum = 0.0
all_cnt_values = 0.0
#
# added up all the sum_squared, and sum and cnt_values from all the chunks
#
for input_file_name in input_file_names:
sum_squared, sum, cnt_values = map(float, open(input_file_name).readlines())
all_sum_squared += sum_squared
all_sum += sum
all_cnt_values += cnt_values
all_mean = all_sum / all_cnt_values
variance = (all_sum_squared - all_sum * all_mean)/(all_cnt_values)
#
# print output
#
print >>output, variance
#---------------------------------------------------------------
#
# Run
#
pipeline_run([step_6_calculate_variance], verbose = 1)
``` |
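`step_6_calculate_variance` relies on the identity variance = (sum(x**2) - mean * sum(x)) / n, i.e. population variance. A standalone Python 3 check against `statistics.pvariance`, with made-up sample values:
```python
# Standalone sanity check of the naive variance identity used above;
# the sample values are invented.
import statistics

xs = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
n = len(xs)
total = sum(xs)
total_sq = sum(x * x for x in xs)
mean = total / n
naive_variance = (total_sq - total * mean) / n                 # same formula as step_6
assert abs(naive_variance - statistics.pvariance(xs)) < 1e-9   # both equal 4.0
```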
{
"source": "AAFC-MBB/NGSSampleManager",
"score": 3
} |
#### File: ngssm/entities/run.py
```python
from base import Base
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.orm import relationship
class Run(Base):
__tablename__ = 'run'
id = Column(Integer, primary_key=True)
# One-to-Many relationship to Samples contained in Run
samples = relationship("Sample", cascade="delete")
# sequencer type: e.g. 454, MiSeq, HiSeq
type = Column(String)
# collection of index tags used
# TODO load midsets into DB?
mid_set = Column(String)
# Historically, we've referred to all our 454 sequencing as PlateX-Y, where:
# X is our sequencing plate (numbered from 1 as we've submitted plates)
# Y is the region on the 454 sequencing plate; a plate contains a minimum of 2 regions
# TODO - needs to be more generic, perhaps "Run Alias"
plate = Column(String)
sequencing_notes = Column(String)
def __repr__(self):
return '<Run: %r>' % (self.plate)
```
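A rough usage sketch of the `cascade="delete"` relationship above. It assumes the companion `Sample` entity declares a `run_id` foreign key to `run.id` and accepts a `sample` keyword; none of that is shown in this file, so treat the snippet as illustrative only.
```python
# Illustrative only: exercises Run.samples and its cascade="delete" setting.
# Assumes ngssm.entities.sample.Sample has run_id = ForeignKey("run.id").
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from ngssm.entities.sample import Sample

engine = create_engine("sqlite:///:memory:")
Run.metadata.create_all(engine)          # shared Base, so both tables exist
session = sessionmaker(bind=engine)()

run = Run(type="MiSeq", mid_set="MIDSet-A", plate="Plate3-1")
run.samples.append(Sample(sample="soil-07"))
session.add(run)
session.commit()

session.delete(run)                      # cascade="delete" removes the sample too
session.commit()
assert session.query(Sample).count() == 0
```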
#### File: ngssm/views/sample.py
```python
from ngssm import app, auth, api
from flask import request, url_for, abort
from flask.ext.restful import Resource, reqparse, fields, marshal
from ngssm.entities.sample import Sample
sample_fields = {
'run_id': fields.Integer,
'mid': fields.String,
'target': fields.String,
'sff': fields.String,
'location': fields.String,
'sample': fields.String,
'primer_forward': fields.String,
'primer_reverse': fields.String,
'uri': fields.Url('sample')
}
sample_uris = {
'uri': fields.Url('sample')
}
class SampleAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('run_id', type = int, location = 'json')
self.reqparse.add_argument('mid', type = str, location = 'json')
self.reqparse.add_argument('target', type = str, location = 'json')
self.reqparse.add_argument('sff', type = str, location = 'json')
self.reqparse.add_argument('location', type = str, location = 'json')
self.reqparse.add_argument('sample', type = str, location = 'json')
self.reqparse.add_argument('primer_forward', type = str, location = 'json')
self.reqparse.add_argument('primer_reverse', type = str, location = 'json')
super(SampleAPI, self).__init__();
@auth.login_required
def get(self, id):
session = app.session_maker()
sample = session.query(Sample).filter_by(id=id).first();
print "Sample id: ", id, " Sample: ", sample
if sample == None:
abort(404)
return { 'sample': marshal(sample, sample_fields) }
@auth.login_required
def put(self, id):
session = app.session_maker()
sample = session.query(Sample).filter_by(id=id).first()
args = self.reqparse.parse_args();
if sample == None:
abort(404)
for k, v in args.iteritems():
if v != None:
setattr(sample,k,v)
session.commit()
return { 'sample': marshal(sample, sample_fields) }
@auth.login_required
def delete(self, id):
print "Received delete request for: ", id
session = app.session_maker()
sample = session.query(Sample).filter_by(id=id).first()
if sample == None:
abort(404)
session.delete(sample)
session.commit()
return { 'result': True }
class SampleListAPI(Resource):
def __init__(self):
self.postreqparse = reqparse.RequestParser()
self.postreqparse.add_argument('run_id', type = int, required = True, help="No run selected", location = 'json')
self.postreqparse.add_argument('mid', type = str, default = "", location = 'json')
self.postreqparse.add_argument('target', type = str, default = "", location = 'json')
self.postreqparse.add_argument('sff', type = str, default = "", location = 'json')
self.postreqparse.add_argument('location', type = str, default = "", location = 'json')
self.postreqparse.add_argument('sample', type = str, default = "", location = 'json')
self.postreqparse.add_argument('primer_forward', type = str, default = "", location = 'json')
self.postreqparse.add_argument('primer_reverse', type = str, default = "", location = 'json')
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('run_id', type = int, default = "")
self.reqparse.add_argument('mid', type = str, default = "")
self.reqparse.add_argument('target', type = str, default = "")
self.reqparse.add_argument('sff', type = str, default = "")
self.reqparse.add_argument('location', type = str, default = "")
self.reqparse.add_argument('sample', type = str, default = "")
self.reqparse.add_argument('primer_forward', type = str, default = "")
self.reqparse.add_argument('primer_reverse', type = str, default = "")
self.reqparse.add_argument('offset', type = int, default = 0)
self.reqparse.add_argument('limit', type = int, default = 10)
super(SampleListAPI, self).__init__();
@auth.login_required
def post(self):
args = self.postreqparse.parse_args();
if not 'run_id' in args:
abort(400)
sample = Sample()
for k, v in args.iteritems():
if v != None:
setattr(sample,k,v)
session = app.session_maker()
session.add(sample)
session.commit()
return { 'sample': marshal(sample, sample_fields) } , 201
@auth.login_required
def get(self):
session = app.session_maker()
query = session.query(Sample)
args = self.reqparse.parse_args();
# build a dictionary and then unpack it into
# the filter_by arguments using **
kwargs = {}
limit = args.get('limit')
offset = args.get('offset')
for k, v in args.iteritems():
if v != None and k!= 'limit' and k!= 'offset' and ((type(v) == str and len(v) > 0) or (type(v) == int and v > 0)):
kwargs[k]=v
if len(kwargs) > 0:
print "Applying ", len(kwargs), " filters"
query = query.filter_by(**kwargs)
return { 'samples': marshal(query.limit(limit).offset(offset).all(), sample_uris), 'sample_count': query.count(), 'next_page': '/ngssm/api/v1.0/samples?limit=' + str(limit) + '&offset=' + str(offset + limit) }
api.add_resource(SampleListAPI, '/ngssm/api/v1.0/samples', endpoint = 'samples')
api.add_resource(SampleAPI, '/ngssm/api/v1.0/samples/<int:id>', endpoint = 'sample')
``` |
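The keyword-filter trick in `SampleListAPI.get()` (build a dict, then unpack it into `filter_by`) is easy to demonstrate in isolation; the argument values below are invented:
```python
# Standalone sketch of the filter_by(**kwargs) pattern from SampleListAPI.get().
args = {"run_id": 3, "mid": "", "target": "COI", "limit": 10, "offset": 0}

kwargs = {
    k: v for k, v in args.items()
    if k not in ("limit", "offset") and v not in (None, "", 0)
}
print(kwargs)                            # {'run_id': 3, 'target': 'COI'}

# query = session.query(Sample).filter_by(**kwargs)
# is then equivalent to
# query = session.query(Sample).filter_by(run_id=3, target="COI")
```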
{
"source": "aafedotov/lpg",
"score": 2
} |
#### File: gaz/lpg/models.py
```python
from django.db import models
import os
class Lpg(models.Model):
date = models.DateTimeField()
price = models.FloatField()
volume = models.FloatField()
benz_price = models.FloatField()
cost = models.FloatField()
mileage = models.FloatField()
mileage_total = models.FloatField()
consump = models.FloatField()
saving = models.FloatField()
maintenance = models.IntegerField(blank=True, default=0)
lpg_maintenance = models.IntegerField(blank=True, default=0)
class Meta:
ordering = ['-date']
def __str__(self):
return str(self.date.date())
class File(models.Model):
file = models.FileField(upload_to='')
def filename(self):
return os.path.basename(self.file.name)
def __str__(self):
return self.filename()
```
#### File: gaz/sto/forms.py
```python
from django import forms
from .models import STO, Action, Group
class STOForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(STOForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
class Meta:
model = STO
fields = ['mileage', 'group', 'actions', 'description', 'receipt', 'price']
labels = {
'mileage': 'Текущий пробег:',
'group': 'Тип ТО:',
'price': 'Цена:',
'actions': 'Спецификация:'
}
mileage = forms.IntegerField()
group = forms.ModelChoiceField(queryset=Group.objects.all())
price = forms.IntegerField()
actions = forms.ModelMultipleChoiceField(
queryset=Action.objects.all(),
widget=forms.SelectMultiple
)
description = forms.CharField(widget=forms.Textarea, required=False)
receipt = forms.ImageField(required=False)
```
#### File: gaz/sto/models.py
```python
from django.db import models
class Group(models.Model):
"""Модель с типом обслуживания."""
name = models.CharField(max_length=100)
description = models.TextField(blank=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class Action(models.Model):
"""Перечень работ."""
name = models.CharField(max_length=100)
description = models.TextField(blank=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class STO(models.Model):
"""Модель с записями о прошедших ТО."""
date = models.DateField(auto_now_add=True)
mileage = models.IntegerField()
group = models.ForeignKey(
Group,
related_name='stos',
default=1,
on_delete=models.SET_DEFAULT,
blank=False, null=False
)
actions = models.ManyToManyField(Action)
price = models.IntegerField()
description = models.TextField(null=True, blank=True)
receipt = models.ImageField(upload_to='images/', null=True, blank=True)
class Meta:
ordering = ['-date']
def __str__(self):
return str(self.mileage)
```
#### File: gaz/sto/views.py
```python
from django.shortcuts import render, redirect, reverse
from django.db.models import Sum
from .forms import STOForm
from .models import STO
def sto_success(request):
template = 'sto/success.html'
return render(request, template)
def sto_view(request):
"""View-функция для формы чек-ина ТО."""
if request.user.username != 'faa':
return redirect('/auth/login/')
is_sto = True
form = STOForm(request.POST or None, files=request.FILES or None)
if form.is_valid():
form.save()
return redirect(reverse('sto:success'))
template = 'sto/sto.html'
context = {'form': form, 'is_sto': is_sto}
return render(request, template, context)
def sto_summary(request):
"""View-функция для саммари по ТО."""
if request.user.username != 'faa':
return redirect('/auth/login/')
template = 'sto/sto_summary.html'
stos = STO.objects.all()
total = stos.aggregate(Sum('price')).get('price__sum')
context = {'stos': stos, 'total': total}
return render(request, template, context)
```
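A side note on `sto_summary`: `aggregate(Sum('price'))` returns `{'price__sum': None}` when the table is empty. If a zero default is preferable for the template, `Coalesce` handles it; this is a sketch, not part of the project code:
```python
# Sketch only: zero default for an empty queryset.
from django.db.models import Sum, Value
from django.db.models.functions import Coalesce

total = STO.objects.aggregate(
    total=Coalesce(Sum('price'), Value(0))
)['total']
```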
#### File: gaz/todo/views.py
```python
from django.shortcuts import render, redirect, reverse
from .forms import TaskForm
from .models import Task
from django.shortcuts import get_object_or_404
def todo_list(request):
"""View-функция главной страницы с задачами."""
if request.user.username != 'faa':
return redirect('/auth/login/')
template = 'todo/todo_list.html'
form = TaskForm(request.POST or None)
if form.is_valid():
task = form.save(commit=False)
task.author = request.user
task.save()
return redirect('todo:list')
tasks = Task.objects.all()
return render(request, template, {'form': form, 'tasks': tasks})
def task_close(request, task_id):
"""View-функция для закрытия задач."""
task = get_object_or_404(Task, pk=task_id)
task.delete()
return redirect('todo:list')
``` |
{
"source": "aafedotov/stepik_oop",
"score": 4
} |
#### File: aafedotov/stepik_oop/lesson_3.4.py
```python
class ChessPlayer:
def __init__(self, name, surname, rating):
self.name = name
self.surname = surname
self.rating = rating
def __eq__(self, other):
if isinstance(other, (int, ChessPlayer)):
return self.rating == other
return 'Невозможно выполнить сравнение'
def __gt__(self, other):
if isinstance(other, (int, ChessPlayer)):
return self.rating > other
return 'Невозможно выполнить сравнение'
def __lt__(self, other):
if isinstance(other, (int, ChessPlayer)):
return self.rating < other
return 'Невозможно выполнить сравнение'
# tests
magnus = ChessPlayer('Carlsen', 'Magnus', 2847)
ian = ChessPlayer('Ian', 'Nepomniachtchi', 2789)
print(magnus == 4000) # False
print(ian == 2789) # True
print(magnus == ian) # False
print(magnus > ian) # True
print(magnus < ian) # False
print(magnus < [1, 2])  # prints "Невозможно выполнить сравнение" (comparison is not possible)
```
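Writing every rich-comparison method by hand, as above, works; `functools.total_ordering` can derive the missing operators from `__eq__` and `__lt__`. A related sketch, not part of the exercise:
```python
# Related sketch: total_ordering fills in __gt__, __ge__ and __le__
# once __eq__ and __lt__ are defined.
from functools import total_ordering

@total_ordering
class RatedPlayer:
    def __init__(self, rating):
        self.rating = rating

    def __eq__(self, other):
        return self.rating == getattr(other, "rating", other)

    def __lt__(self, other):
        return self.rating < getattr(other, "rating", other)

print(RatedPlayer(2847) > RatedPlayer(2789))   # True, __gt__ is derived
print(RatedPlayer(2789) >= 2789)               # True, comparison with ints also works
```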
#### File: aafedotov/stepik_oop/lesson_5.1.py
```python
class Wallet:
def __init__(self, currency, balance):
if not isinstance(currency, str):
raise TypeError('Неверный тип валюты')
if len(currency) != 3:
raise NameError('Неверная длина названия валюты')
if currency != currency.upper():
raise ValueError(
'Название должно состоять только из заглавных букв')
self.currency = currency
self.balance = balance
def __eq__(self, other):
if not isinstance(other, Wallet):
raise TypeError(f'Wallet не поддерживает сравнение с {other}')
if self.currency != other.currency:
raise ValueError('Нельзя сравнить разные валюты')
return self.balance == other.balance
@staticmethod
def add_sub(self, other, operator):
if not isinstance(other, Wallet) or self.currency != other.currency:
raise ValueError('Данная операция запрещена')
return Wallet(self.currency, operator(self.balance, other.balance))
def __add__(self, other):
return Wallet.add_sub(self, other, lambda a, b: a + b)
def __sub__(self, other):
return Wallet.add_sub(self, other, lambda a, b: a - b)
# tests
wallet1 = Wallet('USD', 50)
wallet2 = Wallet('RUB', 100)
wallet3 = Wallet('RUB', 150)
wallet4 = Wallet(12, 150) # raises TypeError('Неверный тип валюты')
wallet5 = Wallet('qwerty', 150) # raises NameError('Неверная длина названия валюты')
wallet6 = Wallet('abc', 150) # raises ValueError('Название должно состоять только из заглавных букв')
print(wallet2 == wallet3) # False
print(wallet2 == 100) # TypeError('Wallet не поддерживает сравнение с 100')
print(wallet2 == wallet1) # ValueError('Нельзя сравнить разные валюты')
wallet7 = wallet2 + wallet3
print(wallet7.currency, wallet7.balance) # prints 'RUB 250'
wallet2 + 45 # ValueError('Данная операция запрещена')
``` |
{
"source": "Aaftab-Alam/PythonPractice",
"score": 4
} |
#### File: Aaftab-Alam/PythonPractice/class_practice.py
```python
class Person:
def __init__(self,name,age,gender):
self.name=name
self.age=age
self.gender=gender
class Publish:
def __init__(self,publish_content):
self.publish_content=publish_content
def display(self):
print("Content of Publishing:-",self.publish_content)
class Faculty(Person):
def __init__(self,name,age,gender,role,content):
super().__init__(name,age,gender)
self.role=role
self.content=Publish(content)
def display(self):
print("Name :",self.name)
print("Age :",self.age)
print("Gender :",self.gender)
print("Role :",self.role)
self.content.display()
obj=Faculty("Aadil",18,"male","student","This is content")
obj.display()
```
#### File: Aaftab-Alam/PythonPractice/color_turtle.py
```python
import turtle
loc1=("dark slate blue","crimson","indigo","cyan","purple","white","deep pink")
loc=("indigo","cyan","deep pink","purple","white","dark slate blue","crimson")
t=turtle.Turtle()
e=turtle.Turtle()
f=turtle.Turtle()
g=turtle.Turtle()
screen=turtle.Screen()
screen.title("Polygon")
screen.setup(1000,1980)
screen.bgcolor("black")
turtle.delay(0)
turtle.tracer(30)
col=loc1
def func(a):
a.speed(0)
a.ht()
global col
if col==loc1:
col=loc
else:
col=loc1
for i in range(650):
a.color(col[i%7])
a.forward(i*1.5)
a.left(52.25)
a.width(3)
# a.goto(0,0)
func(e)
func(t)
func(f)
func(g)
```
#### File: Aaftab-Alam/PythonPractice/first_class.py
```python
class Vehicle:
def __init__(self, name, max_speed, mileage):
self.name = name
self.max_speed = max_speed
self.mileage = mileage
def info(self):
return f"Name: {self.name} \nSpeed: {self.max_speed}\nMileage: {self.mileage}"
class Person:
def __init__(self,name,age):
self.name=name
self.age=age
def info(self):
return f"Name: {self.name}\nAge: {self.age}"
a=Vehicle("SchoolVolvo" , 80 ,12)
c=Vehicle("Bus",90,15)
b=Person("Aadil",18)
print(a.info())
print("\n")
print(c.info())
print("\n")
print(b.info())
```
#### File: PythonPractice/Flask/s_tut1.py
```python
from flask import Flask,render_template
app=Flask(__name__)
#name1=input("Name :")
@app.route("/")
def hello():
name="Aaftab"
return render_template("index.html",name=name)
app.run(debug=True)
```
#### File: Aaftab-Alam/PythonPractice/happy_number_2.py
```python
n=int(input("Number"))
for i in range(10,n):
# num1=i
num=i#int(input("Enter a number"))
def func(num):
str_num=str(num)
list=[]
for k in str_num:
list.append(int(k))
sum=0
for j in list:
sum=sum+j**2
if sum==1:
print(i ,"Number is Happy")
else:
# print(sum)
num=sum
func(num)
try:
func(num)
except:
print(num,"Number is not happy")
```
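The script above labels a number unhappy only when the bare `except` catches the eventual `RecursionError`. A sketch of the usual cycle-detection alternative (the function name is mine):
```python
# Alternative sketch: stop as soon as a value repeats instead of waiting
# for the recursion limit.
def is_happy(num):
    seen = set()
    while num != 1 and num not in seen:
        seen.add(num)
        num = sum(int(digit) ** 2 for digit in str(num))
    return num == 1

for i in range(10, 30):
    print(i, "Number is Happy" if is_happy(i) else "Number is not happy")
```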
#### File: PythonPractice/myLibrary/new_library.py
```python
class Librarian:
def __init__(self,name,number):
self.name=name
self.number=number
try:
with open(str(self.name)+"books.txt","x"):
pass
except:
pass
try:
with open(str(self.name)+"customer_info.txt","x"):
pass
except:
pass
print("""1.Check books\n2.Add books\n3.Customer info\n4.Clear book records\n5.Clear customer""")
self.decision=int(input())
print("\n")
def check_books(self):
with open (str(self.name)+"books.txt","r") as books:
books_read=books.read()
if len(books_read)==0:
print("No books available currently")
else:
print(books_read)
def customer_info(self):
with open(str(self.name)+"customer_info.txt","r") as customer:
print(customer.read())
def add_books(self):
with open(str(self.name)+"books.txt","a") as books_write:
inp=input("Enter books name separated by commas :").split(",")
for name in inp:
books_write.write(name+"\n")
print("Books added successfully!")
def clear_book(self):
with open(str(self.name)+"books.txt","r+") as clear:
clear.truncate()
def clear_customer(self):
with open(str(self.name)+"customer_info.txt","r+") as clear:
clear.truncate()
class Customer:
def __init__(self,name,address,phonenumber):
self.name=name
self.address=address
self.phonenumber=phonenumber
print("""1.To lend book\n2.To donate book\n3.To return book""")
self.decision=int(input())
print("\n")
def lend_book(self):
book_name=input("Enter book name :")
librarian=input("Enter Librarian name :")
with open(str(librarian)+"books.txt","r+") as names:
read_name=names.readlines()
# print(read_name)
# print(book_name)
if (book_name+"\n") in read_name:
with open(str(librarian)+"customer_info.txt","a") as customer_name:
customer_name.write(f"Book name:-{book_name} :\nIssued to:-\nCustomer name:-{self.name}\nCustomer Address:-{self.address}\nCustomer phone number:-{self.phonenumber}\n\n")
names.seek(0)
for i in read_name:
if i!=(book_name+"\n"):
names.write(i)
names.truncate()
print("Book issued successfully")
else:
print("This book is not available in our library currently.")
def add_book(self):
book_name=input("Enter book name :")
librarian_name=input("Enter librarian name :")
with open(str(librarian_name)+"books.txt","a") as donate:
donate.write(book_name+"\n")
with open(str(librarian_name)+"customer_info.txt","a") as customer_name:
customer_name.write(f"Book name:-{book_name} :\nDonated by:-\nCustomer name:-{self.name}\nCustomer Address:-{self.address}\nCustomer phone number:-{self.phonenumber}\n\n")
print("Books donated successfully!")
def librarian_func():
name=input("Enter your name :")
number=input("Enter number :")
lib_obj=Librarian(name,number)
if lib_obj.decision==1:
lib_obj.check_books()
elif lib_obj.decision==2:
lib_obj.add_books()
elif lib_obj.decision==3:
lib_obj.customer_info()
elif lib_obj.decision==4:
lib_obj.clear_book()
elif lib_obj.decision==5:
lib_obj.clear_customer()
else:
print("Invalid choice!")
def customer_func():
name=input("Enter your name :")
address=input("Enter your address :")
phonenumber=input("Enter your phonenumber :")
customer_obj=Customer(name,address,phonenumber)
if customer_obj.decision==1:
customer_obj.lend_book()
elif customer_obj.decision==2:
customer_obj.add_book()
else:
print("Invalid coice")
role=int(input("1.For librarian\n2.For customer\n"))
if role==1:
librarian_func()
elif role==2:
customer_func()
else:
print("Invalid Choice.")
```
#### File: Aaftab-Alam/PythonPractice/operator_overloading.py
```python
class Time:
def gettime(self):
self.hour=int(input("Enter hour: "))
self.minute=int(input("Enter minute: "))
self.second=int(input("Enter seconds: "))
def display(self):
print(f"Time is {self.hour}:{self.minute}:{self.second}\n")
def __add__(self,other):
sum=Time()
sum.hour=self.hour+other.hour
sum.minute=self.minute+other.minute
if sum.minute>=60:
sum.hour+=1
sum.minute-=60
sum.second=self.second+other.second
if sum.second>=60:
sum.minute+=1
sum.second-=60
return sum
a=Time()
a.gettime()
a.display()
b=Time()
b.gettime()
b.display()
c=a+b
c.display()
```
#### File: Aaftab-Alam/PythonPractice/password_generator.py
```python
import random
import string
import time
initial=time.time()
i=0
while True:
inp=int(input("Length of password:"))
inp1=input("Strength of the password strong/weak?:")
#print(inp)
def func_string():
if inp1=="weak":
weak=''.join(random.choices(string.ascii_uppercase + string.digits , k=inp ))
return weak
elif inp1=="strong":
strong=''.join(random.choices(string.ascii_letters + string.digits + string.punctuation , k=inp ))
return strong
else:
return "Invalid Syntax"
print(func_string()+ "\n")
inp3=input(("You want to generate password Again? Yes\\no\n"))
if inp3=="yes":
pass
elif inp3=="no":
exec=time.time()
print(f"Program executed in {(exec-initial)} seconds")
break
else:
print("Invalid syntax\nTry again")
i=i+1
```
#### File: PythonPractice/PyQt_GUI/chatbot_gui.py
```python
import PyQt5.QtWidgets as qtw
import PyQt5.QtGui as qtg
class MainWindow(qtw.QWidget):
i=0
j=0
def __init__(self):
super().__init__()
self.setWindowTitle("Chatbot")
self.setupUI()
def setupUI(self):
global chatlayout
outerlayout=qtw.QVBoxLayout()
header=qtw.QGridLayout()
footer=qtw.QGridLayout()
chatlayout=qtw.QGridLayout()
##------Header--------##
self.chat_icon=qtw.QLabel("Icon",self)
#self.chat_icon.setStyleSheet=("margin:0px;padding:30px; background:{color};color:white;")
self.chat_icon.setStyleSheet("border:2px solid black;padding:0px;")
header.addWidget(self.chat_icon,0,0)
self.chat_name=qtw.QLabel("Chatbot",self)
self.chat_name.setStyleSheet("border:2px solid black")
header.addWidget(self.chat_name,0,1)
header.setColumnStretch(2,1)
## -----TextBar------##
self.textbar=qtw.QLineEdit(placeholderText="Type...")
footer.addWidget(self.textbar)
self.textbar.setStyleSheet("border:2px solid lightblue;border-radius:10px;padding:10px;margin:20px;height:140px;font-size:80px;")
self.send_button=qtw.QPushButton("Send",clicked=lambda:self.sent())
footer.addWidget(self.send_button,0,1)
outerlayout.addLayout(header)
outerlayout.addLayout(chatlayout)
outerlayout.addStretch()
outerlayout.addLayout(footer)
self.setLayout(outerlayout)
def sent(self):
global chatlayout,i,j
text=self.textbar.text()
if text=="":
pass
else:
self.chat=qtw.QLabel(text)
chatlayout.addWidget(self.chat,MainWindow.i,MainWindow.j)
MainWindow.i+=1
self.textbar.setText("")
self.textbar.setPlaceholderText("Type...")
#MainWindow.j+=1
app=qtw.QApplication([])
win=MainWindow()
win.show()
app.exec_()
```
#### File: PythonPractice/PyQt_GUI/qt_combo_box.py
```python
import PyQt5.QtWidgets as qtw
import PyQt5.QtGui as qtg
class MainWindow(qtw.QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Hellow")
self.setLayout(qtw.QVBoxLayout())
self.setupUI()
def setupUI(self):
self.my_label=qtw.QLabel(self)
self.my_label.setText("Pick any one of the following!")
self.layout(). addWidget(self.my_label)
self.my_label1=qtw.QLabel(self)
self.my_label1.setText("")
self.layout(). addWidget(self.my_label1)
self.my_label2=qtw.QLabel(self)
self.my_label2.setText("")
self.layout(). addWidget(self.my_label2)
self.my_combo=qtw.QComboBox(self)
self.my_combo.addItem("Pizza","First")
self.my_combo.addItem("Burger","Second")
self.my_combo.addItem("Cold Drink","Third")
#Adding List
self.my_combo.addItems(["French Fries","Momos","Kulche"])
self.layout(). addWidget(self.my_combo)
#Adding item/items at certain index
self.my_combo.insertItem(2,"Inserted item")
#insertItem(index,list) for multiple items
self.my_button=qtw.QPushButton(self)
self.my_button.setText("Click me!")
self.my_button.pressed.connect(lambda : self.my_func())
self.my_button.setStyleSheet("display:inline-block;padding:0.3em 1.2em;margin:0 0.1em 0.1em 0;border:0.06em solid rgba(0,255,255,1);border-radius:2em;box-sizing: border-box;text-decoration:none;font-family:'Roboto',sans-serif;font-weight:300;color:#000098;text-shadow: 0 0.04em 0.04em rgba(0,0,0);text-align:center;transition: all 0.2s; border-radius:50px; background-color:rgba(0,215,215,1);")
self.layout().addWidget(self.my_button)
def my_func(self):
self.my_label.setText(f"You picked {self.my_combo.currentText()}!")
self.my_label1.setText(f"Data of item {self.my_combo.currentData()}")
self.my_label2.setText(f"Index of item {self.my_combo.currentIndex()}")
app=qtw.QApplication([])
win=MainWindow()
win.show()
app.exec_()
```
#### File: PythonPractice/PyQt_GUI/qt_entry_line2.py
```python
import PyQt5.QtWidgets as qtw
import PyQt5.QtGui as qtg
import PyQt5.QtCore as qc
class MainWindow(qtw.QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Hellow")
self.setLayout(qtw.QVBoxLayout())
self.setupUI()
def setupUI(self):
self.entry=qtw.QLineEdit(self,placeholderText="Hi")
self.entry.setAlignment(qc.Qt.AlignRight)
self.layout().addWidget(self.entry)
app=qtw.QApplication([])
win=MainWindow()
win.show()
app.exec_()
```
#### File: PythonPractice/Tkinter_practice/file_opener.py
```python
from tkinter import *
import os
import tkinter.messagebox as tmsg
root= Tk()
root.title("File Opener")
root.geometry("1000x2000")
scrollbar=Scrollbar(root,width=0)
scrollbar.pack(side=RIGHT,fill=Y)
file_name="/storage/emulated/0"
name=""
def next(event=None):
global name
name=listbox.get(listbox.curselection())
global file_name
#dirs=os.listdir(f'/storage/emulated/0/{name}')
file_name=file_name+"/"+str(name)
files()
def back():
global file_name
#-----------------#
file_name=file_name.rsplit("/",1)[0]
#-----------------#
files()
def files():
try:
dirs=os.listdir(file_name)
dirs.sort()
global listbox
last=listbox.size()
listbox.delete(0,last)
for i in dirs:
listbox.insert(END,i)
except:
tmsg.showinfo("Can't Open","Sorry ,the selected file is not \na directory")
back()
frame=Frame(root)
frame.pack(side=BOTTOM,fill=X)
Button(frame,text="Open", command=next).pack(side=RIGHT,padx=30)
Button(frame,text="Back",command=back).pack(side=LEFT,padx=30)
listbox=Listbox(root, yscrollcommand=scrollbar.set, height=1000, selectbackground="sky blue",selectforeground="purple", activestyle="none")
listbox.pack(fill=X)
listbox.bind("<Double-1>",next)
files()
scrollbar.config(command=listbox.yview)
root.mainloop()
```
#### File: Tkinter_practice/projects/canvas_paint_brush.py
```python
from tkinter import *
import random
canvas_width = 1000
canvas_height = 900
def origin(event):
global x2,y2
x2,y2=event.x,event.y
def paint( event ):
colors_list=["red","green","blue"]
color=random.choice(colors_list)
global x2,y2
# x2,y2=0,0
x1,y1=event.x,event.y
w.create_line( x2,y2,x1,y1, fill = color ,smooth=True)
x2,y2=x1,y1
master = Tk()
master.title( "Paint Brush" )
w = Canvas(master,
width=canvas_width,
height=canvas_height)
w.pack(expand = YES, fill = BOTH)
w.bind("<Button-1>",origin)
w.bind( "<B1-Motion>", paint )
message = Label( master, text = "Press and Drag the mouse to draw" )
message.pack( )
Button(text="Clear",command=lambda :w.delete("all")) .pack(side=BOTTOM)
mainloop()
```
#### File: Tkinter_practice/projects/rectangle_drawyer.py
```python
from tkinter import *
root=Tk()
root.geometry("1000x1000")
def origin(event):
global x,y
x=event.x
y=event.y
def rect(event):
x1,y1=event.x,event.y
canvas.delete("last")
canvas.create_rectangle(x,y,x1,y1,outline="grey", width=5,tag="last")
canvas.update()
def release(event):
x2,y2=event.x,event.y
canvas.delete("last")
canvas.create_rectangle(x,y,x2,y2,fill="#992512",outline="#992512")
canvas=Canvas(root,width=700,height=700, highlightbackground="black", highlightthickness=4)
canvas.pack(anchor="c")
canvas.bind("<Button-1>",origin)
canvas.bind("<B1-Motion>",rect)
canvas.bind("<ButtonRelease-1>", release)
Button(text="Clear",command=lambda :canvas.delete("all")).pack()
root.mainloop()
```
#### File: Tkinter_practice/projects/speed_tester.py
```python
import pyspeedtest
from tkinter import *
from tkinter import ttk
root=Tk()
root.geometry("1000x1200")
def test():
down.set("Testing...")
up.set("Testing...")
ping.set("Testing...")
root.update()
st=pyspeedtest.SpeedTest("www.google.com")
down.set(st.download())
#up.set(st.upload())
#ping.set(st.ping())
up=StringVar()
up.set("0.0")
down=StringVar()
down.set("0.0")
ping=StringVar()
ping.set("0ms")
f1=Frame(root)
f1.pack(side=TOP,fill=X)
separator = ttk.Separator(root,orient='horizontal')
separator.pack(fill=X)
f2=Frame(root)
f2.pack(side=TOP,fill=BOTH,pady=30)
separator = ttk.Separator(root,orient='horizontal')
separator.pack(fill=X)
f3=Frame(root)
f3.pack(side=BOTTOM, fill=X)
l1=Label(f1,text="Speedtest",font="Lucida 17 bold",fg="purple")
l1.pack(anchor="nw")
l2=Label(f2,text="Download Speed",font="Helvetica 13 bold")
l2.pack(anchor="nw")
l3=Label(f2,textvariable=down,font=("Noto Serif",40))
l3.pack(anchor="c",pady=120)
f_down=Frame(f2)
f_down.pack(side=BOTTOM,fill=X)
l4=Label(f2,text="Upload Speed",font="lucida 7 bold")
l4.pack(side=LEFT,anchor="w")
l5=Label(f_down,textvariable=up,font=("Noto Serif",6))
l5.pack(side=LEFT)
l6=Label(f2,text="Ping",font="lucida 7 bold")
l6.pack(side=RIGHT,anchor="e")
l6=Label(f_down,textvariable=ping,font="lucida 6")
l6.pack(side=RIGHT)
button=Button(f3,text="Test",command=test)
button.pack(pady=30)
root.mainloop()
```
#### File: PythonPractice/Tkinter_practice/tkbutton.py
```python
from tkinter import *
root=Tk()
root.geometry("1000x700")
def func():
print("Helloooo")
f1=Frame(root,bg="grey")
f1.pack(side=BOTTOM,fill=X)
b1=Button(f1, text="Submit", borderwidth=5, relief=SUNKEN,pady=0 ,command=func)
b1.pack(pady=30,side=RIGHT,padx=30)
root.mainloop()
```
#### File: Aaftab-Alam/PythonPractice/tower_of_hanoi.py
```python
def hanoi(n,A,B,C):
if n>0:
# print(A,B,C)
hanoi(n-1,A,C,B)
if A:
C.append(A.pop())
# print(A,B,C)
hanoi(n-1,B,A,C)
A=[1,2,3,4,5]
C=[]
B=[]
hanoi(len(A),A,B,C)
print(A,B,C)
``` |
{
"source": "aafulei/leetcode",
"score": 4
} |
#### File: leetcode/python/0202-happy-number.py
```python
class Solution:
def isHappy(self, n: int) -> bool:
seen = set()
while True:
s = 0
while n:
q, r = divmod(n, 10)
s += r * r
n = q
if s == 1:
return True
elif s not in seen:
seen.add(s)
print(s)
else:
return False
n = s
```
#### File: leetcode/python/0905-sort-array-by-parity.py
```python
class Solution:
def sortArrayByParity(self, nums: List[int]) -> List[int]:
N = len(nums)
b = 0
e = N - 1
while b < e:
bgood = nums[b] % 2 == 0
egood = nums[e] % 2 == 1
if not bgood and not egood:
nums[b], nums[e] = nums[e], nums[b]
b += bgood
e -= egood
return nums
```
#### File: leetcode/scripts/lit.py
```python
import argparse
import datetime
import difflib
import io
import os
import platform
import re
import subprocess
import sys
# markdown outliner
import mo
# --- constants ---------------------------------------------------------------
BACKUP_DIR = ".bak"
CODE_DIRS = ["c", "c++", "python", "shell"]
DESCRIPTION = "LeetCode Information Tracker"
DEFAULT_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
DIFFICULTY_DICT = {"Easy": 0, "Medium": 1, "Hard": 2}
DIFFICULTIES = ["Easy", "Medium", "Hard"]
EXT_TO_LANG = {
".c": "c",
".cpp": "c++",
".py": "python",
".sh": "shell",
".txt": "testcases"
}
HELP_ADD = "number of target problem to add or update"
HELP_ASK = "ask for day of week"
HELP_DATE = "date string in the format of yy/mm/dd"
HELP_KEY = "show problems with given key topics only"
HELP_LIST = "list files that miss dates or topics"
HELP_TOPIC = "show problems with given topics only"
LANG_TO_EXT = {
"c": ".c",
"c++": ".cpp",
"python": ".py",
"shell": ".sh",
"testcases": ".txt"
}
REGEX_OBJ_DICT = {
"date":
re.compile(r"^(\d{2})/(\d{2})/(\d{2}) = (Mon|Tue|Wed|Thu|Fri|Sat|Sun)$"),
"problem":
re.compile(r"^([1-9][0-9]{0,3})\. (.+) \[(Easy|Medium|Hard)\]$"),
"topic":
re.compile(r"(\[[-a-zA-Z ()]*\*?\]|{[-a-zA-Z ()]*\*?})"),
"yy/mm/dd":
re.compile(r"(\d{2,4})/(\d{1,2})/(\d{1,2})")
}
SKIP_LIST = ["testcases"]
SUB_TO_SUP = {
"Iterator": "Design", "Monotonic Stack": "Stack", "Shortest Path": "Graph"
}
TESTCASE_DIR = "testcases"
LISTED_TOPICS = [
"Array", "Backtracking", "Biconnected Component", "Binary Indexed Tree",
"Binary Search", "Binary Search Tree", "Binary Tree", "Bit Manipulation",
"Bitmask", "Brainteaser", "Breadth-First Search", "Bucket Sort",
"Combinatorics", "Concurrency", "Counting", "Counting Sort", "Data Stream",
"Database", "Depth-First Search", "Design", "Divide and Conquer",
"Doubly-Linked List", "Dynamic Programming", "Enumeration",
"Eulerian Circuit", "Game Theory", "Geometry", "Graph", "Greedy",
"Hash Function", "Hash Table", "Heap (Priority Queue)", "Interactive",
"Iterator", "Line Sweep", "Linked List", "Math", "Matrix", "Memoization",
"Merge Sort", "Minimum Spanning Tree", "Monotonic Queue",
"Monotonic Stack", "Number Theory", "Ordered Set", "Prefix Sum",
"Probability and Statistics", "Queue", "Quickselect", "Radix Sort",
"Randomized", "Recursion", "Rejection Sampling", "Reservoir Sampling",
"Rolling Hash", "Segment Tree", "Shell", "Shortest Path", "Simulation",
"Sliding Window", "Sorting", "Stack", "String", "String Matching",
"Strongly Connected Component", "Suffix Array", "Topological Sort", "Tree",
"Trie", "Two Pointers", "Union Find"]
WEEKDAY_DICT = {
"Mon": 0, "Tue": 1, "Wed": 2, "Thu": 3, "Fri": 4, "Sat": 5, "Sun": 6}
WEEKDAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
# --- classes -----------------------------------------------------------------
class Date:
def __init__(self, line):
ro = REGEX_OBJ_DICT["date"]
y, m, d, w = ro.search(line).groups()
self.year = int(y) + 2000
self.month = int(m)
self.day = int(d)
self.weekday = WEEKDAY_DICT[w]
assert check_date(self.year, self.month, self.day, self.weekday)
@property
def weekday_str(self):
return WEEKDAYS[self.weekday]
def __eq__(self, o):
return (self.year, self.month, self.day) == (o.year, o.month, o.day)
def __hash__(self):
return (self.year, self.month, self.day).__hash__()
def __lt__(self, o):
return (self.year, self.month, self.day) < (o.year, o.month, o.day)
def __repr__(self):
return (f"{self.year:04d}/{self.month:02d}/{self.day:02d} = " +
f"{self.weekday_str}")
def __str__(self):
return self.__repr__()
class Problem:
def __init__(self, line):
ro = REGEX_OBJ_DICT["problem"]
n, self.name, d = ro.search(line).groups()
self.num = int(n)
self.difficulty_level = DIFFICULTY_DICT[d]
@property
def difficulty(self):
return DIFFICULTIES[self.difficulty_level]
def __bool__(self):
return bool(self.num)
def __eq__(self, o):
return ((self.num, self.name, self.difficulty_level) ==
(o.num, o.name, o.difficulty_level))
def __hash__(self):
return (self.num, self.name, self.difficulty_level).__hash__()
def __lt__(self, o):
return self.num < o.num
def __repr__(self):
return f"{self.num}. {self.name} [{self.difficulty}]"
def __str__(self):
return f"{self.num}. {self.name}"
class Record:
def __init__(self, filepath, problem, lang, dates, topics, key_topics):
self.filepath = filepath
self.problem = problem
self.lang = lang
self.dates = dates
self.topics = topics
self.key_topics = key_topics
@property
def filename(self):
return os.path.basename(self.filepath)
@property
def dirname(self):
return os.path.dirname(self.filepath)
def __str__(self):
return (f"{self.problem=} | {self.lang=} | {self.dates=} | " +
f"{self.key_topics=} | {self.topics=} | {self.filepath=}")
class Submission:
def __init__(self, date, problem, lang, new):
self.date = date
self.problem = problem
self.lang = lang
self.new = new
def __lt__(self, o):
return (self.date, self.problem, self.lang) < (
o.date, o.problem, o.lang)
# --- helpers -----------------------------------------------------------------
def get_lang_from_ext(ext):
return EXT_TO_LANG.get(ext, "")
def get_ext_from_lang(lang):
return LANG_TO_EXT.get(lang, "")
def get_comment_tag_from_lang(lang):
return "//" if lang in set(["c", "c++"]) else "#"
def get_basename_from_problem(problem):
num = problem.num
name = problem.name
basename = f"{num:04d}-"
prev = ""
for c in name:
if c.isalnum():
prev = c.lower()
basename += prev
elif prev != "-" and c in " -":
prev = "-"
basename += prev
return basename
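# Worked example (added for clarity; the problem title is illustrative):
#   get_basename_from_problem(Problem("202. Happy Number [Easy]"))
#   returns "0202-happy-number"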
def check_date(y, m, d, w):
return w == datetime.datetime(y, m, d).weekday()
def exact_match(s, key):
ro = REGEX_OBJ_DICT.get(key, "")
return ro.fullmatch(s) if ro else False
def fill_in_dates(para, dates):
invalid = []
inconsistent = []
for line in para:
try:
dates.append(Date(line))
except (AttributeError, ValueError):
invalid.append(line)
continue
except AssertionError:
inconsistent.append(line)
continue
return (invalid, inconsistent)
def fill_in_topics(para, topics, key_topics):
for line in para[1:]:
topic_list = REGEX_OBJ_DICT["topic"].findall(line)
for t in topic_list:
t = t[1:-1]
if t[-1] == "*":
t = t[:-1]
key_topics.append(t)
topics.append(t)
def map_topics(topics_set):
"""
Input
-----
topics_set = {"Stack", "Monotonic Stack", "String"}, or
{"Monotonic Stack", "String"}
Output
------
sup_to_sub = {
"Stack": {"Stack - Monotonic Stack"},
"String": set()
}
merged_set = {"Stack - Monotonic Stack", "String"}
"""
sup_to_sub = {}
merged_set = set()
for t in topics_set:
if t in SUB_TO_SUP:
sup = SUB_TO_SUP[t]
sub = f"{sup} - {t}"
sup_to_sub.setdefault(sup, set())
sup_to_sub[sup].add(sub)
merged_set.add(sub)
else:
sup_to_sub.setdefault(t, set())
merged_set.add(t)
return sup_to_sub, merged_set
def scan(filepath):
_, ext = os.path.splitext(filepath)
lang = get_lang_from_ext(ext)
if not lang or lang in SKIP_LIST:
return
paragraphs = []
buf = []
tag = get_comment_tag_from_lang(lang)
def flush():
nonlocal buf
nonlocal paragraphs
if not buf:
return
paragraphs.append(buf)
buf = []
with open(filepath) as file:
for line in file:
line = line[:-1]
if not line:
flush()
elif line.startswith(tag):
buf.append(line[len(tag):].strip())
else:
if not buf:
continue
paragraphs.append(buf)
buf = []
break
problem = None
dates = []
topics = []
key_topics = []
for para in paragraphs:
first_line = para[0]
if not dates and exact_match(first_line, "date"):
invalid, inconsistent = fill_in_dates(para, dates)
if invalid:
print(f"Warning: Invalid dates found in {filepath}")
for i in invalid:
print(" " * 10 + "-", i)
if inconsistent:
print(f"Error : Inconsistent dates found in {filepath}")
for i in inconsistent:
print(" " * 10 + "-", i)
bad = False
if len(set(dates)) != len(dates):
print(f"Error : Dates have duplicates in {filepath}")
bad = True
if sorted(dates) != dates[::-1]:
print(f"Error : Dates not in reverse order in {filepath}")
bad = True
if bad:
for d in dates:
print(" " * 10 + "-", d)
elif not problem and exact_match(first_line, "problem"):
problem = Problem(first_line)
elif not topics and first_line == "Related Topics:":
fill_in_topics(para, topics, key_topics)
bad = False
if len(set(topics)) != len(topics):
print(f"Warning: Topics have duplicates in {filepath}")
bad = True
if sorted(topics) != topics:
print(f"Warning: Topics not sorted in {filepath}")
bad = True
if bad:
for t in topics:
print(" " * 10 + "-", t)
for t in topics:
if t not in LISTED_TOPICS:
print(f"Warning: Non-listed topic \"{t}\" found in " +
f"{filepath}")
return Record(filepath=filepath, problem=problem, lang=lang, dates=dates,
topics=topics, key_topics=key_topics)
_has_problem_num = False
def get_problem_num(line):
"""
Return 1260 from a line "- [1260. Shift 2D Grid] ..."
Return 0 for empty lines before the problem lines
Return 10000 for empty lines after the problem lines
"""
dot_pos = line.find('.')
num_str = line[3:(dot_pos if 4 <= dot_pos <= 7 else 0)]
if num_str.isnumeric():
global _has_problem_num
_has_problem_num = True
return int(num_str)
elif _has_problem_num:
return 10000
else:
return 0
# --- program -----------------------------------------------------------------
class Program:
def __init__(self):
self.args = self.parse()
self.rootpath = self.args.rootpath
def parse(self):
parser = argparse.ArgumentParser(prog="lit", description=DESCRIPTION)
parser.add_argument("--root", dest="rootpath", metavar="PATH",
default=DEFAULT_ROOT, help="root path")
subparsers = parser.add_subparsers(help="subcommand options",
dest="subcommand")
add_parser = subparsers.add_parser("add", help="add or update")
add_parser.add_argument("target_num", metavar="NUM", type=int,
help=HELP_ADD)
ask_parser = subparsers.add_parser("ask", help=HELP_ASK)
ask_parser.add_argument("date_str", metavar="DATE", type=str,
help=HELP_DATE)
log_parser = subparsers.add_parser("log", help="view log")
log_parser.add_argument("--c", dest="lang_filter",
action="append_const", const="c",
help="show c submissions only")
log_parser.add_argument("--c++", "--cpp", dest="lang_filter",
action="append_const", const="c++",
help="show c++ submissions only")
log_parser.add_argument("--python", dest="lang_filter",
action="append_const", const="python",
help="show python submissions only")
log_parser.add_argument("--shell", dest="lang_filter",
action="append_const", const="shell",
help="show shell submissions only")
log_parser.add_argument("--easy", dest="difficulty_filter",
action="append_const", const=0,
help="show easy problems only")
log_parser.add_argument("--medium", dest="difficulty_filter",
action="append_const", const=1,
help="show medium problem only")
log_parser.add_argument("--hard", dest="difficulty_filter",
action="append_const", const=2,
help="show hard problems only")
log_parser.add_argument("--topic", dest="topic_filter", metavar="T",
action="store", nargs="+",
help=HELP_TOPIC)
log_parser.add_argument("--key", dest="key_topic_filter",
metavar="T", action="store", nargs="+",
help=HELP_KEY)
log_parser.add_argument("--new", dest="add_new", action="store_true",
default=False,
help="show initial attempts only")
log_parser.add_argument("--old", dest="add_old", action="store_true",
default=False,
help="show non-initial attempts only")
log_parser.add_argument("--show-topics", action="store_true",
default=False,
help="show topics for each submission")
log_parser.add_argument("--show-key-topics", action="store_true",
default=False,
help="show key topics for each submission")
status_parser = subparsers.add_parser("status", help="view status")
status_parser.add_argument("--list", action="store_true",
default=False, help=HELP_LIST)
args = parser.parse_args()
if not args.subcommand:
parser.print_help()
if (args.subcommand == "ask" and
not REGEX_OBJ_DICT["yy/mm/dd"].fullmatch(args.date_str)):
ask_parser.error("invalid DATE format " +
"(expecting yy/mm/dd, e.g. \"22/06/01\")")
return args
def run(self):
if not self.args.subcommand:
return
if self.args.subcommand != "ask":
print(f"Action : Changing directory to {self.rootpath}")
os.chdir(self.rootpath)
if self.args.subcommand == "add":
self.do_add(self.args.target_num)
elif self.args.subcommand == "ask":
self.do_ask(self.args.date_str)
elif self.args.subcommand == "log":
self.do_log()
elif self.args.subcommand == "status":
self.do_status(self.args.list)
def do_add(self, target_num):
search_dirs = ["."] + CODE_DIRS
staging = []
for sd in search_dirs:
for filename in os.listdir(sd):
filepath = os.path.join(sd, filename)
if not os.path.isfile(filepath):
continue
rec = scan(filepath)
if not rec:
continue
if not rec.problem:
print(f"Warning: Problem title not found in " +
f"{rec.filepath}")
continue
if rec.problem.num == target_num:
staging.append(rec)
problem_set = set([rec.problem for rec in staging])
if len(problem_set) != 1:
if not problem_set:
print(f"Fatal : No candidates for Problem #{target_num}. "
f"Please check. Exit.")
else:
print(f"Fatal : Multiple titles for Problem #{target_num}. "
f"Please check. Exit.")
for rec in staging:
print(" " * 10 +
f"- {rec.filepath}: \"{repr(rec.problem)}\"")
return
target_problem = staging[0].problem
target_basename = get_basename_from_problem(target_problem)
t_search_dirs = [".", TESTCASE_DIR]
for td in t_search_dirs:
for filename in os.listdir(td):
filepath = os.path.join(td, filename)
if not os.path.isfile(filepath):
continue
basename, ext = os.path.splitext(filename)
if (ext == ".txt" and
basename in [f"{target_num}",
f"{target_num:04d}",
target_basename]):
rec = Record(filepath=filepath, problem=target_problem,
lang="testcases", dates=[], topics=[],
key_topics=[])
staging.append(rec)
bad = False
for lang in set([rec.lang for rec in staging]):
lang_set = set([rec for rec in staging if rec.lang == lang])
if len(lang_set) != 1:
print(f"Fatal : Multiple {lang} candidates for Problem "
f"{target_problem}. Please Check. Exit.")
for rec in lang_set:
print(" " * 10 + f"- {rec.filepath}")
bad = True
if bad:
return
target_key_topics_set = set()
for rec in staging:
target_key_topics_set.update(rec.key_topics)
if not target_key_topics_set:
print(f"Fatal : No key topics specified for Problem "
f"{target_problem}. Please add. Exit.")
return
for rec in staging:
if rec.lang in SKIP_LIST:
continue
if not rec.dates:
print(f"Error : Missing dates from {rec.filepath}. "
f"Please add.")
if not rec.key_topics:
print(f"Warning: No key topics specified for {rec.filepath}")
elif len(rec.key_topics) != len(target_key_topics_set):
print(f"Warning: A smaller set of key topics specified for "
f"{rec.filepath}")
print(" " * 10 + f"- all key topics: {target_key_topics_set}")
print(" " * 10 + f"- {rec.filepath}: {rec.key_topics}")
for rec in staging:
target_dir = rec.lang
target_filename = target_basename + get_ext_from_lang(rec.lang)
target_filepath = os.path.join(target_dir, target_filename)
if rec.filepath != target_filepath:
print(f"Action : "
f"Moving \"{rec.filepath}\" ===> \"{target_filepath}\"")
os.rename(rec.filepath, target_filepath)
rec.filepath = target_filepath
target_name = target_problem.name
staging.sort(key=lambda rec: rec.lang)
target_line = (f"- [{target_num}. {target_name}]" +
f"(https://leetcode.com/problems/" +
f"{target_basename[5:]})")
for rec in staging:
tag = (rec.lang if rec.lang == "testcases" else
get_ext_from_lang(rec.lang)[1:])
target_line += f" [`{tag}`]({rec.lang}/{rec.filename})"
sup_to_sub, merged_set = map_topics(target_key_topics_set)
print(f"Note : Ready to add the line below to {merged_set} "
f"section(s) in README.md:")
print()
print(target_line)
print()
page = mo.Page("README.md")
changed = False
for level, line, fat, pos in page:
if level == 2 and line == "Topics":
sec = fat.children[pos]
changed |= mo.add_sections(sup_to_sub.keys(), sec)
break
for level, line, fat, pos in page:
if level == 2 and line == "Topics":
sec = fat.children[pos]
top = [x for x in sec.children if isinstance(x, mo.Section)]
for t in top:
if t.name in sup_to_sub:
changed |= mo.add_sections(sup_to_sub[t.name], t)
break
for level, line, fat, pos in page:
if level in {3, 4} and line in merged_set:
sec = fat.children[pos]
changed |= mo.add_line(target_line, sec, key=get_problem_num)
if not changed:
print(f"Note : No changes made.")
return
bak_datetime = datetime.datetime.now().strftime("%y%m%d%H%M%S")
bak_filename = f"README.{bak_datetime}.md"
if not (os.path.exists(".bak") and os.path.isdir(".bak")):
print("Action : Making .bak directory")
os.mkdir(".bak")
bak_filepath = os.path.join(".bak", bak_filename)
print(f"Action : Backing up old README.md ===> {bak_filepath}")
os.rename("README.md", os.path.join(".bak", bak_filename))
print(f"Action : == APPLYING CHANGES ==")
page.dump("README.md")
print(f"Note : A summary of changes -")
with open(bak_filepath) as bak_file:
with open("README.md") as new_file:
diff = difflib.ndiff(bak_file.readlines(),
new_file.readlines())
delta = [x.rstrip() for x in diff if x.startswith("+ ") or
x.startswith("- ")]
print()
for line in delta:
print(line)
def do_ask(self, date_str):
ro = REGEX_OBJ_DICT["yy/mm/dd"]
y, m, d = ro.search(date_str).groups()
y = int(y) + (2000 if int(y) < 100 else 0)
m = int(m)
d = int(d)
try:
x = datetime.datetime(y, m, d)
except ValueError as e:
print("Error:", e)
return
print(x.strftime("%d %B %Y is %A."))
system = platform.system()
if system in {"Darwin", "Windows"}:
a = x.strftime("%a")
y = y % 100
res = f"{y:02d}/{m:02d}/{d:02d} = {a}"
if platform.system() == "Darwin":
cmd = "pbcopy"
elif platform.system() == "Windows":
cmd = "clip"
subprocess.run(cmd, universal_newlines=True, input=res)
print(f"\"{res}\" has been copied to your clipboard.")
def do_log(self):
records = []
search_dirs = CODE_DIRS
topics_map = {}
key_topics_map = {}
for sd in search_dirs:
for filename in os.listdir(sd):
_, ext = os.path.splitext(filename)
if ext != get_ext_from_lang(sd):
continue
filepath = os.path.join(sd, filename)
rec = scan(filepath)
if not rec.problem:
print(f"Warning: Problem title not found in "
f"{rec.filepath}")
continue
topics_map.setdefault(rec.problem.num, set())
for t in rec.topics:
topics_map[rec.problem.num].add(t)
key_topics_map.setdefault(rec.problem.num, set())
for kt in rec.key_topics:
key_topics_map[rec.problem.num].add(kt)
if rec.dates:
if ((not self.args.lang_filter or
rec.lang in self.args.lang_filter) and (
not self.args.difficulty_filter or
rec.problem.difficulty_level in
self.args.difficulty_filter) and (
not self.args.topic_filter or (
set(rec.topics) &
set(self.args.topic_filter))) and (
not self.args.key_topic_filter or (
set(rec.key_topics) &
set(self.args.key_topic_filter)))):
records.append(rec)
for num in topics_map:
topics_map[num] = sorted(list(topics_map[num]))
for num in key_topics_map:
key_topics_map[num] = [x + "*" for x in
sorted(list(key_topics_map[num]))]
submissions = []
for rec in records:
for i, d in enumerate(rec.dates):
new = i == len(rec.dates) - 1
if ((self.args.add_new and new) or (
self.args.add_old and not new) or (
not self.args.add_new and not self.args.add_old)):
sub = Submission(d, rec.problem, rec.lang, new)
submissions.append(sub)
submissions.sort()
print()
for su in submissions:
line = ["*" if su.new else " ", str(su.date), repr(su.problem)]
if self.args.show_key_topics:
line.append(f"{key_topics_map[su.problem.num]}")
if self.args.show_topics:
line.append(f"{topics_map[su.problem.num]}")
if su.lang != "c++":
line.append(f"[{su.lang.title()}]")
print(" ".join(line))
print()
print(f"There are {len(submissions)} qualified submissions from " +
f"{len(records)} dated source code files.")
def do_status(self, full_list):
search_dirs = ["."] + CODE_DIRS
missing_dates = []
missing_topics = []
missing_key_topics = []
inconsistent_topics_set = set()
inconsistent_key_topics_set = set()
record_map = {}
for sd in search_dirs:
for filename in sorted(os.listdir(sd)):
filepath = os.path.join(sd, filename)
if not os.path.isfile(filepath):
continue
rec = scan(filepath)
if not rec:
continue
if not rec.problem:
print(f"Warning: Problem title not found in "
f"{rec.filepath}")
continue
if not rec.dates:
missing_dates.append(rec.filepath)
if not rec.topics:
missing_topics.append(rec.filepath)
elif not rec.key_topics:
missing_key_topics.append(rec.filepath)
record_map.setdefault(rec.problem.num, [])
record_map[rec.problem.num].append(rec)
record_map = dict(sorted(record_map.items()))
bad_num = set()
for num in record_map:
records = record_map[num]
problem_set = set(rec.problem for rec in records)
if len(problem_set) != 1:
print(f"Error : Multiple titles for Problem #{num}.")
for rec in records:
print(" " * 10 +
f"- {rec.filepath}: \"{repr(rec.problem)}\"")
bad_num.add(num)
for lang in set([rec.lang for rec in records]):
lang_set = set([rec for rec in records if rec.lang == lang])
if len(lang_set) != 1:
print(f"Error : Multiple {lang} candidates for Problem "
f"{rec.problem}.")
for rec in lang_set:
print(" " * 10 + f"- {rec.filepath}")
bad_num.add(num)
for b in bad_num:
del record_map[b]
for num in record_map:
topics_set = set([t for rec in record_map[num]
for t in rec.topics])
key_topics_set = set([kt for rec in record_map[num]
for kt in rec.key_topics])
for rec in record_map[num]:
target_basename = get_basename_from_problem(rec.problem)
target_filename = target_basename + get_ext_from_lang(rec.lang)
target_dir = rec.lang
target_filepath = os.path.join(target_dir, target_filename)
if rec.filepath != target_filepath:
print(f"Warning: Do you want to move \"{rec.filepath}\" "
f"===> \"{target_filepath}\"?")
if rec.topics and set(rec.topics) != topics_set:
inconsistent_topics_set.add(num)
if rec.key_topics and set(rec.key_topics) != key_topics_set:
inconsistent_key_topics_set.add(num)
t_search_dirs = [".", TESTCASE_DIR]
for td in t_search_dirs:
for filename in os.listdir(td):
filepath = os.path.join(td, filename)
basename, ext = os.path.splitext(filename)
if not os.path.isfile(filepath) or ext != ".txt":
if td == TESTCASE_DIR:
print(f"Warning: What is \"{filepath}\"? Expect only "
f"txt files in {TESTCASE_DIR}.")
continue
try:
guess_num = 0
if basename[:4].isnumeric():
guess_num = int(basename[:4])
elif basename[:3].isnumeric():
guess_num = int(basename[:3])
elif basename[:2].isnumeric():
guess_num = int(basename[:2])
elif basename[:1].isnumeric():
guess_num = int(basename[:1])
assert guess_num
expected_basename = get_basename_from_problem(
record_map[guess_num][0].problem)
expected_filepath = os.path.join(TESTCASE_DIR,
expected_basename +
".txt")
if expected_filepath != filepath:
print(f"Warning: Do you want to move \"{filepath}\" "
f"===> \"{expected_filepath}\"?")
except Exception as e:
print(f"Warning: Is \"{filepath}\" a testcases file? "
f"If so please name it properly.")
if not full_list:
return
if missing_dates:
print("Warning: --- Missing Dates ---")
for filepath in missing_dates:
print(" " * 10 + "-", filepath)
if missing_topics:
print("Warning: --- Missing Topics ---")
for filepath in missing_topics:
print(" " * 10 + "-", filepath)
if missing_key_topics:
print("Warning: --- Having Topics But Missing Key Topics ---")
for filepath in missing_key_topics:
print(" " * 10 + "-", filepath)
if inconsistent_topics_set:
print("Warning: --- Having Topics But Inconsistent Across "
"Languages ---")
for num in inconsistent_topics_set:
print(" " * 10 + "+", record_map[num][0].problem)
for rec in record_map[num]:
print(" " * 12 + "-", rec.filepath, rec.topics)
if inconsistent_key_topics_set:
print("Warning: --- Having Key Topics But Inconsistent Across "
"Languages ---")
for num in inconsistent_key_topics_set:
print(" " * 10 + "+", record_map[num][0].problem)
for rec in record_map[num]:
print(" " * 12 + "-", rec.filepath, rec.topics)
# --- main --------------------------------------------------------------------
def main():
prog = Program()
prog.run()
if __name__ == "__main__":
main()
```
#### File: leetcode/scripts/mo.py
```python
import bisect
import io
import os
import sys
class Section:
def __init__(self, level=0, name="", father=None, children=None):
"""
Why children=None instead of children=[]?
Given
def foo(a=[]):
a.append(1)
print(a)
Can you predict the output?
> foo()
> foo()
> foo([2])
> foo([3])
> foo()
"""
self.level = level
self.name = name
self.father = father
self.children = [] if not children else children
def __iter__(self):
return SectionIterator(self)
def __repr__(self):
return f"({self.level}:{len(self.children)}) {self.name}"
def __str__(self):
prefix = "" if self.level != 7 else "#" * self.level
return f"{prefix} {self.name}"
class SectionIterator:
def __init__(self, section):
self.sec_stack = [section]
self.pos_stack = [0]
self.head_todo = bool(section.level)
def _has_next(self):
while self.sec_stack:
sec = self.sec_stack[-1]
pos = self.pos_stack[-1]
if self.head_todo:
return True
if pos == len(sec.children):
self.sec_stack.pop()
self.pos_stack.pop()
else:
if isinstance(sec.children[pos], str):
return True
else:
self.pos_stack[-1] += 1
self.sec_stack.append(sec.children[pos])
self.pos_stack.append(0)
self.head_todo = True
return False
def __next__(self):
if not self._has_next():
raise StopIteration
sec = self.sec_stack[-1]
pos = self.pos_stack[-1]
if self.head_todo:
level = sec.level
line = sec.name
fat = sec.father
pos = self.pos_stack[-2] - 1
self.head_todo = False
else:
level = 7
line = sec.children[pos]
fat = sec
self.pos_stack[-1] += 1
return level, line, fat, pos
def parse_line(line):
count = 0
while count != len(line) and line[count] == '#':
count += 1
if 1 <= count <= 6 and count < len(line) and line[count] == ' ':
level = count
text = line[count + 1:]
else:
level = 7
text = line
return level, text
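# For example, parse_line("## Topics") returns (2, "Topics"), while a non-heading
# line such as parse_line("- [1. Two Sum](...)") returns (7, "- [1. Two Sum](...)");
# level 7 is the sentinel used throughout this module for plain content lines.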
class Page:
def __init__(self, filename=None):
self.root = Section()
if filename:
self.load(filename)
def get_father(self, prev, level):
while prev.level and level <= prev.level:
prev = prev.father
return prev
def load(self, filename):
with open(filename) as file:
prev = self.root
for line in file:
line = line.rstrip()
level, text = parse_line(line)
fat = self.get_father(prev, level)
if level == 7:
fat.children.append(text)
else:
sec = Section(level, text, fat)
fat.children.append(sec)
prev = sec
def dump(self, filename, newline="\n", override=False):
if not override and os.path.exists(filename):
raise FileExistsError
with io.open(filename, "w", newline=newline) as file:
for level, line, fat, pos in self:
if level != 7:
print("#" * level, end=" ", file=file)
print(line, file=file)
def __iter__(self):
return SectionIterator(self.root)
def add_sections(add_set, fat):
is_section = [isinstance(x, Section) for x in fat.children]
beg = bisect.bisect(is_section, False)
cur_set = set(x.name for x in fat.children[beg:])
diff_set = add_set - cur_set
if not diff_set:
return False
lev = min(fat.level + 1, 6)
add = [Section(level=lev, name=x, father=fat, children=[""])
for x in diff_set]
fat.children.extend(add)
fat.children[beg:] = sorted(fat.children[beg:], key=lambda x: x.name)
return True
def add_line(line, fat, key):
is_section = [isinstance(x, Section) for x in fat.children]
end = bisect.bisect(is_section, False)
values = [key(x) for x in fat.children[:end]]
add_val = key(line)
pos = bisect.bisect_left(values, add_val)
if pos != end:
if fat.children[pos] == line:
return False
elif values[pos] == add_val:
fat.children[pos] = line
else:
fat.children.insert(pos, line)
else:
fat.children.append(line)
fat.children.append("")
return True
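# Example usage (a sketch, mirroring how lit.py drives this module; the file names
# are placeholders):
#   page = Page("README.md")
#   for level, line, fat, pos in page:
#       if level == 2 and line == "Topics":
#           add_sections({"Stack", "String"}, fat.children[pos])
#           break
#   page.dump("README.new.md")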
``` |
{
"source": "aafulei/sublime-adapted-commands",
"score": 3
} |
#### File: aafulei/sublime-adapted-commands/default_folder_for_new_file.py
```python
import os
# sublime
import sublime
import sublime_plugin
class DefaultFolderForNewFile(sublime_plugin.EventListener):
"""Set default folder to save a new file."""
def on_new_async(self, view):
try:
# set new file's default folder same as that of last active file
active_view = view.window().active_view()
active_index = view.window().views().index(active_view)
last_view = view.window().views()[active_index-1]
file = last_view.file_name()
dir_path = os.path.dirname(file)
active_view.settings().set("default_dir", dir_path)
except Exception as e:
try:
# set new file's default folder the first opened folder
view.settings().set("default_dir", view.window().folders()[0])
except Exception as e:
pass
``` |
{
"source": "aafulei/sublime-expand-selection-to-line-upwards",
"score": 2
} |
#### File: aafulei/sublime-expand-selection-to-line-upwards/expand_selection_to_line_upwards.py
```python
import sublime
import sublime_plugin
class ExpandSelectionToLineAtomicCommand(sublime_plugin.TextCommand):
def run(self, edit, upwards):
for region in self.view.sel():
region_begin = region.begin()
region_end = region.end()
line_begin = self.view.line(region_begin).begin()
line_end = self.view.line(region_end).end()
# expand to one line above / below if this line has been covered
covered = line_begin == region_begin and line_end == region_end
if upwards:
line_begin = self.view.line(region_begin - covered).begin()
new_region = sublime.Region(line_end, line_begin)
else:
line_end = self.view.line(region_end + covered).end()
new_region = sublime.Region(line_begin, line_end)
self.view.sel().add(new_region)
class ExpandSelectionToLineUpwardsCommand(sublime_plugin.TextCommand):
def run(self, edit):
def callback():
cmd = "expand_selection_to_line_atomic"
args = {"upwards": True}
self.view.run_command(cmd, args)
# enable soft undo line by line
delay = 0
sublime.set_timeout(callback, delay)
class ExpandSelectionToLineDownwardsCommand(sublime_plugin.TextCommand):
def run(self, edit):
def callback():
cmd = "expand_selection_to_line_atomic"
args = {"upwards": False}
self.view.run_command(cmd, args)
# enable soft undo line by line
delay = 0
sublime.set_timeout(callback, delay)
``` |
{
"source": "aafulei/sublime-user-settings",
"score": 3
} |
#### File: sublime-user-settings/scripts/align-keymap.py
```python
import argparse
import os
import platform
import shutil
# self
import common
MACOS = "Default (OSX).sublime-keymap"
LINUX = "Default (Linux).sublime-keymap"
WINDOWS = "Default (Windows).sublime-keymap"
# platform.system() returns Linux, Darwin, Java, Windows or an empty string
SYSTEM = platform.system()
if SYSTEM == "Darwin":
SYSTEM = "macOS"
TO_SYS = "Windows"
SOURCE = MACOS
TARGET = WINDOWS
elif SYSTEM == "Windows":
TO_SYS = "macOS"
SOURCE = WINDOWS
TARGET = MACOS
else:
TO_SYS = ""
SOURCE = TARGET = ""
ERROR = f"Unsupported system \"{SYSTEM}\". Only supports macOS <---> Windows."
DESC = f"Align Linux and {TO_SYS} Keymaps with {SYSTEM} Keymap"
def _append(log, lineno, old, new):
def trim(line):
if len(line) < 100:
return line
return line[:96] + " ...\n"
log += "% Line {} (old) : {}".format(lineno, trim(old))
log += "% Line {} (new) : {}".format(lineno, trim(new))
return log
def main():
# 0. check target system and ask for confirmation
if not TARGET:
print(ERROR)
return
if not common.confirm(desc=DESC):
return
# 1. change working dir
os.chdir(os.path.join(os.path.dirname(__file__), ".."))
# 2. check file existence
if not common.check_existence([LINUX, MACOS, WINDOWS], required=[SOURCE]):
return
# 3. Create a temp target keymap
tmp = "{}.tmp".format(TARGET)
log = ""
with common.Prompt(f"% Create a temp {TO_SYS} keymap"):
with open(SOURCE, "r") as ifile:
with open(tmp, "w") as ofile:
for lineno, line in enumerate(ifile):
ls = line.strip()
if ls.startswith("//") and ls.endswith(TO_SYS):
new = line.replace("// ", "", 1)
log = _append(log, lineno, line, new)
ofile.write(new)
elif not ls.startswith("//") and ls.endswith(SYSTEM):
new = line.replace("{", "// {", 1)
log = _append(log, lineno, line, new)
ofile.write(new)
else:
ofile.write(line)
print(log, end="")
# 4. temp target keymap ===> target keymap
common.move(tmp, TARGET)
# 5. macOS keymap ===> Linux keymap
common.copy(MACOS, LINUX)
if __name__ == "__main__":
main()
``` |
{
"source": "AAFun/scikit-snowland",
"score": 3
} |
#### File: snowland/gis_tool/transformer.py
```python
import numpy as np
from pyproj import Transformer
def utm_transformer(latitude, longitude):
"""
4326转utm坐标系
"""
utm_x = np.empty_like(latitude)
utm_y = np.empty_like(longitude)
for i, (x, y) in enumerate(zip(longitude, latitude)):
zone_num = int(x / 6) + 31
utm_source_str = f"EPSG:326{zone_num}"
transformer = Transformer.from_crs("EPSG:4326", utm_source_str)
utm_x[i], utm_y[i] = transformer.transform(y, x)
return utm_x, utm_y
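# Example usage (a sketch; assumes 1-D numpy arrays of WGS84 coordinates in degrees):
#   lat = np.array([31.23, 31.24])
#   lon = np.array([121.47, 121.48])
#   easting, northing = utm_transformer(lat, lon)  # metres in the matching UTM zone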
```
#### File: graphics/solid_geometry/_ploygon.py
```python
from snowland.graphics.core.computational_geometry_2d import Point2D, Polygon, PolygonWithoutHoles, LineString2D, ConvexPolygon, Rectangle
from snowland.graphics.core.computational_geometry_base import Vector
from snowland.graphics.core.analytic_geometry_2d import Line2D
import numpy as np
npl = np.linalg
npa = np.array
npr = np.random
def __in_polygon(p: Point2D, polygon):
"""
Determine whether the point is inside the polygon (points on the edges are not included).
:param point:
:param polygon:
:return:
"""
for point in polygon.p:
if p == point:  # uses the overloaded equality operator
return True
for hole in polygon.holes:
for point in hole:
if p == point:  # uses the overloaded equality operator
return True
count = 0
flag = False
for i, point in enumerate(polygon.p[:, :]):  # note: when i == 0, i - 1 is -1, so the loop visits every edge
# ray-casting (even-odd) test: does a horizontal ray from p cross the edge (point, polygon.p[i - 1])?
if (point[1] <= p[1] < polygon.p[i - 1, 1] or polygon.p[i - 1, 1] <= p[1] < point[1]) and \
(p[0] < (polygon.p[i - 1, 0] - point[0]) * (p[1] - point[1]) / (polygon.p[i - 1, 1] - point[1]) + point[0]):
flag = not flag
count += 1
for hole in polygon.holes:
polygon_temp = PolygonWithoutHoles(hole)
for i, point in enumerate(polygon_temp.p[:, :]):  # note: when i == 0, i - 1 is -1, so the loop visits every edge
if (point[1] <= p[1] < polygon_temp.p[i - 1, 1] or polygon_temp.p[i - 1, 1] <= p[1] < point[1]) and \
(p[0] < (polygon_temp.p[i - 1, 0] - point[0]) * (p[1] - point[1]) / (polygon_temp.p[i - 1, 1] - point[1]) + point[0]):
flag = not flag
count += 1
return flag
def in_polygon(point, polygon: Polygon):
"""
Determine whether one or more points are inside the polygon.
:param point:
:param polygon:
:return:
"""
if isinstance(point, Point2D):
return __in_polygon(point, polygon)
if isinstance(point, np.ndarray) and len(point.shape) == 1:
return __in_polygon(Point2D(point), polygon)
else:
# multiple points: return one result per point
if isinstance(point[0], Point2D):
return npa([__in_polygon(p, polygon) for p in point])
if isinstance(point[0], np.ndarray) and len(point[0].shape) == 1:
return npa([__in_polygon(Point2D(p), polygon) for p in point])
raise ValueError('error')
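# Example usage (a sketch; assumes PolygonWithoutHoles accepts an array of vertices
# and Point2D accepts a coordinate array -- check the core geometry classes for the
# exact constructors):
#   square = PolygonWithoutHoles(npa([[0, 0], [4, 0], [4, 4], [0, 4]]))
#   in_polygon(Point2D(npa([2.0, 2.0])), square)                      # a single bool
#   pts = [Point2D(npa([2.0, 2.0])), Point2D(npa([5.0, 5.0]))]
#   in_polygon(pts, square)                                           # one bool per point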
# Compute the convex hull with the Graham scan.
# Many implementations found online do not work well.
# For the algorithm, see "Introduction to Algorithms", 3rd edition, p. 605.
# https://blog.csdn.net/john_bian/article/details/85221039
def get_bottom_point(points):
"""
返回points中纵坐标最小的点的索引,如果有多个纵坐标最小的点则返回其中横坐标最小的那个
:param points:
:return:
"""
min_index = 0
n = len(points)
for i in range(0, n):
if points[i][1] < points[min_index][1] or (
points[i][1] == points[min_index][1] and points[i][0] < points[min_index][0]):
min_index = i
return min_index
def sort_polar_angle_cos(points, center_point: Point2D):
"""
Sort the points by polar angle around the center point, using the cosine of the angle.
:param points: the points to sort
:param center_point: the center point
:return:
"""
n = len(points)
cos_value = []
rank = []
norm_list = []
for i in range(0, n):
point_ = points[i]
point = [point_[0] - center_point[0], point_[1] - center_point[1]]
rank.append(i)
norm_value = npl.norm(point)
norm_list.append(norm_value)
if norm_value == 0:
cos_value.append(1)
else:
cos_value.append(point[0] / norm_value)
for i in range(0, n - 1):
index = i + 1
while index > 0:
if cos_value[index] > cos_value[index - 1] or (
cos_value[index] == cos_value[index - 1] and norm_list[index] > norm_list[index - 1]):
temp = cos_value[index]
temp_rank = rank[index]
temp_norm = norm_list[index]
cos_value[index] = cos_value[index - 1]
rank[index] = rank[index - 1]
norm_list[index] = norm_list[index - 1]
cos_value[index - 1] = temp
rank[index - 1] = temp_rank
norm_list[index - 1] = temp_norm
index = index - 1
else:
break
sorted_points = []
for i in rank:
sorted_points.append(points[i])
return sorted_points
def vector_angle(vector):
"""
Return the angle between the vector and [1, 0], measured as the counter-clockwise rotation from [1, 0] needed to reach the vector.
:param vector:
:return:
"""
norm_ = npl.norm(vector)
if norm_ == 0:
return 0
angle = np.arccos(vector[0] / norm_)
if vector[1] >= 0:
return angle
else:
return 2 * np.pi - angle
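# For example, vector_angle([0, 1]) == pi / 2 and vector_angle([0, -1]) == 3 * pi / 2:
# angles are measured counter-clockwise from [1, 0] over the full 0..2*pi range.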
def coss_multi(v1, v2):
"""
Compute the cross product of two 2-D vectors.
:param v1:
:param v2:
:return:
"""
return v1[0] * v2[1] - v1[1] * v2[0]
def graham_scan(points):
# print("Graham扫描法计算凸包")
bottom_index = get_bottom_point(points)
bottom_point = points.pop(bottom_index)
sorted_points = sort_polar_angle_cos(points, bottom_point)
m = len(sorted_points)
if m < 2:
print("点的数量过少,无法构成凸包")
return
stack = []
stack.append(bottom_point)
stack.append(sorted_points[0])
stack.append(sorted_points[1])
for i in range(2, m):
length = len(stack)
top = stack[length - 1]
next_top = stack[length - 2]
v1 = [sorted_points[i][0] - next_top[0], sorted_points[i][1] - next_top[1]]
v2 = [top[0] - next_top[0], top[1] - next_top[1]]
while coss_multi(v1, v2) >= 0:
stack.pop()
length = len(stack)
top = stack[length - 1]
next_top = stack[length - 2]
v1 = [sorted_points[i][0] - next_top[0], sorted_points[i][1] - next_top[1]]
v2 = [top[0] - next_top[0], top[1] - next_top[1]]
stack.append(sorted_points[i])
return ConvexPolygon(stack)
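# Worked example (a sketch; note that graham_scan pops the bottom point from the
# input list, so pass a copy if the original order matters):
#   pts = [[0, 0], [2, 0], [2, 2], [0, 2], [1, 1]]
#   hull = graham_scan(pts)
#   # the interior point [1, 1] is rejected and the hull is returned as a
#   # ConvexPolygon built from the four corners.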
``` |
{
"source": "aafusam/Phono-Music-Bot",
"score": 2
} |
#### File: mbot/plugins/greetings.py
```python
from datetime import datetime
from pyrogram import filters
from pyrogram.types import InlineKeyboardButton,InlineKeyboardMarkup
from pyrogram.raw.functions import Ping
from mbot import LOG_GROUP, OWNER_ID, SUDO_USERS, Mbot,AUTH_CHATS
from os import execvp,sys
@Mbot.on_message(filters.command("start"))
async def start(client,message):
reply_markup = [[
InlineKeyboardButton(
text="Our Channel", url="https://t.me/StarterNetworkz"),
InlineKeyboardButton(
text="Movies Group",
url="https://t.me/+f6dvEmEpKAwwODM1"),
InlineKeyboardButton(text="Developer",
url="https://t.me/AafuSam013"),
],
[
InlineKeyboardButton(text="Help",callback_data="helphome")
]]
if LOG_GROUP:
invite_link = await client.create_chat_invite_link(chat_id=(int(LOG_GROUP) if str(LOG_GROUP).startswith("-100") else LOG_GROUP))
reply_markup.append([InlineKeyboardButton("LOG Channel", url=invite_link.invite_link)])
if message.chat.type != "private" and message.chat.id not in AUTH_CHATS and message.from_user.id not in SUDO_USERS:
return await message.reply_text("This Bot Will Not Work In Groups Unless It's Authorized.",
reply_markup=InlineKeyboardMarkup(reply_markup))
return await message.reply_text(f"👋Hello {message.from_user.first_name}, 🎧I'm A Simple Telegram Bot To Help You To Download Music From Many Platforms. Currently I Only Support YouTube, Spotify And Deezer!🎧",
reply_markup=InlineKeyboardMarkup(reply_markup))
@Mbot.on_message(filters.command("restart") & filters.chat(OWNER_ID) & filters.private)
async def restart(_,message):
await message.delete()
execvp(sys.executable,[sys.executable,"-m","mbot"])
@Mbot.on_message(filters.command("log") & filters.chat(SUDO_USERS))
async def send_log(_,message):
await message.reply_document("bot.log")
@Mbot.on_message(filters.command("ping"))
async def ping(client,message):
start = datetime.now()
await client.send(Ping(ping_id=0))
ms = (datetime.now() - start).microseconds / 1000
await message.reply_text(f"**Pong!**\nResponse time: `{ms} ms`")
HELP = {
"Youtube": "Cool😉, Now Send Your **Youtube** Music/Video Link To Download",
"Spotify": "Cool😉, Now Send Your **Spotify** Track/Playlist/Album/Show/Episode's Link To Download",
"Deezer": "Cool😉, Now Send Your **Deezer** Playlist/Album/Track Link To Download",
"Jiosaavn": "Sorry☹️, Currently Not Available",
"SoundCloud": "Sorry☹️, Currently Not Available",
"Support": "@AafuSam013"
}
@Mbot.on_message(filters.command("help"))
async def help(_,message):
button = [
[InlineKeyboardButton(text=i, callback_data=f"help_{i}")] for i in HELP
]
await message.reply_text(f"👋Hello **{message.from_user.first_name}**, I'm **@PhonoMusicBot**.\n📢Select Below Platforms From Which You Want To Download Music",
reply_markup=InlineKeyboardMarkup(button))
@Mbot.on_callback_query(filters.regex(r"help_(.*?)"))
async def helpbtn(_,query):
i = query.data.replace("help_","")
button = InlineKeyboardMarkup([[InlineKeyboardButton("Back",callback_data="helphome")]])
text = f"Help for **{i}**\n\n{HELP[i]}"
await query.message.edit(text = text,reply_markup=button)
@Mbot.on_callback_query(filters.regex(r"helphome"))
async def help_home(_,query):
button = [
[InlineKeyboardButton(text=i, callback_data=f"help_{i}")] for i in HELP
]
await query.message.edit(f"👋Hello **{query.from_user.first_name}**, I'm **@PhonoMusicBot**.\n📢Select Below Platforms From Which You Want To Download Music",
reply_markup=InlineKeyboardMarkup(button))
``` |
{
"source": "aag09/azurecli",
"score": 2
} |
#### File: windows/scripts/build-packages.py
```python
from __future__ import print_function
import glob
import os
import sys
import tempfile
import subprocess
def _error_exit(msg):
print('ERROR: '+msg, file=sys.stderr)
sys.exit(1)
def _print_status(msg=''):
print('-- '+msg)
def _get_tmp_dir():
return tempfile.mkdtemp()
def _get_tmp_file():
return tempfile.mkstemp()[1]
def _exec_command(command_list, cwd=None, stdout=None):
"""Returns True in the command was executed successfully"""
try:
_print_status('Executing {}'.format(command_list))
subprocess.check_call(command_list, stdout=stdout, cwd=cwd)
return True
except subprocess.CalledProcessError as err:
print(err, file=sys.stderr)
return False
def _build_package(path_to_package, dist_dir):
cmd_success = _exec_command(['python', 'setup.py', 'bdist_wheel', '-d', dist_dir], cwd=path_to_package)
if not cmd_success:
_error_exit('Error building {}.'.format(path_to_package))
def build_packages(clone_root, dist_dir):
packages_to_build = [
os.path.join(clone_root, 'src', 'azure-cli'),
os.path.join(clone_root, 'src', 'azure-cli-core'),
os.path.join(clone_root, 'src', 'azure-cli-nspkg'),
os.path.join(clone_root, 'src', 'azure-cli-command_modules-nspkg'),
]
packages_to_build.extend(glob.glob(os.path.join(clone_root, 'src', 'command_modules', 'azure-cli-*')))
for p in packages_to_build:
if os.path.isfile(os.path.join(p, 'setup.py')):
_build_package(p, dist_dir)
if __name__ == '__main__':
if len(sys.argv) == 1:
raise ValueError('Please provide a temporary path for locally built packages')
dist_dir = sys.argv[1]
clone_root = sys.argv[2]
build_packages(clone_root, dist_dir)
print("package were built to {}".format(dist_dir))
print("Done.")
```
#### File: command_modules/batchai/_params.py
```python
from enum import Enum
from azure.cli.command_modules.vm._actions import get_vm_sizes
from azure.cli.core.commands.parameters import (
ignore_type, location_type, resource_group_name_type, enum_choice_list, get_one_of_subscription_locations)
from azure.cli.core.sdk.util import ParametersContext
from azure.mgmt.storage.models import SkuName
def get_vm_size_completion_list(prefix, action, parsed_args, **kwargs): # pylint: disable=unused-argument
try:
location = parsed_args.location
except AttributeError:
location = get_one_of_subscription_locations()
result = get_vm_sizes(location)
return [r.name for r in result]
class SupportedImages(Enum): # pylint: disable=too-few-public-methods
ubuntu_tls = "UbuntuLTS"
ubuntu_dsvm = "UbuntuDSVM"
with ParametersContext(command='batchai cluster create') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.argument('location', options_list=('--location', '-l'), arg_type=location_type,
help='Location. You can configure the default location using `az configure --defaults '
'location=<location>` or specify it in the cluster configuration file.')
c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.')
c.argument('user_name', options_list=('--user-name', '-u'),
help='Name of the admin user to be created on every compute node.', arg_group='Admin Account')
c.argument('ssh_key', options_list=('--ssh-key', '-k'),
help='SSH public key value or path.', arg_group='Admin Account')
c.argument('password', options_list=('--password', '-p'),
help='Password.', arg_group='Admin Account')
c.argument('image', options_list=('--image', '-i'), arg_group='Nodes',
help='Operating system.', **enum_choice_list(SupportedImages))
c.argument('vm_size', options_list=('--vm-size', '-s'),
help='VM size (e.g. Standard_NC6 for 1 GPU node)', completer=get_vm_size_completion_list,
arg_group='Nodes')
c.argument('min_nodes', options_list=('--min',),
help='Min nodes count.', type=int, arg_group='Nodes')
c.argument('max_nodes', options_list=('--max',),
help='Max nodes count.', type=int, arg_group='Nodes')
c.argument('nfs_name', options_list=('--nfs',),
help='Name of a file server to mount. If you need to mount more than one file server, configure them in '
'a configuration file and use --config option.',
arg_group='File Server Mount')
c.argument('nfs_resource_group', options_list=('--nfs-resource-group',),
help='Resource group in which file server is created. Can be omitted if the file server and the cluster '
'belong to the same resource group',
arg_group='File Server Mount')
c.argument('nfs_mount_path', options_list=('--nfs-mount-path',),
help='Relative mount path for nfs. The nfs will be available at '
'$AZ_BATCHAI_MOUNT_ROOT/<relative_mount_path> folder.',
arg_group='File Server Mount')
c.argument('azure_file_share', options_list=('--afs-name',),
help='Name of the azure file share to mount. Please provide AZURE_BATCHAI_STORAGE_ACCOUNT and '
'AZURE_BATCHAI_STORAGE_KEY environment variables or add batchai/storage_key and '
'batchai/storage_account values to az configuration file containing storage account name and key.',
arg_group='Azure File Share Mount')
c.argument('afs_mount_path', options_list=('--afs-mount-path',),
help='Relative mount path for Azure File share. The file share will be available at '
'$AZ_BATCHAI_MOUNT_ROOT/<relative_mount_path> folder. If you need to mount more than one Azure '
'Storage container, configure them in a configuration file and use --config option.',
arg_group='Azure File Share Mount')
c.argument('container_name', options_list=('--container-name',),
help='Name of Azure Storage container to mount. Please provide AZURE_BATCHAI_STORAGE_ACCOUNT and '
'AZURE_BATCHAI_STORAGE_KEY environment variables or add batchai/storage_key and '
'batchai/storage_account values to az configuration file containing storage account name and key. '
'If you need to mount more than one Azure Storage container, configure them in a configuration '
'file and use --config option.',
arg_group='Azure Storage Container Mount')
c.argument('container_mount_path', options_list=('--container-mount-path',),
help='Relative mount path for Azure Storage container. The container will be available at '
'$AZ_BATCHAI_MOUNT_ROOT/<relative_mount_path> folder.',
arg_group='Azure Storage Container Mount')
c.argument('json_file', options_list=('--config', '-c'),
help='A path to a json file containing cluster create parameters '
'(json representation of azure.mgmt.batchai.models.ClusterCreateParameters).',
arg_group='Advanced')
with ParametersContext(command='batchai cluster resize') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.')
c.argument('target', options_list=('--target', '-t'), help='Target number of compute nodes.')
with ParametersContext(command='batchai cluster auto-scale') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.')
c.argument('min_nodes', options_list=('--min',), help='Minimum number of nodes.')
c.argument('max_nodes', options_list=('--max',), help='Maximum number of nodes.')
with ParametersContext(command='batchai cluster delete') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.')
with ParametersContext(command='batchai cluster show') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.')
with ParametersContext(command='batchai cluster list') as c:
c.argument('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
# Not implemented yet
c.register_alias('clusters_list_options', options_list=('--clusters-list-options',), arg_type=ignore_type)
with ParametersContext(command='batchai cluster list-nodes') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.')
with ParametersContext(command='batchai job create') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.argument('location', options_list=('--location', '-l'), arg_type=location_type,
help='Location. You can configure the default location using `az configure --defaults '
'location=<location>` or specify it in the job configuration file.')
c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.')
c.argument('json_file', options_list=('--config', '-c'),
help='A path to a json file containing job create parameters '
'(json representation of azure.mgmt.batchai.models.JobCreateParameters).')
c.argument('cluster_name', options_list=('--cluster-name',),
help='If specified, the job will run on the given cluster instead of the '
'one configured in the json file.')
c.argument('cluster_resource_group', options_list=('--cluster-resource-group',),
help='Specifies a resource group for the cluster given with --cluster-name parameter. '
'If omitted, --resource-group value will be used.')
with ParametersContext(command='batchai job terminate') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.')
with ParametersContext(command='batchai job delete') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.')
with ParametersContext(command='batchai job show') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.')
with ParametersContext(command='batchai job list') as c:
c.argument('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
# Not implemented yet
c.register_alias('jobs_list_options', options_list=('--jobs-list-options',), arg_type=ignore_type)
with ParametersContext(command='batchai job list-nodes') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.')
with ParametersContext(command='batchai job list-files') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.')
c.register_alias('directory', options_list=('--output-directory-id', '-d'),
help='The Id of the Job output directory (as specified by "id" element in outputDirectories '
'collection in job create parameters). Use "stdouterr" to access job stdout and stderr '
'files.')
with ParametersContext(command='batchai job stream-file') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('job_name', options_list=('--job-name', '-j'), help='Name of the job.')
c.register_alias('directory', options_list=('--output-directory-id', '-d'),
help='The Id of the Job output directory (as specified by "id" element in outputDirectories '
'collection in job create parameters). Use "stdouterr" to access job stdout and stderr '
'files.')
c.argument('file_name', options_list=('--name', '-n'), help='The name of the file to stream.')
with ParametersContext(command='batchai file-server create') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.argument('location', options_list=('--location', '-l'), arg_type=location_type,
help='Location. You can configure the default location using `az configure --defaults '
'location=<location>` or specify it in the file server configuration file.')
c.register_alias('file_server_name', options_list=('--name', '-n'), help='Name of the file server.')
c.argument('vm_size', help='VM size.', completer=get_vm_size_completion_list)
c.argument('disk_count', help='Number of disks.', type=int, arg_group='Storage')
c.argument('disk_size', help='Disk size in Gb.', type=int, arg_group='Storage')
c.argument('storage_sku', help='The sku of storage account to persist VM.',
arg_group='Storage', **enum_choice_list(SkuName))
c.argument('user_name', options_list=('--admin-user-name', '-u'),
help='Name of the admin user to be created on every compute node.', arg_group='Admin Account')
c.argument('ssh_key', options_list=('--ssh-key', '-k'),
help='SSH public key value or path.', arg_group='Admin Account')
c.argument('password', options_list=('--password', '-p'), help='Password.', arg_group='Admin Account')
c.argument('json_file', options_list=('--config', '-c'),
help='A path to a json file containing file server create parameters (json representation of '
'azure.mgmt.batchai.models.FileServerCreateParameters). Note, parameters given via command line '
'will overwrite parameters specified in the configuration file.', arg_group='Advanced')
with ParametersContext(command='batchai file-server show') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('file_server_name', options_list=('--name', '-n'), help='Name of the file server.')
with ParametersContext(command='batchai file-server delete') as c:
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
c.register_alias('file_server_name', options_list=('--name', '-n'), help='Name of the file server.')
with ParametersContext(command='batchai file-server list') as c:
c.argument('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type)
# Not implemented yet
c.register_alias('file_servers_list_options', options_list=('--file-servers-list-options',), arg_type=ignore_type)
```
#### File: resource/tests/test_locks.py
```python
from time import sleep
import unittest
from azure.cli.testsdk import ScenarioTest, JMESPathCheck, ResourceGroupPreparer, record_only
from azure.cli.command_modules.resource.custom import _parse_lock_id
class ResourceLockTests(ScenarioTest):
def test_list_locks(self):
# just make sure this doesn't throw
self.cmd('az lock list').get_output_in_json()
@record_only()
def test_subscription_locks(self):
for lock_type in ['ReadOnly', 'CanNotDelete']:
lock_name = self.create_random_name('cli-test-lock', 48)
lock = self.cmd('az lock create -n {} --lock-type {}'.format(lock_name, lock_type)).get_output_in_json()
lock_id = lock.get('id')
self._sleep_for_lock_operation()
locks_list = self.cmd('az lock list').get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, [l['name'] for l in locks_list])
lock = self.cmd('az lock show -n {}'.format(lock_name)).get_output_in_json()
lock_from_id = self.cmd('az lock show --ids {}'.format(lock_id)).get_output_in_json()
self.assertEqual(lock.get('name', None), lock_name)
self.assertEqual(lock_from_id.get('name', None), lock_name)
self.assertEqual(lock.get('level', None), lock_type)
notes = self.create_random_name('notes', 20)
new_lvl = 'ReadOnly' if lock_type == 'CanNotDelete' else 'CanNotDelete'
lock = self.cmd('az lock update -n {} --notes {} --lock-type {}'
.format(lock_name, notes, new_lvl)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), new_lvl)
lock = self.cmd('az lock update --ids {} --lock-type {}'
.format(lock_id, lock_type)).get_output_in_json()
self.assertEqual(lock.get('level', None), lock_type)
self.cmd('az lock delete -n {}'.format(lock_name))
self._sleep_for_lock_operation()
@ResourceGroupPreparer(name_prefix='cli_test_readonly_resource_group_lock')
def test_readonly_resource_group_lock(self, resource_group):
self._lock_operation_with_resource_group('ReadOnly', resource_group)
@ResourceGroupPreparer(name_prefix='cli_test_cannotdelete_resource_group_lock')
def test_cannotdelete_resource_group_lock(self, resource_group):
self._lock_operation_with_resource_group('CanNotDelete', resource_group)
@ResourceGroupPreparer(name_prefix='cli_test_readonly_resource_lock')
def test_readonly_resource_lock(self, resource_group):
self._lock_operation_with_resource('ReadOnly', resource_group)
@ResourceGroupPreparer(name_prefix='cli_test_cannotdelete_resource_lock')
def test_cannotdelete_resource_lock(self, resource_group):
self._lock_operation_with_resource('CanNotDelete', resource_group)
def _lock_operation_with_resource_group(self, lock_type, resource_group):
lock_name = self.create_random_name('cli-test-lock', 48)
self.cmd('az lock create -n {} -g {} --lock-type {}'.format(lock_name, resource_group, lock_type))
self._sleep_for_lock_operation()
self.cmd('az lock show -g {} -n {}'.format(resource_group, lock_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', lock_type)])
locks_list = self.cmd("az lock list -g {} --query '[].name' -ojson".format(resource_group)).get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
new_lvl = 'ReadOnly' if lock_type == 'CanNotDelete' else 'CanNotDelete'
lock = self.cmd('az lock update -n {} -g {} --notes {} --lock-type {}'
.format(lock_name, resource_group, notes, new_lvl)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), new_lvl)
self.cmd('az lock delete -g {} -n {}'.format(resource_group, lock_name))
self._sleep_for_lock_operation()
def _lock_operation_with_resource(self, lock_type, resource_group):
rsrc_name = self.create_random_name('cli.lock.rsrc', 30)
rsrc_type = 'Microsoft.Network/virtualNetworks'
lock_name = self.create_random_name('cli-test-lock', 74)
self.cmd('az network vnet create -n {} -g {}'.format(rsrc_name, resource_group))
self.cmd('az lock create -n {} -g {} --resource-type {} --resource-name {} --lock-type {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name, lock_type))
self._sleep_for_lock_operation()
self.cmd('az lock show --name {} -g {} --resource-type {} --resource-name {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', lock_type)])
locks_list = self.cmd("az lock list --query '[].name' -ojson").get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
new_lvl = 'ReadOnly' if lock_type == 'CanNotDelete' else 'CanNotDelete'
lock = self.cmd('az lock update -n {} -g {} --resource-type {} --resource-name {} --notes {} --lock-type {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name, notes, new_lvl)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), new_lvl)
self.cmd('az lock delete --name {} -g {} --resource-name {} --resource-type {}'
.format(lock_name, resource_group, rsrc_name, rsrc_type))
self._sleep_for_lock_operation()
@ResourceGroupPreparer(name_prefix='cli_test_group_lock')
def test_group_lock_commands(self, resource_group):
lock_name = self.create_random_name('cli-test-lock', 48)
self.cmd('group lock create -n {} -g {} --lock-type CanNotDelete'.format(lock_name, resource_group))
self._sleep_for_lock_operation()
self.cmd('group lock show -g {} -n {}'.format(resource_group, lock_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', 'CanNotDelete')]).get_output_in_json()
locks_list = self.cmd("group lock list -g {} --query [].name -ojson"
.format(resource_group)).get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
lock = self.cmd('group lock update -n {} -g {} --notes {} --lock-type ReadOnly'
.format(lock_name, resource_group, notes)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), 'ReadOnly')
self.cmd('group lock delete -g {} -n {}'.format(resource_group, lock_name))
self._sleep_for_lock_operation()
@ResourceGroupPreparer(name_prefix='cli_test_resource_lock')
def test_resource_lock_commands(self, resource_group):
rsrc_name = self.create_random_name('cli.lock.rsrc', 30)
rsrc_type = 'Microsoft.Network/virtualNetworks'
lock_name = self.create_random_name('cli-test-lock', 74)
lock_type = 'CanNotDelete'
self.cmd('network vnet create -n {} -g {}'.format(rsrc_name, resource_group))
self.cmd('resource lock create -n {} -g {} --resource-type {} --resource-name {} --lock-type {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name, lock_type))
self._sleep_for_lock_operation()
self.cmd('resource lock show --name {} -g {} --resource-type {} --resource-name {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', lock_type)])
list_cmd = "resource lock list -g {} --resource-type {} --resource-name {} " \
"--query [].name -ojson".format(resource_group, rsrc_type, rsrc_name)
locks_list = self.cmd(list_cmd).get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
lock = self.cmd('resource lock update -n {} -g {} --resource-type {} --resource-name {} --notes {} '
'--lock-type ReadOnly'
.format(lock_name, resource_group, rsrc_type, rsrc_name, notes)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), 'ReadOnly')
self.cmd('resource lock delete --name {} -g {} --resource-name {} --resource-type {}'
.format(lock_name, resource_group, rsrc_name, rsrc_type))
self._sleep_for_lock_operation()
@record_only()
def test_account_locks(self):
lock_name = self.create_random_name('cli-test-lock', 48)
lock = self.cmd('az account lock create -n {} --lock-type CanNotDelete'.format(lock_name)).get_output_in_json()
lock_id = lock.get('id')
locks_list = self.cmd('az account lock list --query [].name').get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
lock = self.cmd('az account lock show -n {}'.format(lock_name)).get_output_in_json()
lock_from_id = self.cmd('az account lock show --ids {}'.format(lock_id)).get_output_in_json()
self.assertEqual(lock.get('name', None), lock_name)
self.assertEqual(lock_from_id.get('name', None), lock_name)
self.assertEqual(lock.get('level', None), 'CanNotDelete')
notes = self.create_random_name('notes', 20)
lock = self.cmd('az account lock update -n {} --notes {} --lock-type {}'
.format(lock_name, notes, 'ReadOnly')).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), 'ReadOnly')
lock = self.cmd('az account lock update --ids {} --lock-type {}'
.format(lock_id, 'CanNotDelete')).get_output_in_json()
self.assertEqual(lock.get('level', None), 'CanNotDelete')
self.cmd('az account lock delete -n {}'.format(lock_name))
@ResourceGroupPreparer(name_prefix='cli_test_lock_commands_with_ids')
def test_lock_commands_with_ids(self, resource_group):
vnet_name = self.create_random_name('cli-lock-vnet', 30)
subnet_name = self.create_random_name('cli-lock-subnet', 30)
group_lock_name = self.create_random_name('cli-test-lock', 50)
vnet_lock_name = self.create_random_name('cli-test-lock', 50)
subnet_lock_name = self.create_random_name('cli-test-lock', 20)
vnet = self.cmd('az network vnet create -n {} -g {}'.format(vnet_name, resource_group)).get_output_in_json()
subnetaddress = vnet.get('newVNet').get('addressSpace').get('addressPrefixes')[0]
self.cmd('az network vnet subnet create -n {} --address-prefix {} --vnet-name {} -g {}'
.format(subnet_name, subnetaddress, vnet_name, resource_group))
locks = []
locks.append(self.cmd('az lock create -n {} -g {} --lock-type CanNotDelete'
.format(group_lock_name, resource_group)).get_output_in_json())
locks.append(self.cmd('az lock create -n {} -g {} --resource-type Microsoft.Network/virtualNetworks'
' --resource-name {} --lock-type CanNotDelete'
.format(vnet_lock_name, resource_group, vnet_name)).get_output_in_json())
locks.append(self.cmd('az lock create -n {} -g {} --resource-name {} --resource-type subnets '
'--namespace Microsoft.Network --parent virtualNetworks/{} --lock-type CanNotDelete'
.format(subnet_lock_name, resource_group, subnet_name, vnet_name)).get_output_in_json())
self._sleep_for_lock_operation()
space_delimited_ids = ' '.join([lock.get('id', None) for lock in locks])
my_locks = self.cmd('az lock show --ids {} --query [].name'.format(space_delimited_ids)).get_output_in_json()
self.assertTrue(len(my_locks) == 3)
for lock in my_locks:
self.assertIn(lock, [group_lock_name, vnet_lock_name, subnet_lock_name])
my_locks = self.cmd('az lock update --ids {} --notes somenotes --lock-type ReadOnly'
.format(space_delimited_ids)).get_output_in_json()
self.assertTrue(len(my_locks) == 3)
for lock in my_locks:
self.assertEqual(lock.get('notes', None), 'somenotes')
self.assertEqual(lock.get('level', None), 'ReadOnly')
self.cmd('az lock delete --ids {}'.format(space_delimited_ids))
self._sleep_for_lock_operation()
my_locks = self.cmd("az lock list -g {} -ojson".format(resource_group)).get_output_in_json()
self.assertFalse(my_locks)
def _sleep_for_lock_operation(self):
if self.is_live:
sleep(5)
class ParseIdTests(unittest.TestCase):
def test_parsing_lock_ids(self):
tests = [
{
'input': "/subscriptions/subId/providers/"
"Microsoft.Authorization/locks/sublock",
'expected': {
'resource_group': None,
'resource_provider_namespace': None,
'parent_resource_path': None,
'resource_type': None,
'resource_name': None,
'lock_name': 'sublock'
}
},
{
'input': "/subscriptions/subId/resourceGroups/examplegroup/providers/"
"Microsoft.Authorization/locks/grouplock",
'expected': {
'resource_group': 'examplegroup',
'resource_provider_namespace': None,
'parent_resource_path': None,
'resource_type': None,
'resource_name': None,
'lock_name': 'grouplock'
}
},
{
'input': "/subscriptions/subId/resourcegroups/mygroup/providers/"
"Microsoft.Network/virtualNetworks/myvnet/providers/"
"Microsoft.Authorization/locks/vnetlock",
'expected': {
'resource_group': 'mygroup',
'resource_provider_namespace': 'Microsoft.Network',
'parent_resource_path': None,
'resource_type': 'virtualNetworks',
'resource_name': 'myvnet',
'lock_name': 'vnetlock'
}
},
{
'input': "/subscriptions/subId/resourceGroups/mygroup/providers/"
"Microsoft.Network/virtualNetworks/myvnet/subnets/subnet/providers/"
"Microsoft.Authorization/locks/subnetlock",
'expected': {
'resource_group': 'mygroup',
'resource_provider_namespace': 'Microsoft.Network',
'parent_resource_path': 'virtualNetworks/myvnet',
'resource_type': 'subnets',
'resource_name': 'subnet',
'lock_name': 'subnetlock'
}
},
{
'input': "/subscriptions/subId/resourceGroups/mygroup/providers/"
"Microsoft.Provider1/resourceType1/name1/providers/"
"Microsoft.Provider2/resourceType2/name2/providers/"
"Microsoft.Authorization/locks/somelock",
'expected': {
'resource_group': 'mygroup',
'resource_provider_namespace': 'Microsoft.Provider1',
'parent_resource_path': 'resourceType1/name1/providers/Microsoft.Provider2',
'resource_type': 'resourceType2',
'resource_name': 'name2',
'lock_name': 'somelock'
}
}
]
for test in tests:
kwargs = _parse_lock_id(test['input'])
self.assertDictEqual(kwargs, test['expected'])
fail_tests = [
"/notsubscriptions/subId/providers/Microsoft.Authorization/locks/sublock",
"/subscriptions/subId/notResourceGroups/examplegroup/providers/Microsoft.Authorization/locks/grouplock",
"/subscriptions/subId/resourceGroups/examplegroup/providers/Microsoft.NotAuthorization/not_locks/grouplock",
"/subscriptions/subId/resourcegroups/mygroup/Microsoft.Network/virtualNetworks/myvnet/providers/"
"Microsoft.Authorization/locks/missingProvidersLock",
"/subscriptions/subId/resourcegroups/mygroup/providers/Microsoft.Network/myvnet/providers/"
"Microsoft.Authorization/locks/missingRsrcTypeLock",
"/subscriptions/subId/providers/Microsoft.Network/virtualNetworks/myvnet/subnets/subnet/providers/"
"Microsoft.Authorization/locks/missingRsrcGroupLock",
"not_a_id_at_all"
]
for test in fail_tests:
with self.assertRaises(AttributeError):
_parse_lock_id(test)
if __name__ == '__main__':
unittest.main()
```
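The lock ids exercised by `ParseIdTests` follow a fixed layout, so a parser sketch consistent with those expectations is shown below. This is an illustration derived from the test cases above, not the actual azure-cli implementation: a malformed id fails to match, `match()` returns `None`, and calling `.groupdict()` on it raises the `AttributeError` that the `fail_tests` loop expects.
```python
import re

# Illustrative pattern only, written against the expectations in ParseIdTests above.
_LOCK_ID_PATTERN = re.compile(
    r'/subscriptions/[^/]+'
    r'(/resourceGroups/(?P<resource_group>[^/]+)'
    r'(/providers/(?P<resource_provider_namespace>[^/]+)'
    r'(/(?P<parent_resource_path>.*))?'
    r'/(?P<resource_type>[^/]+)/(?P<resource_name>[^/]+))?)?'
    r'/providers/Microsoft\.Authorization/locks/(?P<lock_name>[^/]+)',
    re.IGNORECASE)


def _parse_lock_id(lock_id):
    # A failed match returns None, so .groupdict() raises AttributeError,
    # which is exactly the behaviour the fail_tests above rely on.
    return _LOCK_ID_PATTERN.match(lock_id).groupdict()
```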
#### File: automation/release/packaged.py
```python
from __future__ import print_function
import argparse
import os
import sys
import tempfile
import tarfile
import shutil
import json
from subprocess import check_call, check_output
from .version_patcher import VersionPatcher
from ..utilities.path import get_all_module_paths, get_repo_root
REPO_ROOT_DIR = get_repo_root()
COMPLETION_FILE = os.path.join(REPO_ROOT_DIR, 'packaged_releases', 'az.completion')
ARCHIVE_FILE_TMPL = 'azure-cli_packaged_{}'
class Patch(object): # pylint: disable=too-few-public-methods
def __init__(self, src_of_patch, path_to_patch):
"""
- src: Relative path from the repo root
- dest: Relative path to file to patch in the packaged release
"""
self.src_of_patch = src_of_patch
self.path_to_patch = path_to_patch
def apply(self, working_dir):
src = os.path.join(REPO_ROOT_DIR, self.src_of_patch)
dest = os.path.join(working_dir, self.path_to_patch)
shutil.copy(src, dest)
PATCHES = [
]
def error_exit(msg):
print('ERROR: '+msg, file=sys.stderr)
sys.exit(1)
def _gen_tag(c_name, c_version):
return '{}-{}'.format(c_name, c_version)
def _verified_tags(components):
available_tags = check_output(['git', 'tag'], cwd=REPO_ROOT_DIR)
available_tags = str(available_tags, 'utf-8')
available_tags = available_tags.split()
for c_name, c_version in components:
t = _gen_tag(c_name, c_version)
if t not in available_tags:
print('Tag {} not found.'.format(t))
return False
return True
def create_packaged_archive(version, components, archive_dest=None, use_version_patch=True):
# Verify the components and versions by checking git tags
if not _verified_tags(components):
error_exit('Some components or versions are not valid.')
working_dir = tempfile.mkdtemp()
print('Using tmp directory {}'.format(working_dir))
modules = {n: p for n, p in get_all_module_paths()}
cur_git_commitish = check_output(['git', 'rev-parse', 'HEAD'], cwd=REPO_ROOT_DIR).strip()
for c_name, c_version in components:
c_path = modules[c_name]
git_tag = _gen_tag(c_name, c_version)
check_call(['git', 'checkout', git_tag], cwd=REPO_ROOT_DIR)
patcher = VersionPatcher(use_version_patch, c_name, c_path)
patcher.patch()
sub_dir = 'command_modules' if '/command_modules/' in c_path else ''
shutil.copytree(c_path, os.path.join(working_dir, 'src', sub_dir, c_name))
patcher.unpatch()
check_call(['git', 'checkout', cur_git_commitish], cwd=REPO_ROOT_DIR)
# Add completion file
completion_dest = os.path.join(working_dir, 'az.completion')
shutil.copy(COMPLETION_FILE, completion_dest)
# Apply patches
for patch in PATCHES:
patch.apply(working_dir)
# Build archive
archive_filename = ARCHIVE_FILE_TMPL.format(version)
archive_dest = os.path.expanduser(archive_dest) if archive_dest else os.getcwd()
archive_path = os.path.join(archive_dest, archive_filename+'.tar.gz')
with tarfile.open(archive_path, 'w:gz') as tar:
tar.add(working_dir, arcname=archive_filename)
print("Archive saved to {}".format(archive_path))
print("Done.")
def _type_components_list(value):
c_name, c_version = value.split('=', 1)
if not c_name.startswith('azure-cli'):
c_name = 'azure-cli-' + c_name
return (c_name, c_version)
def _type_json_file(value):
with open(os.path.expanduser(value)) as open_file:
data = json.load(open_file)
return [(k, data[k]) for k in data]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Automated generation of the packaged release archive.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--components', '-c', nargs='+',
help="Space separated list in 'component=version' format. "
"(e.g. azure-cli=2.0.0 vm=2.0.0)",
type=_type_components_list)
group.add_argument('--file-data', '-f',
help='Path to JSON file with commands in key/value format. '
'(e.g. {"azure-cli":"2.0.0", ...})',
type=_type_json_file)
parser.add_argument('--version', '-v', required=True,
help="The version to name the packaged release.")
parser.add_argument('--dest', '-d',
help="The destination directory to place the archive. "
"Defaults to current directory.")
args = parser.parse_args()
components_list = args.components or args.file_data
create_packaged_archive(args.version, components_list, args.dest)
``` |
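For orientation, a hypothetical call into `create_packaged_archive` is sketched below. The module path, component versions and destination directory are placeholders, and each `name-version` pair must already exist as a git tag in the repository, since that is what `_verified_tags` checks before anything is copied.
```python
# Hypothetical usage sketch -- module path, versions and destination are
# assumptions; git tags 'azure-cli-2.0.0' and 'azure-cli-vm-2.0.0' must exist.
from automation.release.packaged import create_packaged_archive

components = [
    ('azure-cli', '2.0.0'),      # same (name, version) shape _type_components_list produces
    ('azure-cli-vm', '2.0.0'),
]

# Would produce ~/archives/azure-cli_packaged_2.0.0.tar.gz
create_packaged_archive('2.0.0', components, archive_dest='~/archives')
```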
{
"source": "aagaard/dbservice",
"score": 2
} |
#### File: apps/homes/admin.py
```python
from django.contrib import admin
from dbservice.apps.utils import MEASUREMENT_UNIT_CHOICES
from . import models
admin.site.register(models.FixedValueMeterPort)
@admin.register(models.VirtualEnergyPort)
class VirtualPortEnergyAdmin(admin.ModelAdmin):
fields = ('name', 'consumption', 'current', 'voltage', 'power_factor')
view_on_site = True
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "consumption":
kwargs["queryset"] = models.MeterPort.objects.filter(
unit=MEASUREMENT_UNIT_CHOICES[0][0]
)
if db_field.name == "current":
kwargs["queryset"] = models.MeterPort.objects.filter(
unit=MEASUREMENT_UNIT_CHOICES[3][0]
)
if db_field.name == "voltage":
kwargs["queryset"] = models.MeterPort.objects.filter(
unit=MEASUREMENT_UNIT_CHOICES[2][0]
)
if db_field.name == "power_factor":
kwargs["queryset"] = models.MeterPort.objects.filter(
unit=MEASUREMENT_UNIT_CHOICES[6][0]
)
return super(VirtualPortEnergyAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs)
```
#### File: apps/homes/views.py
```python
import datetime
from dateutil.relativedelta import relativedelta
from rest_framework import status, viewsets
from rest_framework.decorators import link, list_route
from rest_framework.exceptions import ParseError
from rest_framework.fields import ValidationError
from rest_framework.response import Response
from dbservice.apps.homes.models import Measurement
from dbservice.apps.users.models import User
from dbservice.apps.utils.viewsets import (BulkCreateModelMixin,
JSONSchemaViewSet)
from . import filters, models, serializers
from .aggregated import (aggregated, get_temperature_home,
response_virtual_energy_port_measurements)
from .condensed import condensed
from .status import get_status
from .utils import response_fixed_value_measurements, response_measurements
class ApplianceViewSet(viewsets.ModelViewSet):
"""
`/schemas/homes/appliances/list/`
`/schemas/homes/appliances/detail/` and
`/schemas/homes/appliances/update/`
Appliance may be filtered on `residential_home`.
"""
model = models.Appliance
serializer_class = serializers.ApplianceSerializer
filter_class = filters.ApplianceFilter
filter_fields = ('residential_home')
def list(self, request, *args, **kwargs):
try:
return super().list(request, *args, **kwargs)
except ValidationError as e:
raise ParseError(e)
def get_queryset(self):
qs = super().get_queryset()
user = self.request.user
if user.is_superuser:
return qs
else:
return qs.filter(residential_home__dno_customer_id=user)
# def create(self, request):
# ec_period = request.POST.get('energy_consumption_period', None)
# ep_period = request.POST.get('energy_production_period', None)
# import ipdb; ipdb.set_trace()
# oneflow_defined = bool(ec_period) != bool(ep_period)
# if not oneflow_defined:
# raise ParseError(
# "A single consumption/production period must be defined")
# serializer = ApplianceSerializer(data=request.DATA)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors,
# status=status.HTTP_400_BAD_REQUEST)
class ApplianceSchema(JSONSchemaViewSet):
schema_for = serializers.ApplianceSerializer
app_name = 'homes-v1'
class EnergyConsumptionPeriodViewSet(viewsets.ModelViewSet):
"""
Data format described in
`/schemas/homes/energy_consumption_period/list/`
`/schemas/homes/energy_consumption_period/detail/` and
`/schemas/homes/energy_consumption_period/update/`
Energy consumption period may be filtered on `appliance`.
"""
model = models.EnergyConsumptionPeriod
serializer_class = serializers.EnergyConsumptionPeriodSerializer
filter_class = filters.EnergyConsumptionPeriodFilter
filter_fields = ('appliance')
def list(self, request, *args, **kwargs):
try:
return super().list(request, *args, **kwargs)
except ValidationError as e:
raise ParseError(e)
def get_queryset(self):
qs = super().get_queryset()
user = self.request.user
if user.is_superuser:
return qs
else:
return qs.filter(appliance__residential_home__dno_customer_id=user)
class EnergyConsumptionPeriodSchema(JSONSchemaViewSet):
schema_for = serializers.EnergyConsumptionPeriodSerializer
app_name = 'homes-v1'
class EnergyProductionPeriodViewSet(viewsets.ModelViewSet):
"""
Data format described in
`/schemas/homes/energy_production_period/list/`
`/schemas/homes/energy_production_period/detail/` and
`/schemas/homes/energy_production_period/update/`
Energy production period may be filtered on `appliance`.
"""
model = models.EnergyProductionPeriod
serializer_class = serializers.EnergyProductionPeriodSerializer
filter_class = filters.EnergyProductionPeriodFilter
filter_fields = ('appliance')
def list(self, request, *args, **kwargs):
try:
return super().list(request, *args, **kwargs)
except ValidationError as e:
raise ParseError(e)
def get_queryset(self):
qs = super().get_queryset()
user = self.request.user
if user.is_superuser:
return qs
else:
return qs.filter(appliance__residential_home__dno_customer_id=user)
class EnergyProductionPeriodSchema(JSONSchemaViewSet):
schema_for = serializers.EnergyProductionPeriodSerializer
app_name = 'homes-v1'
class MainMeterViewSet(viewsets.ModelViewSet):
"""
Data format described in
`/schemas/homes/main_meters/list`
`/schemas/homes/main_meters/detail/` and
`/schemas/homes/main_meters/update`
Main meters may be filtered on `residential_home`.
"""
model = models.MainMeter
serializer_class = serializers.MainMeterSerializer
filter_class = filters.MainMeterFilter
filter_fields = ('residential_home')
def list(self, request, *args, **kwargs):
try:
return super().list(request, *args, **kwargs)
except ValidationError as e:
raise ParseError(e)
def get_queryset(self):
qs = super().get_queryset()
user = self.request.user
if user.is_superuser:
return qs
else:
return qs.filter(residential_home__dno_customer_id=user)
class MainMeterSchema(JSONSchemaViewSet):
schema_for = serializers.MainMeterSerializer
app_name = 'homes-v1'
class SubMeterViewSet(viewsets.ModelViewSet, BulkCreateModelMixin):
"""
Data format described in
`/schemas/homes/sub_meters/list/`
`/schemas/homes/sub_meters/detail/` and
`/schemas/homes/sub_meters/update/`.
Submeters may be filtered on `residential_home`.
Bulk creation possible at `/homes/sub_meters/bulk/`
(post JSON array of objects to create).
"""
model = models.SubMeter
serializer_class = serializers.SubMeterSerializer
filter_class = filters.SubMeterFilter
filter_fields = ('residential_home')
def list(self, request, *args, **kwargs):
try:
return super().list(request, *args, **kwargs)
except ValidationError as e:
raise ParseError(e)
def get_queryset(self):
qs = super().get_queryset()
user = self.request.user
if user.is_superuser:
return qs
else:
return qs.filter(residential_home__dno_customer_id=user)
class SubMeterSchema(JSONSchemaViewSet):
schema_for = serializers.SubMeterSerializer
app_name = 'homes-v1'
class MeterPortViewSet(viewsets.ModelViewSet, BulkCreateModelMixin):
"""
Data format described in
`/schemas/homes/meter_ports/list/`
`/schemas/homes/meter_ports/detail/` and
`/schemas/homes/meter_ports/update/`
Meter ports may be filtered on `mainmeter`, `submeter`,
`resource_type`, `unit`.
Configuration of a meter port can be obtained at
`/homes/meter_ports/{id}/`,
Bulk creation possible at `/homes/meter_ports/bulk/`
(post JSON array of objects to create).
"""
model = models.MeterPort
serializer_class = serializers.MeterPortSerializer
filter_class = filters.MeterPortFilter
def list(self, request, *args, **kwargs):
try:
return super().list(request, *args, **kwargs)
except ValidationError as e:
raise ParseError(e)
def get_queryset(self):
qs = super().get_queryset()
user = self.request.user
null_defs = ['None', 'none', 'Null', 'null', 'NULL']
meters = ['submeter', 'mainmeter']
myfilter = {"{}__isnull".format(key): True
for key, val in self.request.GET.items()
if key in meters and val in null_defs}
if len(myfilter) > 0:
return models.MeterPort.objects.filter(**myfilter)
if user.is_superuser:
return qs
else:
return (
qs.filter(mainmeter__residential_home__dno_customer_id=user) |
qs.filter(submeter__residential_home__dno_customer_id=user)
)
def create(self, request):
"""
        Ensure that a meter port is not associated with both a submeter and a
        main meter, that a main meter port carries no consumption/production
        period, and that a submeter port carries at most one
"""
submeter_def = bool(request.POST.get('submeter', None))
mainmeter_def = bool(request.POST.get('mainmeter', None))
ec_def = bool(request.POST.get('energy_consumption_period', None))
ep_def = bool(request.POST.get('energy_production_period', None))
        # We cannot associate a meter port with both a submeter and a main meter
if submeter_def and mainmeter_def:
raise ParseError(
"Meter port cannot be associated to a submeter and main meter")
        # If a main meter is defined, no consumption/production period may be defined
if mainmeter_def:
if ec_def or ep_def:
raise ParseError(
'Meter port cannot be associated ' +
'to an energy consumption and production period ' +
'for a main meter'
)
# If submeter defined then ensure only one or none period is defined
if submeter_def:
if ec_def and ep_def:
raise ParseError(
"Only a single consumption/production period " +
"can be defined for submeters"
)
# Serialize the data and check if it is valid
serializer = serializers.MeterPortSerializer(
data=request.DATA, context={'request': request}
)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@link()
def get_measurements(self, request, pk=None):
return response_measurements(request, meter_port_pk=pk)
class MeterPortSchema(JSONSchemaViewSet):
schema_for = serializers.MeterPortSerializer
app_name = 'homes-v1'
class FixedValueMeterPortViewSet(viewsets.ModelViewSet, BulkCreateModelMixin):
"""
Data format described in
`/schemas/homes/fixed_value_meter_ports/list/`
`/schemas/homes/fixed_value_meter_ports/detail/` and
`/schemas/homes/fixed_value_meter_ports/update/`.
Configuration of a meter port can be obtained at
`/homes/fixed_value_meter_ports/{id}/`.
Virtual measurements are obtained at
`/homes/fixed_value_meter_ports/{id}/get_measurements/?from_timestamp={tf}&to_timestamp={tt}`.
Bulk creation possible at `/homes/fixed_value_meter_ports/bulk/`
(post JSON array of objects to create).
"""
user = User
model = models.FixedValueMeterPort
serializer_class = serializers.FixedValueMeterPortSerializer
def list(self, request, *args, **kwargs):
try:
return super().list(request, *args, **kwargs)
except ValidationError as e:
raise ParseError(e)
def get_queryset(self):
qs = super().get_queryset()
user = self.request.user
if user.is_superuser:
return qs
else:
return qs.filter(user=user)
@link()
def get_measurements(self, request, pk=None):
return response_fixed_value_measurements(request, pk)
class FixedValueMeterPortSchema(JSONSchemaViewSet):
schema_for = serializers.MeterPortSerializer
app_name = 'homes-v1'
class VirtualEnergyPortViewSet(viewsets.ModelViewSet):
"""
Data format described in
`/schemas/homes/virtual_energy_ports/list/`
`/schemas/homes/virtual_energy_ports/detail/` and
`/schemas/homes/virtual_energy_ports/update/`
    A virtual energy port consists of four meter ports that implicitly
    represent an energy consumption port: a main meter consumption meter port,
    a current meter port, a voltage meter port and a power_factor meter port.
An aligned energy measurement set consists of a tuple of an accumulated
consumption start value, an accumulated stop value, an average current
value, an average voltage, and an average power factor within the time
period. The from_timestamp and to_timestamp indicate the time interval of
energy measurement. It can be obtained from
`/homes/virtual_energy_ports/{meter_port_id}/get_measurements/?from_timestamp={tf}&to_timestamp={tt}`
"""
model = models.VirtualEnergyPort
serializer_class = serializers.VirtualEnergyPortSerializer
def list(self, request, *args, **kwargs):
try:
return super().list(request, *args, **kwargs)
except ValidationError as e:
raise ParseError(e)
def get_queryset(self):
qs = super().get_queryset()
user = self.request.user
if user.is_superuser:
return qs
else:
return (
qs.filter(mainmeter__residential_home__dno_customer_id=user) |
qs.filter(submeter__residential_home__dno_customer_id=user)
)
@link()
def get_measurements(self, request, pk=None):
return response_virtual_energy_port_measurements(request, pk)
class VirtualEnergyPortSchema(JSONSchemaViewSet):
schema_for = serializers.VirtualEnergyPortSerializer
app_name = 'homes-v1'
class MeasurementViewSet(viewsets.ModelViewSet, BulkCreateModelMixin):
"""
Data format described in `/schemas/homes/measurements/list/`,
`/schemas/homes/measurements/detail/` and
`/schemas/homes/measurements/update/`.
Measurements may be filtered on `meter_port`,
`min_value`,`max_value`,`min_timestamp` and `max_timestamp`.
Condensed stored data for each input can be obtained at
`/homes/measurements/{meter_port_id}/hourly_condensed/`,
`/homes/measurements/{meter_port_id}/daily_condensed/`,
`/homes/measurements/{meter_port_id}/monthly_condensed/` and
`/homes/measurements/{meter_port_id}/yearly_condensed/`. For condensed
data, query parameters `from_timestamp` and `to_timestamp` must be
provided. The data format for condensed data is described in
`/static/condensed-list.json`.
Latest measurements can be viewed with `/homes/measurements/latest/`.
Bulk creation possible at `/homes/measurements/bulk/`
(post JSON array of objects to create).
"""
throttle_scope = 'measurements'
model = models.Measurement
serializer_class = serializers.MeasurementSerializer
filter_class = filters.MeasurementFilter
filter_fields = ('meter_port', 'timestamp', 'value')
def list(self, request, *args, **kwargs):
try:
return super().list(request, *args, **kwargs)
except ValidationError as e:
raise ParseError(e)
def get_queryset(self):
qs = super().get_queryset()
user = self.request.user
if user.is_superuser:
return qs
else:
return (
qs.filter(meter_port__mainmeter__residential_home__dno_customer_id=user) | # noqa
qs.filter(meter_port__submeter__residential_home__dno_customer_id=user) # noqa
)
@list_route()
def latest(self, request):
all_measurements = Measurement.objects.all().order_by('-timestamp')
page = self.paginate_queryset(all_measurements)
serializer = self.get_pagination_serializer(page)
return Response(serializer.data)
@link()
def hourly_condensed(self, request, pk=None):
return condensed(request, pk, datetime.timedelta(hours=1))
@link()
def daily_condensed(self, request, pk=None):
return condensed(request, pk, datetime.timedelta(days=1))
@link()
def monthly_condensed(self, request, pk=None):
return condensed(request, pk, relativedelta(months=1))
@link()
def yearly_condensed(self, request, pk=None):
return condensed(request, pk, relativedelta(years=1))
class MeasurementSchema(JSONSchemaViewSet):
schema_for = serializers.MeasurementSerializer
app_name = 'homes-v1'
class ResidentialHomeViewSet(viewsets.ModelViewSet):
"""
Data format described in `/schemas/homes/residential_homes/list`
`/schemas/homes/residential_homes/detail/` and
`/schemas/homes/residential_homes/update`.
Electrical energy consumption and production for the residential home can
be obtained at
`/homes/residential_homes/{home_id}/get_energy_{consumption,production}/
?from_timestamp={tf}&to_timestamp={tt}[&tau={tau}]`, where `{home_id}` is
the id of the residential home, `{tf}` and `{tt}` represent the
timeslot. `{tau}` is optional and can have following values 1min, 5min,
10min, 15min, 20min, 30min, hourly, daily, weekly, monthly, yearly.
Consumption data is based on main meter data and production data is based
on submeters.
    The status of the residential home can be obtained at
    `/homes/residential_homes/{home_id}/get_status/
    ?from_timestamp={tf}&to_timestamp={tt}&tolerance={tol}`, where `tf` is the
    start timestamp, `tt` is the end timestamp and `tol` is the tolerance
    (`30min`, `1h`, `12h`, `1d`, `1w` or `1m`) within which the interval
    between consecutive measurements must fall.
Temperature measurements can be obtained from
`/homes/residential_homes/{home_id}/get_temperature/[?from_timestamp={tf}&to_timestamp={tt}]`.
Residential homes can be filtered on `dno_customer_id`.
"""
model = models.ResidentialHome
serializer_class = serializers.ResidentialHomeSerializer
filter_class = filters.ResidentialHomeFilter
filter_fields = ('dno_customer_id')
def list(self, request, *args, **kwargs):
try:
return super().list(request, *args, **kwargs)
except ValidationError as e:
raise ParseError(e)
def get_queryset(self):
qs = super().get_queryset()
user = self.request.user
if user.is_superuser:
return qs
else:
return qs.filter(dno_customer_id=user)
@link()
def get_energy_consumption(self, request, pk=None):
return aggregated(request, pk, 'consumption')
@link()
def get_energy_production(self, request, pk=None):
return aggregated(request, pk, 'production')
@link()
def get_temperature(self, request, pk=None):
return get_temperature_home(request, pk)
@link()
def get_status(self, request, pk=None):
return get_status(request, pk)
class ResidentialHomeSchema(JSONSchemaViewSet):
schema_for = serializers.ResidentialHomeSerializer
app_name = 'homes-v1'
```
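As a quick illustration of the endpoints documented in the docstrings above, a client-side sketch follows. The base URL and authentication handling are assumptions; the path and the `from_timestamp`/`to_timestamp` query parameters come from the `MeasurementViewSet` docstring.
```python
# Client-side sketch only: BASE_URL and the authenticated `session` are
# assumptions; the path and query parameters follow MeasurementViewSet above.
import requests

BASE_URL = 'https://dbservice.example.org'  # hypothetical host
session = requests.Session()                # assume auth is already configured


def get_daily_condensed(meter_port_id, from_timestamp, to_timestamp):
    url = '{}/homes/measurements/{}/daily_condensed/'.format(BASE_URL, meter_port_id)
    response = session.get(url, params={
        'from_timestamp': from_timestamp,   # e.g. '2015-01-01T00:00:00Z'
        'to_timestamp': to_timestamp,
    })
    response.raise_for_status()
    return response.json()
```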
#### File: apps/users/models.py
```python
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from dbservice.apps.private.models import UserDetails
class UserManager(BaseUserManager):
def create_user(self, email, password=None):
now = timezone.now()
email = UserManager.normalize_email(email)
user = self.model(email=email,
is_superuser=False,
last_login=now)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""
The User class for all users of the system.
is_staff == True => Sys. admin.
is_superuser == True => DNO user
not is_staff and not is_superuser => Residential home user
"""
is_staff = models.BooleanField(_('staff status'), default=False)
is_active = models.BooleanField(_('is active'), default=True)
email = models.EmailField(
verbose_name='Email address',
unique=True,
max_length=255,
)
USERNAME_FIELD = 'email'
objects = UserManager()
def get_short_name(self):
return self.email
def get_full_name(self):
return self.email
class Meta:
ordering = ['email']
@receiver(post_save, sender=User)
def autocreate_userdetails(sender, instance, created, raw=False, **kwargs):
if raw:
return
if created:
UserDetails.objects.create(user=instance)
``` |
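A short usage sketch (for example from a Django shell) follows; the email addresses and passwords are placeholders. Because of the `post_save` receiver above, creating a `User` through the manager also creates the matching `UserDetails` row without any extra call.
```python
# Usage sketch -- addresses and passwords are placeholders.
from dbservice.apps.users.models import User
from dbservice.apps.private.models import UserDetails

home_user = User.objects.create_user('home-owner@example.org', password='s3cret')
dno_user = User.objects.create_superuser('dno@example.org', password='s3cret')

# autocreate_userdetails has already run for both users.
assert UserDetails.objects.filter(user=home_user).exists()
assert dno_user.is_superuser and dno_user.is_staff
```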
{
"source": "aagallag/binjatron",
"score": 2
} |
#### File: aagallag/binjatron/__init__.py
```python
from binaryninja import *
import voltron
from threading import Thread
from voltron.core import Client
from voltron.plugin import api_request
from scruffy import ConfigFile, PackageFile
import sys
log = voltron.setup_logging()
client = Client()
last_bp_addrs = []
last_pc_addr = 0
last_pc_addr_colour = 0
syncing = False
vers = None
slide = 0
notification = None
sync_callbacks = []
mute_errors_after = 3
config = ConfigFile('~/.binjatron.conf', defaults=PackageFile('defaults.yaml'), apply_env=True, env_prefix='BTRON')
config.load()
bp_colour = enums.HighlightStandardColor(config.bp_colour)
pc_colour = enums.HighlightStandardColor(config.pc_colour)
no_colour = enums.HighlightStandardColor(0)
def _get_function(view, address):
func = view.get_function_at(address)
if func is None:
return view.get_function_at(view.get_previous_function_start_before(address))
return func
def sync(view):
global syncing, vers, notification
def build_requests():
return [
api_request('registers', registers=['pc'], block=True),
api_request('breakpoints', block=True),
]
def callback(results=[], error=None):
global last_bp_addrs, last_pc_addr, last_pc_addr_colour, sync_callbacks, mute_errors_after, syncing
if error:
if mute_errors_after > 0:
log_error("Error synchronising: {}".format(error))
elif mute_errors_after == 0:
# Prevent errors from filling up the entire log if the debugger closes and we lose sync
                log_alert("Voltron encountered three sync errors in a row. Muting errors until the next successful sync.")
syncing = False
mute_errors_after -= 1
else:
if(mute_errors_after < 0):
log_info("Sync restored after {} attempts".format(mute_errors_after * -1))
syncing = True
mute_errors_after = 3
if client and len(results):
if results[1].breakpoints:
addrs = [l['address'] - slide for s in [bp['locations'] for bp in results[1].breakpoints] for l in s]
# add colours to all the breakpoints currently set in the debugger
for addr in addrs:
func = _get_function(view, addr)
if func:
func.set_auto_instr_highlight(addr, bp_colour)
# remove colours from any addresses that had breakpoints the last time we updated, but don't now
for addr in set(last_bp_addrs) - set(addrs):
func = _get_function(view, addr)
if func:
func.set_auto_instr_highlight(addr, no_colour)
# save this set of breakpoint addresses for next time
last_bp_addrs = addrs
elif last_bp_addrs:
if (results[1].status == 'success') or (hasattr(results[1], 'message') and 'busy' not in results[1].message.lower()):
# We end up here if the debugger has been closed and re-opened
replace_breakpoints = show_message_box(
'New Session',
'The Voltron instance currently syncing reports no breakpoints set, but breakpoints have been set in Binary Ninja. Restore these breakpoints?',
buttons=enums.MessageBoxButtonSet.YesNoButtonSet)
if replace_breakpoints:
for addr in set(last_bp_addrs):
set_breakpoint(view, addr)
else:
for addr in set(last_bp_addrs):
func = _get_function(view, addr)
if func:
func.set_auto_instr_highlight(addr, no_colour)
last_bp_addrs = []
if results[0].registers:
# get the current PC from the debugger
addr = results[0].registers.values()[0] - slide
# find the function where that address is
func = _get_function(view, addr)
if last_pc_addr:
# update the highlight colour of the previous PC to its saved value
_get_function(view, last_pc_addr).set_auto_instr_highlight(last_pc_addr, last_pc_addr_colour)
# save the PC and current colour for that instruction
last_pc_addr_colour = func.get_instr_highlight(addr)
last_pc_addr = addr
# update the highlight colour to show the current PC
func.set_auto_instr_highlight(addr, pc_colour)
# Run sync callbacks and remove them from the list if specified
for cb, _ in sync_callbacks:
cb(results)
sync_callbacks = filter(lambda cbt: not cbt[1], sync_callbacks)
elif not results[1].breakpoints or (results[0].message == 'No such target'): # Clear the program counter highlight if the program isn't running
if last_pc_addr:
# update the highlight colour of the previous PC to its saved value
_get_function(view, last_pc_addr).set_auto_instr_highlight(last_pc_addr, last_pc_addr_colour)
if not syncing:
try:
log_info("Starting synchronisation with Voltron")
# register for notifications
notification = BinjatronNotification(view)
view.register_notification(notification)
# Start the client
vers = client.perform_request("version")
client.start(build_requests=build_requests, callback=callback)
syncing = True
except:
log_info("Couldn't connect to Voltron")
else:
log_info("Already synchronising with Voltron")
def stop(view):
global syncing, client, slide, notification
if syncing:
log_info("Stopping synchronisation with Voltron")
# clear any colours we've set
if last_pc_addr:
func = _get_function(view, last_pc_addr)
func.set_auto_instr_highlight(last_pc_addr, last_pc_addr_colour)
for addr in last_bp_addrs:
func = _get_function(view, addr)
func.set_auto_instr_highlight(addr, no_colour)
# stop the voltron client
client.stop()
client = Client()
# unregister notifications
view.unregister_notification(notification)
notification = None
syncing = False
slide = 0
else:
log_alert("Not synchronising with Voltron")
def set_breakpoint(view, address):
global vers
try:
if not vers:
vers = client.perform_request("version")
# build a breakpoint set command for the debugger
if 'lldb' in vers.host_version:
cmd = "breakpoint set -a 0x{:x}".format(address + slide)
elif 'gdb' in vers.host_version:
cmd = "break *0x{:x}".format(address + slide)
else:
raise Exception("Debugger host version {} not supported".format(vers.host_version))
# send it
res = client.perform_request("command", command=cmd, block=False)
if res.is_error:
raise Exception("Failed to set breakpoint: {}".format(res))
# update the voltron views
res = client.perform_request("command", command="voltron update", block=False)
# add colour in binja
func = _get_function(view, address)
if func:
func.set_auto_instr_highlight(address, bp_colour)
except:
log_alert("Failed to set breakpoint")
def delete_breakpoint(view, address):
global vers, last_bp_addrs
try:
if not vers:
vers = client.perform_request("version")
# get a list of breakpoints from the debugger and find the one we're after
res = client.perform_request("breakpoints")
bp_id = None
if res.is_success:
for bp in res.breakpoints:
for location in bp['locations']:
if address == location['address'] - slide:
bp_id = bp['id']
break
# build a breakpoint delete command for the debugger
if 'lldb' in vers.host_version:
cmd = "breakpoint delete {}".format(bp_id)
elif 'gdb' in vers.host_version:
cmd = "delete {}".format(bp_id)
else:
raise Exception("Debugger host version {} not supported".format(vers.host_version))
# send it
res = client.perform_request("command", command=cmd, block=False)
if res.is_error:
raise Exception("Failed to delete breakpoint: {}".format(res))
# update the voltron views
res = client.perform_request("command", command="voltron update", block=False)
# remove the breakpoint colour in binja
func = _get_function(view, address)
if func:
func.set_auto_instr_highlight(address, no_colour)
last_bp_addrs = filter(lambda k : k != address, last_bp_addrs)
except:
log_alert("Failed to delete breakpoint")
def set_slide(view, address):
global slide
if 'async' in vers.capabilities:
# if we're using a debugger that supports async, grab the current PC
res = client.perform_request("registers", registers=["pc"], block=False)
pc = res.registers.values()[0]
else:
# otherwise we just have to use the last PC we saved
if last_pc_addr == 0:
log_alert("Your debugger does not support async API access, and Binary Ninja hasn't received any data from it yet. Please run the `voltron update` command in the debugger, or step the debugger, or let it run until it hits a breakpoint so Binjatron can get the register state.")
else:
pc = last_pc_addr
slide = pc - address
# if we have an async debugger, we can update now. otherwise we'll have to wait for the user to step again
if 'async' in vers.capabilities:
client.update()
def clear_slide(view):
global slide
slide = 0
def custom_request(request, args, alert=True):
""" Allows external code to pass arbitrary commands to the voltron client
request: type of request - usually 'command'
args: dict containing keyword arguments for the request
alert: boolean indicating whether errors should result in a popup or simply
log to the console. Defaults to True."""
global vers
client_result = None
try:
if not vers:
vers = client.perform_request("version")
if 'lldb' in vers.host_version or 'gdb' in vers.host_version:
cmd = request
else:
raise Exception("Debugger host version {} not supported".format(vers.host_version))
client_result = client.perform_request(request, **args)
if client_result.is_error:
raise Exception("\"" + cmd + "\": {}".format(client_result))
# update the voltron views
client.perform_request("command", command="voltron update", block=False)
except:
log_info(sys.exc_info()[1])
if alert:
log_alert(request + " failed: " + str(args))
else:
log_info(request + " failed: " + str(args))
# Even if we encountered an exception, we return the results so external code can
# handle the error if necessary.
return client_result
def register_sync_callback(cb, should_delete=False):
    """ Allows external code to register a callback to be run upon a successful sync
cb: function pointer to the callback. Gets `results` as an argument
should_delete: boolean indicating whether the callback should be removed from
the list after a single call. Defaults to False. """
global sync_callbacks
sync_callbacks.append((cb, should_delete))
def sync_state():
""" Return the sync state so that external code can determine whether voltron is currently syncing with binjatron """
return syncing
class BinjatronNotification(BinaryDataNotification):
def __init__(self, view):
self.view = view
def data_written(self, view, offset, length):
log_info("data_written({:x}, {})".format(offset, length))
# get the data that was written
data = view.read(offset, length)
# write it to memory in the debugger
res = client.perform_request("write_memory", address=offset + slide, value=data, block=False)
if not res.is_success:
log_error("Failed to write memory in debugger: {}".format(res))
# update the voltron views
res = client.perform_request("command", command="voltron update", block=False)
def data_inserted(self, view, offset, length):
log_info("data_inserted()")
def data_removed(self, view, offset, length):
log_info("data_removed()")
PluginCommand.register("Voltron: Sync", "", sync)
PluginCommand.register("Voltron: Stop syncing", "", stop)
PluginCommand.register_for_address("Voltron: Breakpoint set", "", set_breakpoint)
PluginCommand.register_for_address("Voltron: Breakpoint clear", "", delete_breakpoint)
PluginCommand.register_for_address("Voltron: Slide set", "", set_slide)
PluginCommand.register("Voltron: Slide clear", "", clear_slide)
``` |
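A sketch of how another Binary Ninja plugin might drive these helpers is shown below. The import name, the `stepi` command and the payload layout (registers first, breakpoints second, as built in `sync()`) are assumptions for illustration, not a guaranteed API.
```python
# Illustrative only -- assumes this plugin is importable as `binjatron` and a
# gdb/lldb host that understands the chosen command.
import binjatron


def on_sync(results):
    # results mirrors build_requests() in sync(): [registers, breakpoints]
    regs = results[0].registers
    print("pc = {:#x}".format(list(regs.values())[0]))


if binjatron.sync_state():
    # run once on the next successful sync, then deregister
    binjatron.register_sync_callback(on_sync, should_delete=True)
    # forward an arbitrary command to the debugger via Voltron
    binjatron.custom_request("command", {"command": "stepi", "block": False}, alert=False)
```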
{
"source": "aagallag/mopidy-subidy",
"score": 2
} |
#### File: mopidy-subidy/mopidy_subidy/__init__.py
```python
from __future__ import unicode_literals
import os
from mopidy import ext, config
__version__ = '0.2.1'
class SubidyExtension(ext.Extension):
dist_name = 'Mopidy-Subidy'
ext_name = 'subidy'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(SubidyExtension, self).get_config_schema()
schema['url'] = config.String()
schema['username'] = config.String()
schema['password'] = config.Secret()
schema['legacy_auth'] = config.Boolean(optional=True)
schema['api_version'] = config.String(optional=True)
return schema
def setup(self, registry):
from .backend import SubidyBackend
registry.add('backend', SubidyBackend)
```
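For reference, a hypothetical `mopidy.conf` fragment matching the schema above might look like the following; the host, credentials and API version are placeholders, and `enabled` is the standard Mopidy extension switch.
```python
# Hypothetical [subidy] section shown as a string for illustration; the keys
# mirror get_config_schema() above.
EXAMPLE_SUBIDY_CONF = """
[subidy]
enabled = true
url = https://music.example.org
username = listener
password = secret
legacy_auth = false
api_version = 1.14.0
"""
```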
#### File: mopidy-subidy/mopidy_subidy/library.py
```python
from mopidy import backend, models
from mopidy.models import Ref, SearchResult
from mopidy_subidy import uri
import logging
logger = logging.getLogger(__name__)
class SubidyLibraryProvider(backend.LibraryProvider):
def __create_vdirs():
vdir_templates = [
dict(id="root", name="Subsonic"),
dict(id="artists", name="Artists"),
dict(id="albums", name="Albums"),
dict(id="rootdirs", name="Directories"),
]
# Create a dict with the keys being the `id`s in `vdir_templates`
# and the values being objects containing the vdir `id`,
# the human readable name as `name`, and the URI as `uri`.
vdirs = {}
for template in vdir_templates:
vdir = template.copy()
vdir.update(uri=uri.get_vdir_uri(vdir["id"]))
vdirs[template['id']] = vdir
return vdirs
_vdirs = __create_vdirs()
def __raw_vdir_to_ref(vdir):
if vdir is None:
return None
return Ref.directory(
name=vdir['name'],
uri=vdir['uri'])
root_directory = __raw_vdir_to_ref(_vdirs['root'])
_raw_vdir_to_ref = staticmethod(__raw_vdir_to_ref)
def __init__(self, *args, **kwargs):
super(SubidyLibraryProvider, self).__init__(*args, **kwargs)
self.subsonic_api = self.backend.subsonic_api
def browse_songs(self, album_id):
return self.subsonic_api.get_songs_as_refs(album_id)
def browse_albums(self, artist_id=None):
return self.subsonic_api.get_albums_as_refs(artist_id)
def browse_artists(self):
return self.subsonic_api.get_artists_as_refs()
def browse_rootdirs(self):
return self.subsonic_api.get_rootdirs_as_refs()
def browse_diritems(self, directory_id):
return self.subsonic_api.get_diritems_as_refs(directory_id)
def lookup_song(self, song_id):
song = self.subsonic_api.get_song_by_id(song_id)
if song is None:
return []
else:
return [song]
def lookup_album(self, album_id):
return self.subsonic_api.get_songs_as_tracks(album_id)
def lookup_artist(self, artist_id):
return list(self.subsonic_api.get_artist_as_songs_as_tracks_iter(artist_id))
def lookup_directory(self, directory_id):
return list(self.subsonic_api.get_recursive_dir_as_songs_as_tracks_iter(directory_id))
def lookup_playlist(self, playlist_id):
return self.subsonic_api.get_playlist_as_playlist(playlist_id).tracks
def browse(self, browse_uri):
if browse_uri == uri.get_vdir_uri('root'):
root_vdir_names = ["rootdirs", "artists", "albums"]
root_vdirs = [self._vdirs[vdir_name] for vdir_name in root_vdir_names]
sorted_root_vdirs = sorted(root_vdirs, key=lambda vdir: vdir["name"])
return [self._raw_vdir_to_ref(vdir) for vdir in sorted_root_vdirs]
elif browse_uri == uri.get_vdir_uri("rootdirs"):
return self.browse_rootdirs()
elif browse_uri == uri.get_vdir_uri("artists"):
return self.browse_artists()
elif browse_uri == uri.get_vdir_uri("albums"):
return self.browse_albums()
else:
uri_type = uri.get_type(browse_uri)
if uri_type == uri.DIRECTORY:
return self.browse_diritems(uri.get_directory_id(browse_uri))
elif uri_type == uri.ARTIST:
return self.browse_albums(uri.get_artist_id(browse_uri))
elif uri_type == uri.ALBUM:
return self.browse_songs(uri.get_album_id(browse_uri))
else:
return []
def lookup_one(self, lookup_uri):
type = uri.get_type(lookup_uri)
if type == uri.ARTIST:
return self.lookup_artist(uri.get_artist_id(lookup_uri))
if type == uri.ALBUM:
return self.lookup_album(uri.get_album_id(lookup_uri))
if type == uri.DIRECTORY:
return self.lookup_directory(uri.get_directory_id(lookup_uri))
if type == uri.SONG:
return self.lookup_song(uri.get_song_id(lookup_uri))
if type == uri.PLAYLIST:
return self.lookup_playlist(uri.get_playlist_id(lookup_uri))
def lookup(self, uri=None, uris=None):
if uris is not None:
return dict((uri, self.lookup_one(uri)) for uri in uris)
if uri is not None:
return self.lookup_one(uri)
return None
def refresh(self, uri):
pass
    def search_uri(self, lookup_uri):
type = uri.get_type(lookup_uri)
if type == uri.ARTIST:
artist = self.lookup_artist(uri.get_artist_id(lookup_uri))
if artist is not None:
return SearchResult(artists=[artist])
elif type == uri.ALBUM:
album = self.lookup_album(uri.get_album_id(lookup_uri))
if album is not None:
return SearchResult(albums=[album])
elif type == uri.SONG:
song = self.lookup_song(uri.get_song_id(lookup_uri))
if song is not None:
return SearchResult(tracks=[song])
return None
def search_by_artist_album_and_track(self, artist_name, album_name, track_name):
tracks = self.search_by_artist_and_album(artist_name, album_name)
track = next(item for item in tracks.tracks if track_name in item.name)
return SearchResult(tracks=[track])
def search_by_artist_and_album(self, artist_name, album_name):
artists = self.subsonic_api.get_raw_artists()
artist = next(item for item in artists if artist_name in item.get('name'))
albums = self.subsonic_api.get_raw_albums(artist.get('id'))
album = next(item for item in albums if album_name in item.get('title'))
return SearchResult(tracks=self.subsonic_api.get_songs_as_tracks(album.get('id')))
def get_distinct(self, field, query):
search_result = self.search(query)
if not search_result:
return []
if field == 'track' or field == 'title':
return [track.name for track in (search_result.tracks or [])]
if field == 'album':
return [album.name for album in (search_result.albums or [])]
if field == 'artist':
if not search_result.artists:
return [artist.name for artist in self.browse_artists()]
return [artist.name for artist in search_result.artists]
def search(self, query=None, uris=None, exact=False):
if 'artist' in query and 'album' in query and 'track_name' in query:
return self.search_by_artist_album_and_track(query.get('artist')[0], query.get('album')[0], query.get('track_name')[0])
if 'artist' in query and 'album' in query:
return self.search_by_artist_and_album(query.get('artist')[0], query.get('album')[0])
if 'artist' in query:
return self.subsonic_api.find_as_search_result(query.get('artist')[0])
if 'any' in query:
return self.subsonic_api.find_as_search_result(query.get('any')[0])
return SearchResult(artists=self.subsonic_api.get_artists_as_artists())
``` |
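The `search()` method above branches on which keys Mopidy puts in the query dict; the shapes below illustrate that mapping. Constructing the provider itself needs a live backend with a working `subsonic_api`, so these are shape examples only, with artist and album names as placeholders.
```python
# Query shapes Mopidy hands to SubidyLibraryProvider.search(); comments show
# which branch above handles each.
query_artist_album_track = {
    'artist': ['Nirvana'],          # -> search_by_artist_album_and_track(...)
    'album': ['Nevermind'],
    'track_name': ['Lithium'],
}
query_artist_album = {'artist': ['Nirvana'], 'album': ['Nevermind']}  # -> search_by_artist_and_album(...)
query_artist_only = {'artist': ['Nirvana']}   # -> subsonic_api.find_as_search_result('Nirvana')
query_free_text = {'any': ['nevermind']}      # -> subsonic_api.find_as_search_result('nevermind')
```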
{
"source": "AAGaming00/FetchCord",
"score": 2
} |
#### File: FetchCord/fetch_cord/run_rpc.py
```python
from pypresence import Presence
import time
import sys
import os
import psutil
# import info about system
from fetch_cord.args import parse_args
from fetch_cord.testing import gpuid, appid, cpuappid
if os.name != "nt":
from fetch_cord.testing import desktopid, termappid, hostappid
from fetch_cord.out import packagesline, termid, shellid, kernelline, shell_line, termfontline, \
dewmid, termline, lapordesk, hostline, resline
from fetch_cord.out import gpuinfo, sysosline, sysosid, cpuinfo
if os.name == "nt":
from fetch_cord.out import moboline, memline
uptime = psutil.boot_time()
args = parse_args()
def main():
if os.name != "nt":
if hostline == "" and args.nodistro and args.noshell and args.nohardware:
print("ERROR: no hostline is available!")
sys.exit(1)
# printing info with debug switch
if args.debug:
print("run-rpc")
print(uptime)
print(appid)
print(gpuid)
if os.name != "nt":
print(packagesline[0])
if sysosid.lower() == "macos":
runmac()
elif os.name == "nt":
wandowz()
else:
loonix()
print("Connecting")
time.sleep(5)
# discord uses unix time to interpret time for rich presence; this is uptime in unix time
# (set unconditionally, since the macOS and Windows cycles also pass start=start_time)
start_time = float(uptime)
print("RPC connection successful.")
def runmac():
global RPC
from fetch_cord.testing import devicetype, product, bigicon, ver
client_id = '740822755376758944' # macos appid for discord rpc
if args.debug:
print("runmac")
print("devicetype: %s" % devicetype)
print("product %s" % product)
print("bigicon: %s" % bigicon)
print("ver: %s" % ver)
print("uptime: %s" % uptime)
print("client_id: %s" % client_id)
time.sleep(5)
while True:
RPC = Presence(client_id)
RPC.connect()
        RPC.update(state=packagesline[0],  # update state as packages
details=kernelline[0], # update details as kernel
large_image=bigicon, # set icon
large_text=sysosline[0], # set large icon text
small_image=devicetype, # set small image icon
small_text=product, # set small image text
start=start_time)
time.sleep(30)
def custom_time():
ctime = int(args.time)
time.sleep(ctime)
# cycle
def cycle0():
global RPC
if args.debug:
print("cycle 0")
client_id = appid
RPC = Presence(client_id)
RPC.connect()
RPC.update(state=packagesline[0],
details=kernelline[0],
large_image="big",
large_text=sysosline[0],
small_image=desktopid,
small_text=dewmid,
start=start_time)
if args.debug:
print("appid: %s" % client_id)
if args.time:
custom_time()
elif args.nohost and args.nohardware and args.noshell:
time.sleep(9999)
else:
time.sleep(30)
# cycle
def cycle1():
global RPC
if args.debug:
print("cycle 1")
client_id = cpuappid
RPC = Presence(client_id)
RPC.connect()
RPC.update(state=cpuinfo,
details=gpuinfo,
large_image="big",
large_text=cpuinfo,
small_image=gpuid,
small_text=gpuinfo,
start=start_time)
if args.debug:
print("appid: %s" % client_id)
if args.time:
custom_time()
elif args.nodistro and args.noshell and args.nohost:
time.sleep(9999)
else:
time.sleep(30)
# cycle
def cycle2():
global RPC
if args.debug:
print("cycle 2")
client_id = termappid
RPC = Presence(client_id)
RPC.connect()
RPC.update(state=shell_line[0],
details=termfontline,
large_image="big",
large_text=termline[0],
small_image=shellid,
small_text=shell_line[0],
start=start_time)
if args.debug:
print("appid: %s" % client_id)
if args.time:
custom_time()
elif args.nodistro and args.nohardware and args.nohost:
time.sleep(9999)
else:
time.sleep(30)
def cycle3():
# if not then forget it
if hostline:
global RPC
if args.debug:
print("cycle 3")
client_id = hostappid
RPC = Presence(client_id)
RPC.connect()
RPC.update(state=resline,
details=hostline[0],
large_image="big",
large_text=hostline[0],
small_image=lapordesk,
small_text=lapordesk,
start=start_time)
if args.debug:
print("appid: %s" % client_id)
if args.time:
custom_time()
elif args.nodistro and args.nohardware and args.noshell:
time.sleep(9999)
else:
time.sleep(30)
# back from whence you came
else:
loonix()
def w_cycle0():
global RPC
if args.debug:
print("cycle 0")
client_id = appid
RPC = Presence(client_id)
RPC.connect()
RPC.update(state=sysosline[0],
details=memline[0],
large_image="big",
large_text=sysosline[0],
small_image=moboline[0],
small_text=moboline[0],
start=start_time)
if args.debug:
print("appid: %s" % client_id)
if args.time:
custom_time()
elif args.nohardware:
time.sleep(9999)
else:
time.sleep(30)
def w_cycle1():
global RPC
if args.debug:
print("cycle 1")
client_id = cpuappid
RPC = Presence(client_id)
RPC.connect()
RPC.update(state=cpuinfo,
details=gpuinfo,
large_image="big",
large_text=cpuinfo,
small_image=gpuid,
small_text=gpuinfo,
start=start_time)
if args.debug:
print("appid: %s" % client_id)
if args.time:
custom_time()
elif args.nodistro:
time.sleep(9999)
else:
time.sleep(30)
def loonix():
try:
while True:
if not args.nodistro:
cycle0()
RPC.clear(pid=os.getpid())
if not args.nohardware:
cycle1()
RPC.clear(pid=os.getpid())
if not args.noshell:
cycle2()
RPC.clear(pid=os.getpid())
if not args.nohost:
cycle3()
RPC.clear(pid=os.getpid())
except KeyboardInterrupt:
print("Closing connection.")
sys.exit(0)
def wandowz():
try:
while True:
if not args.nodistro:
w_cycle0()
RPC.clear(pid=os.getpid())
if not args.nohardware:
w_cycle1()
RPC.clear(pid=os.getpid())
except KeyboardInterrupt:
print("Closing connection.")
sys.exit(0)
```
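A small aside on the `start` parameter used throughout the cycles above: pypresence forwards it as a unix epoch and Discord displays the difference to the current time, which is why passing the boot time makes the presence show system uptime.
```python
# Minimal illustration of the elapsed-time maths Discord performs on `start`.
import time
import psutil

start_time = float(psutil.boot_time())   # what the cycles above pass as start=
elapsed = time.time() - start_time
print("Discord would display roughly {:.0f} seconds of uptime".format(elapsed))
```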
#### File: FetchCord/fetch_cord/testing.py
```python
import os
import sys
from fetch_cord.args import parse_args
from fetch_cord.bash import exec_bash
from fetch_cord.out import cpumodel, cpuvendor, gpuvendor, sysosid
if os.name != "nt":
from fetch_cord.out import wmid, deid, termid, shellid, sysosid, hostline, termline
# appid for discord app
appid = "none"
# number of packages
packages = "none"
# predefine ids
cpuid = "none"
cpuappid = "none"
gpuid = "none"
termappid = "none"
desktopid = "none"
hostappid = "none"
# distros set id and package number
def iUbuntu():
global appid
appid = '740434138036699178'
def iVoid():
global appid
appid = '740484961353597039'
def iOpenSuseLeap():
global appid
appid = '740156532137787433'
def iOpenSuseTumble():
global appid
appid = '742180413132505088'
def iCentos():
global appid
appid = '740483295388631071'
def iArch():
global appid
appid = '740476198437650473'
def iArtix():
global appid
appid = '741918141248045107'
def iFedora():
global appid
appid = '740485660703719464'
def iGentoo():
global appid
appid = '740484380652208140'
def iDebian():
global appid
appid = '740490017218232392'
def iManjaro():
global appid
appid = '740614258177605642'
def iLinuxMint():
global appid
appid = '740633577481568317'
def iLMDE():
global appid
appid = '741726946588622988'
def iPop():
global appid
appid = '740660055925587978'
def iEnde():
global appid
appid = '740809641545564170'
def iNixOS():
global appid
appid = '742887089179197462'
def iWindows10():
global appid
appid = '741949889465942099'
def iWindows8_1():
global appid
appid = '741952065294827520'
def iWindows8():
global appid
appid = '741952179488948324'
def iWindows7():
global appid
appid = '741952383512346696'
def Unknown_distro():
global appid
appid = '742887089179197462'
# MacOS versions
def iHsiera():
global bigicon
bigicon = "hsierria"
def iMojave():
global bigicon
bigicon = "mojave"
def iCatalina():
global bigicon
bigicon = "catalina"
# macOS hardware
def laporp():
global devicetype
if product[0:7] == "MacBook":
devicetype = "laptop"
else:
devicetype = "desktop"
def macos():
global product, devicetype, bigicon, ver
if sysosid.lower() == "macos":
devicetype = "none"
bigicon = "none"
ver = os.popen("sw_vers -productVersion").read()
product = os.popen("sysctl -n hw.model").read()
try:
versions[ver[0:5]]()
except IndexError:
bigicon = "bigslurp"
except KeyError:
print("Unsupported MacOS version")
laporp()
# define desktops and set their ids
def iKde():
global desktopid
desktopid = "kde"
def iGnome():
global desktopid
desktopid = "gnome"
def iXfce():
global desktopid
desktopid = "xfce"
def iCinnamon():
global desktopid
desktopid = "cinnamon"
def iBudgie():
global desktopid
desktopid = "budgie"
def iDeepin():
global desktopid
desktopid = "deepin"
def iMate():
global desktopid
desktopid = "mate"
def iUnity():
    # this is to check whether the user is actually using unity
# or using unity as an xdg value to fix issues with electron apps
if wmid.lower() == "compiz":
global desktopid
desktopid = "unity"
else:
desktopid = wmid
def iPantheon():
global desktopid
desktopid = "pantheon"
def iAero():
global desktopid
desktopid = "aero"
# window managers
def iDwm():
global desktopid
desktopid = "dwm"
def iAwesome():
global desktopid
desktopid = "awesome"
def Ii3():
global desktopid
desktopid = "i3"
def iEnlightenment():
global desktopid
desktopid = "enlightenment"
def iXmonad():
global desktopid
desktopid = "xmonad"
def iBspwm():
global desktopid
desktopid = "bspwm"
def iSway():
global desktopid
desktopid = "sway"
def Unknown_de_wm():
global desktopid
desktopid = 'unknown'
# cpuids
def Ryzen():
global cpuid, cpuappid
cpuid = "Ryzen"
cpuappid = '740752899054895105'
def Intelcorei3():
global cpuid, cpuappid
cpuid = "Intel(R) Core(TM) i3"
cpuappid = '741044208512532570'
def Intelcorei5():
global cpuid, cpuappid
cpuid = "Intel(R) Core(TM) i5"
cpuappid = '741099939198926920'
def Intelcorei7():
global cpuid, cpuappid
cpuid = "Intel(R) Core(TM) i7"
cpuappid = '741100300219187335'
def Intelcorei9():
global cpuid, cpuappid
cpuid = "Intel(R) Core(TM) i9"
cpuappid = '741100622040006719'
def Intelpentium():
global cpuid, cpuappid
cpuid = "Intel(R) Pentium(R)"
cpuappid = '741203845706940467'
def Intelceleron():
global cpuid, cpuappid
cpuid = "Intel(R) Celeron(R)"
cpuappid = '742904581360713849'
def Ryzen3():
global cpuid, cpuappid
cpuid = "AMD Ryzen 3"
cpuappid = '741153175779803146'
def Ryzen5():
global cpuid, cpuappid
cpuid = "AMD Ryzen 5"
cpuappid = '741152732756312125'
def Ryzen7():
global cpuid, cpuappid
cpuid = "AMD Ryzen 7"
cpuappid = '740752899054895105'
def Ryzen9():
global cpuid, cpuappid
cpuid = "AMD Ryzen 9"
cpuappid = '741152930899427364'
def Ryzenth():
global cpuid, cpuappid
cpuid = "AMD Ryzen Threadripper"
cpuappid = '742075019257184338'
def Unknown_cpu():
global cpuid, cpuappid
cpuid = "Unknown CPU"
cpuappid = '742887089179197462'
# gpuids
def Intelgpu():
global gpuid
gpuid = "intel"
def Nvidiagpu():
global gpuid
gpuid = "nvidia"
def Nvidia_intelgpu():
global gpuid
gpuid = "nvidia-intel"
def Nvidia_amdgpu():
global gpuid
gpuid = "nvidia-amd"
def Amdgpu():
global gpuid
gpuid = "amd"
def Amd_intelgpu():
global gpuid
gpuid = "amd-intel"
def Nvidia_amd_intelgpu():
# again, why
global gpuid
gpuid = "nvidia-amd-intel"
def Vmwaregpu():
global gpuid
gpuid = "vmware"
def Virtiogpu():
global gpuid
gpuid = "virtio"
def Unknown_gpu():
global gpuid
gpuid = 'unknown'
# terminals
def St():
global termappid
termappid = '741280043220861030'
def Kitty():
global termappid
termappid = '741285676250824725'
def Alacritty():
global termappid
termappid = '741291339945345045'
def Xterm():
global termappid
termappid = '741287143187546125'
def Konsole():
global termappid
termappid = '741286819676553258'
def Gnometerminal():
global termappid
termappid = '741328861115056160'
def Coolretroterm():
global termappid
termappid = '741731097498353794'
def Urxvt():
global termappid
termappid = '743246048968835092'
def Fetchcord():
global termappid
termappid = '742096605502767235'
def Xfce4_terminal():
global termappid
termappid = '744332423072055296'
def Unknown_term():
global termappid
termappid = '742887089179197462'
# shells
def Fish():
global shell
shell = "fish"
def Zsh():
global shell
shell = "zsh"
def Bash():
global shell
shell = "bash"
def Unknown_shell():
global shell
shell = "unknown"
# hosts
def iAsus():
global hostappid
hostappid = "743936082780880928"
def iTUF():
global hostappid
hostappid = '744330890343219262'
def iDell():
global hostappid
hostappid = "743970870631858288"
def iHP():
global hostappid
hostappid = "743971270395297852"
def iLenovo():
global hostappid
hostappid = '744326223412461630'
def iAcer():
global hostappid
hostappid = '744326890512318544'
def Unknown_host():
global hostappid
hostappid = "742887089179197462"
amdcpus = {
"ryzen 3": Ryzen3,
"ryzen 5": Ryzen5,
"ryzen 7": Ryzen7,
"ryzen 9": Ryzen9,
"ryzen threadripper": Ryzenth,
}
intelcpus = {
"intel i3": Intelcorei3,
"intel i5": Intelcorei5,
"intel i7": Intelcorei7,
"intel i9": Intelcorei9,
"intel pentium": Intelpentium,
"intel celeron": Intelceleron,
"pentium": Intelpentium,
}
gpus = {
"intel": Intelgpu,
"nvidia": Nvidiagpu,
"amd": Amdgpu,
"radeon": Amdgpu,
"vmware": Vmwaregpu,
"virtio": Virtiogpu,
# multi GPUs
"nvidiaintel": Nvidia_intelgpu,
"nvidiaamd": Nvidia_amdgpu,
"amdintel": Amd_intelgpu,
"radeonintel": Amd_intelgpu,
"nvidiaamdintel": Nvidia_amd_intelgpu,
}
distros = {
"ubuntu": iUbuntu,
"opensuseleap": iOpenSuseLeap,
"arch": iArch,
"artix": iArtix,
"fedora": iFedora,
"void": iVoid,
"gentoo": iGentoo,
"centos": iCentos,
"debian": iDebian,
"opensusetumbleweed": iOpenSuseTumble,
"manjaro": iManjaro,
"linuxmint": iLinuxMint,
"lmde": iLMDE,
"pop!_os": iPop,
"endeavouros": iEnde,
"windows10": iWindows10,
"windows7": iWindows7,
"windows8": iWindows8,
"windows8.1": iWindows8_1,
"nixos": iNixOS,
}
versions = {
"10.13": iHsiera,
"10.14": iMojave,
"10.15": iCatalina
}
# window managers
windowmanagers = {
"dwm": iDwm,
"i3": Ii3,
"awesome": iAwesome,
"enlightenment": iEnlightenment,
"bspwm": iBspwm,
"xmonad": iXmonad,
"sway": iSway,
}
# desktops
desktops = {
"kde": iKde,
"plasma": iKde,
"xfce": iXfce,
"budgie": iBudgie,
"gnome": iGnome,
"deepin": iDeepin,
"cinnamon": iCinnamon,
"mate": iMate,
"unity": iUnity,
"aero": iAero,
"pantheon": iPantheon,
}
terminals = {
"st": St,
"kitty": Kitty,
"alacritty": Alacritty,
"xterm": Xterm,
"konsole": Konsole,
"dolphin": Konsole,
"gnome-terminal": Gnometerminal,
"cool-retro-term": Coolretroterm,
"urxvt": Urxvt,
"xfce4-terminal": Xfce4_terminal,
}
shells = {
"fish": Fish,
"zsh": Zsh,
"bash": Bash,
}
hosts = {
"inspiron": iDell,
"latitude": iDell,
"g3": iDell,
"hp": iHP,
"tuf": iTUF,
"asus": iAsus,
"acer": iAcer,
"thinkpad": iLenovo,
"lenovo": iLenovo,
}
args = parse_args()
hostlist = ['Acer', 'TUF', 'HP', 'ThinkPad', 'Inspiron', 'Lenovo', 'Latitude', 'G3']
hostid = ""
if os.name != "nt":
if hostline != "":
hostsplit = hostline[0].split()
hostid = []
for line in range(len(hostsplit)):
if hostsplit[line] in hostlist:
hostid.append(hostsplit[line].rstrip('\n'))
try:
hostid = hostid[0]
except IndexError:
hostid = ""
pass
terminallist = ["st", "kitty", "alacritty", "xterm", "konsole", "gnome-terminal", "cool-retro-term", "urxvt"]
if args.terminal:
if args.terminal in terminallist:
termid = args.terminal
termline[0] = "Terminal: %s" % args.terminal
else:
print("\nInvalid terminal, only %s are supported.\n"
"Please make a github issue if you would like to have your terminal added.\n"
"https://github.com/MrPotatoBobx/FetchCord" % terminallist)
sys.exit(1)
if args.debug:
print("hostsplit: %s" % hostsplit)
print("hostid: %s" % hostid)
# bunch of try/except blocks to catch KeyErrors and tell the end user that their distro/other components aren't supported
if sysosid.lower() not in ["macos", "windows10", "windows8", "windows7", "windows8.1"]:
try:
terminals[termid.lower()]()
except KeyError:
print("Unsupported terminal, contact me on GitHub to resolve this. (KeyError)")
Unknown_term()
try:
shells[shellid.lower()]()
except KeyError:
print("Unsupported shell, contact me on GitHub to resolve this. (KeyError)")
Unknown_shell()
try:
if sysosid.lower() != "macos":
hosts[hostid.lower()]()
except KeyError:
print("Unknown host, contact us on GitHub to resolve this. (KeyError)")
Unknown_host()
try:
if deid != "N/A":
desktops[deid.lower()]()
except KeyError:
print("Unsupported DE, contact me on GitHub to resolve this. (KeyError)")
Unknown_de_wm()
try:
if deid == "N/A":
windowmanagers[wmid.lower()]()
except KeyError:
print("Unsupported WM, contact me on GitHub to resolve this. (KeyError)")
Unknown_de_wm()
if sysosid.lower() != "macos":
try:
distros[sysosid.lower()]()
except KeyError:
print("Unsupported distro, contact me on the GitHub page to resolve this. (KeyError)")
Unknown_distro()
try:
if cpuvendor == "AMD":
amdcpus[cpumodel.lower()]()
elif cpuvendor in ["Intel", "Pentium"]:
intelcpus[cpumodel.lower()]()
except KeyError:
print("Unknown CPU, contact me on GitHub to resolve this. (KeyError)")
Unknown_cpu()
try:
gpus[gpuvendor.lower()]()
except KeyError:
print("Unknown GPU, contact me on GitHub to resolve this. (KeyError)")
Unknown_gpu()
elif sysosid.lower() == "macos":
macos()
if args.debug:
print("testing")
if os.name != "nt":
print("deid: %s" % deid)
print("wmid: %s" % wmid)
print("termid: %s" % termid)
print("shellid: %s" % shellid)
print("hostid: %s" % hostid)
print("cpumodel: %s" % cpumodel)
print("gpuvendor: %s" % gpuvendor)
``` |
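The tables above dispatch on lowercased hardware/software names and fall back to an Unknown_* setter when a lookup raises KeyError. A standalone sketch of that pattern, reusing two terminal application ids from the file; the resolve() helper is illustrative and not part of FetchCord itself:
```python
# Standalone sketch of the dict-dispatch-with-fallback pattern used above.
# resolve() is an illustrative helper, not part of FetchCord.
APP_IDS = {
    "st": "741280043220861030",
    "kitty": "741285676250824725",
}
UNKNOWN_ID = "742887089179197462"

def resolve(term_name: str) -> str:
    try:
        return APP_IDS[term_name.lower()]
    except KeyError:
        print("Unsupported terminal %r, using the generic id." % term_name)
        return UNKNOWN_ID

print(resolve("Kitty"))    # known terminal -> its Discord application id
print(resolve("wezterm"))  # unknown terminal -> fallback id
```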
{
"source": "Aagamshah9/EE559-Hand-Postures-Mathematical-Pattern-Recognition-",
"score": 3
} |
#### File: Aagamshah9/EE559-Hand-Postures-Mathematical-Pattern-Recognition-/plotSVMBoundaries.py
```python
import numpy as np
import matplotlib.pyplot as plt
def plotSVMBoundaries(training, label_train, classifier, support_vectors = []):
# Plot the decision boundaries and data points for the given classifier
#
# training: training data
# label_train: class labels corresponding to the training data
# classifier: sklearn classifier model, must have a predict() function
#
# Total number of classes
nclass = max(np.unique(label_train))
# Set the feature range for plotting
max_x = np.ceil(max(training[:, 0])) + 0.01
min_x = np.floor(min(training[:, 0])) - 0.01
max_y = np.ceil(max(training[:, 1])) + 0.01
min_y = np.floor(min(training[:, 1])) - 0.01
xrange = (min_x, max_x)
yrange = (min_y, max_y)
# step size for how finely you want to visualize the decision boundary.
inc = 0.005
# generate grid coordinates. this will be the basis of the decision
# boundary visualization.
(x, y) = np.meshgrid(np.arange(xrange[0], xrange[1]+inc/100, inc), np.arange(yrange[0], yrange[1]+inc/100, inc))
# size of the (x, y) image, which will also be the size of the
# decision boundary image that is used as the plot background.
image_size = x.shape
xy = np.hstack( (x.reshape(x.shape[0]*x.shape[1], 1, order='F'), y.reshape(y.shape[0]*y.shape[1], 1, order='F')) ) # make (x,y) pairs as a bunch of row vectors.
# distance measure evaluations for each (x,y) pair.
pred_label = classifier.predict(xy)
print(pred_label)
# reshape the idx (which contains the class label) into an image.
decisionmap = pred_label.reshape(image_size, order='F')
#turn on interactive mode
plt.figure()
plt.ion()
#show the image, give each coordinate a color according to its class label
plt.imshow(decisionmap, extent=[xrange[0], xrange[1], yrange[0], yrange[1]], origin='lower')
unique_labels = np.unique(label_train)
# plot the class training data.
plt.plot(training[label_train == unique_labels[0], 0],training[label_train == unique_labels[0], 1], 'rx')
plt.plot(training[label_train == unique_labels[1], 0],training[label_train == unique_labels[1], 1], 'go')
if nclass == 3:
plt.plot(training[label_train == unique_labels[2], 0],training[label_train == unique_labels[2], 1], 'b*')
# include legend for training data
if nclass == 3:
l = plt.legend(('Class 1', 'Class 2', 'Class 3'), loc=2)
else:
l = plt.legend(('Class 1', 'Class 2'), loc=2)
plt.gca().add_artist(l)
# plot support vectors
if len(support_vectors)>0:
sv_x = support_vectors[:, 0]
sv_y = support_vectors[:, 1]
plt.scatter(sv_x, sv_y, s = 100, c = 'blue')
plt.show()
plt.savefig('2d_train.jpg')
``` |
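One way to call the helper above, assuming the file is saved as plotSVMBoundaries.py; the classifier only needs a predict() method, so a linear sklearn SVC is used here purely as an illustration:
```python
# Usage sketch (assumes this module is importable as plotSVMBoundaries).
import numpy as np
from sklearn.svm import SVC
from plotSVMBoundaries import plotSVMBoundaries

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(50, 2) + [2, 2], rng.randn(50, 2) - [2, 2]])  # two 2-D blobs
y = np.array([1] * 50 + [2] * 50)

clf = SVC(kernel="linear").fit(X, y)
plotSVMBoundaries(X, y, clf, support_vectors=clf.support_vectors_)
```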
{
"source": "AaganMaskey/NCIT",
"score": 3
} |
#### File: NCIT/notices/models.py
```python
from django.db import models
from django.utils import timezone
from django.urls import reverse
"""
All the database entities of notices app are described in this module.
"""
class Notice(models.Model):
"""
Notice model represents the notice uploaded by the college admin.
Each notice should have a unique heading/title.
"""
heading = models.CharField(max_length=100, unique=True)
date_posted = models.DateTimeField(default=timezone.now)
body = models.TextField()
author = models.CharField(max_length=100, default='NCIT')
image = models.ImageField(upload_to='uploads/notices/', blank=True)
def get_absolute_url(self):
"""
:return: url of the corresponding notice on basis of heading as notices/{heading}
"""
return reverse('notice', args=(self.heading,))
def __str__(self):
return f'{self.heading}'
@staticmethod
def get_notices_by_date():
"""
:return: list of notices in descending order of date_posted (latest notice first)
"""
return Notice.objects.all().order_by('-date_posted')
@staticmethod
def get_notice(heading):
"""
:type heading: string
:param heading: heading/title of notice
:return: notice object with the heading passed as parameter
"""
return Notice.objects.get(heading=heading)
``` |
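For context, a minimal sketch of how a view might consume these helpers; the view functions and template paths below are hypothetical and not part of the app:
```python
# Hypothetical views built on the Notice helpers above (names are illustrative).
from django.shortcuts import render
from notices.models import Notice

def notice_list(request):
    notices = Notice.get_notices_by_date()  # latest notices first
    return render(request, 'notices/list.html', {'notices': notices})

def notice_detail(request, heading):
    notice = Notice.get_notice(heading)  # looked up by its unique heading
    return render(request, 'notices/detail.html', {'notice': notice})
```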
{
"source": "AaganMaskey/viber-bot-python",
"score": 2
} |
#### File: api/viber_requests/viber_seen_request.py
```python
from future.utils import python_2_unicode_compatible
from ..event_type import EventType
from viberbot.api.viber_requests.viber_request import ViberRequest
class ViberSeenRequest(ViberRequest):
def __init__(self):
super(ViberSeenRequest, self).__init__(EventType.SEEN)
self._message_token = None
self._user_id = None
def from_dict(self, request_dict):
super(ViberSeenRequest, self).from_dict(request_dict)
self._message_token = request_dict['message_token']
self._user_id = request_dict['user_id']
return self
@property
def message_token(self):
return self._message_token
@property
def user_id(self):
return self._user_id
@python_2_unicode_compatible
def __str__(self):
return u"ViberSeenRequest [{0}, message_token={1}, user_id={2}]" \
.format(super(ViberSeenRequest, self).__str__(), self._message_token, self._user_id)
``` |
{
"source": "a-agarkov/otus-python-2017-11",
"score": 3
} |
#### File: 3_OOP/otus_hw3_scoring_api/scoring.py
```python
import hashlib
import json
import datetime as dt
def get_score(store, phone, email, birthday=None, gender=None, first_name=None, last_name=None):
try:
key_parts = [first_name or "",
last_name or "",
dt.datetime.strptime('01.02.1990', '%d.%m.%Y').strftime("%Y%m%d")]
key = "uid:" + hashlib.md5("".join(key_parts).encode()).hexdigest()
except:
key = None
# try get from cache,
# fallback to heavy calculation in case of cache miss
cached_val = None
if key:
cached_val = store.cache_get(key, collection='score_collection', target_value_name='score')
score = cached_val or 0
if cached_val:
return score
if phone:
score += 1.5
if email:
score += 1.5
if birthday and gender:
score += 1.5
if first_name and last_name:
score += 0.5
# cache for 60 minutes
store.cache_set(key=key,
value=score,
expire_after_seconds=60 * 60,
collection='score_collection',
target_value_name='score')
return score
def get_interests(store, cid):
r = store.get("i:%s" % cid)
return json.loads(r) if r else []
``` |
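get_score only needs a store object exposing cache_get/cache_set; a minimal in-memory stand-in (not the project's real store class) is enough to exercise it, assuming the functions above live in scoring.py:
```python
# Minimal in-memory store stub for exercising get_score (illustrative only).
from scoring import get_score

class DictStore:
    def __init__(self):
        self._cache = {}

    def cache_get(self, key, collection=None, target_value_name=None):
        return self._cache.get(key)

    def cache_set(self, key, value, expire_after_seconds=None,
                  collection=None, target_value_name=None):
        self._cache[key] = value

store = DictStore()
# phone + email + first/last name -> 1.5 + 1.5 + 0.5 = 3.5
print(get_score(store, phone="79175002040", email="user@example.com",
                first_name="Ann", last_name="Lee"))
```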
{
"source": "aagarwal1996/additive_trees",
"score": 3
} |
#### File: aagarwal1996/additive_trees/honest_trees.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statistics
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
'''
This Python script computes predictions and MSE for honest trees and forests. For nodes that receive no honest samples, we fall back to the honest average over the nearest ancestor node that does.
'''
def flatten_list(t):
return [item for sublist in t for item in sublist]
def get_test_prediction(decision_path,node_id_to_honest_av,node_id_to_honest_count):
test_pred = 0.0
for node_id in decision_path[::-1]:
if node_id_to_honest_count[node_id] == 0:
continue
else:
test_pred = node_id_to_honest_av[node_id]
break
return test_pred
def get_all_decision_paths(CART,X_honest):
'''
This method returns 1. the decision path of each sample and 2. all node_ids used in decision paths for X_honest
'''
node_indicator = CART.decision_path(X_honest)
leaf_id = CART.apply(X_honest)
sample_id_to_decision_path = {}
node_ids = []
for i in range(len(X_honest)):
sample_id = i
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
sample_id_to_decision_path[i] = node_index
node_ids.append(node_index)
return sample_id_to_decision_path,np.unique(np.array(flatten_list(node_ids)))
def get_honest_leaf_averages(CART,X_honest,y_honest):
X_honest_decsion_paths,X_honest_node_ids = get_all_decision_paths(CART,X_honest)
node_id_to_honest_av = {}
node_id_to_honest_count = {}
all_node_ids = range(CART.tree_.node_count)
for node_id in all_node_ids:
if node_id in X_honest_node_ids:
honest_sample_ids_at_node = [sample_id for sample_id,decision_path in X_honest_decsion_paths.items() if node_id in decision_path]
node_id_to_honest_av[node_id] = y_honest[honest_sample_ids_at_node].mean()
node_id_to_honest_count[node_id] = len(honest_sample_ids_at_node)
else:
node_id_to_honest_av[node_id] = 'nan'
node_id_to_honest_count[node_id] = 0
return node_id_to_honest_av,node_id_to_honest_count
def get_honest_tree_test_preds(CART,X_test,y_test,node_id_to_honest_av,node_id_to_honest_count):
X_test_decision_paths = get_all_decision_paths(CART,X_test)[0]
test_preds = []
#count = 0
for i in range(len(X_test_decision_paths)):
test_sample_decision_path = X_test_decision_paths[i]
test_sample_pred = get_test_prediction(test_sample_decision_path,node_id_to_honest_av,node_id_to_honest_count)
test_preds.append(test_sample_pred)
return test_preds
def get_honest_test_MSE(CART,X_honest,y_honest,X_test,y_test):
node_id_to_honest_av,node_id_to_honest_count = get_honest_leaf_averages(CART,X_honest,y_honest)
test_preds = get_honest_tree_test_preds(CART,X_test,y_test,node_id_to_honest_av,node_id_to_honest_count)
test_MSE = mean_squared_error(test_preds,y_test)
return test_MSE
def get_honest_forest_test_MSE(RF,X_honest,y_honest,X_test,y_test):
def mean(a):
return sum(a) / len(a)
n_tree = len(RF)
all_tree_preds = []
for i in range(n_tree):
node_id_to_honest_av,node_id_to_honest_count = get_honest_leaf_averages(RF[i],X_honest,y_honest)
tree_test_preds = get_honest_tree_test_preds(RF[i],X_test,y_test,node_id_to_honest_av,node_id_to_honest_count)
all_tree_preds.append(tree_test_preds)
# average the per-tree honest predictions, then score them against the test targets
RF_honest_preds = list(map(mean, zip(*all_tree_preds)))
return mean_squared_error(RF_honest_preds,y_test)
``` |
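A synthetic end-to-end check of the tree helpers above: fit CART on one sample, use a second, disjoint sample as the honest set, and score on a held-out test set (assumes this file is importable as honest_trees):
```python
# Synthetic end-to-end check (illustrative; assumes this module is honest_trees.py).
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from honest_trees import get_honest_test_MSE

rng = np.random.RandomState(0)

def make_data(n):
    X = rng.uniform(size=(n, 2))
    y = (X[:, 0] > 0.5).astype(float) + 0.1 * rng.randn(n)
    return X, y

X_train, y_train = make_data(200)
X_honest, y_honest = make_data(200)
X_test, y_test = make_data(200)

cart = DecisionTreeRegressor(min_samples_leaf=5).fit(X_train, y_train)
print(get_honest_test_MSE(cart, X_honest, y_honest, X_test, y_test))
```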
{
"source": "aagarwal1996/early_stopping",
"score": 2
} |
#### File: early_stopping/Simulations/train_models.py
```python
import sys
import math
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score,GridSearchCV
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsRegressor
from generate_data import *
from tree_structure_analysis import *
sys.path.append("../")
'''
This script is used to train the following models:
1. honest and non-honest CART with min_samples_leaf condition (default = 5)
2. honest and non-honest CART with CCP with CV
3. honest and non-honest CART with early stopping
4. KNN
'''
def CART(X_train,y_train,X_honest,y_honest,X_test,y_test,honest = False):
if honest == False:
CART = DecisionTreeRegressor()
CART.fit(X_train,y_train)
CART_preds = CART.predict(X_test)
return mean_squared_error(CART_preds,y_test)
else:
CART = DecisionTreeRegressor()
CART.fit(X_train,y_train)
honest_test_mse = get_honest_test_MSE(CART,X_honest,y_honest,X_test,y_test)
return honest_test_mse
def CART_CCP(X_train,y_train,X_honest,y_honest,X_test,y_test,sigma,s,folds = 5):
id_threshold = (max(sigma**2,1))/(len(X_train))
alphas = np.geomspace(0.1*id_threshold, 1000*id_threshold, num=5)
scores = []
models = []
for alpha in alphas:
CART = DecisionTreeRegressor(ccp_alpha = alpha)
CART.fit(X_train,y_train)
models.append(CART)
scores.append(cross_val_score(CART, X_train, y_train, cv=folds).mean())
best_CART = models[scores.index(max(scores))]
exact_recovery_indicator = check_tree_structure(best_CART,s)
dishonest_MSE = mean_squared_error(best_CART.predict(X_test),y_test)
#honest_MSE = get_honest_test_MSE(best_CART,X_honest,y_honest,X_test,y_test)
#return honest_MSE,dishonest_MSE
return dishonest_MSE,exact_recovery_indicator
def CART_early_stopping(X_train,y_train,X_honest,y_honest,X_test,y_test,sigma,s):
id_threshold = (max(sigma**2,1))/(len(X_train))
CART_early_stopping = DecisionTreeRegressor(min_impurity_decrease = id_threshold)
CART_early_stopping.fit(X_train,y_train)
exact_recovery_indicator = check_tree_structure(CART_early_stopping,s)
return mean_squared_error(CART_early_stopping.predict(X_test),y_test),exact_recovery_indicator
def KNN(X_train,y_train,X_test,y_test,folds):
knn_regressor = KNeighborsRegressor()
num_samples = len(X_train)
param_grid = {'n_neighbors': np.arange(1, round(3*math.log(num_samples)))}
knn_gscv = GridSearchCV(knn_regressor, param_grid, cv=folds)
knn_gscv.fit(X_train, y_train)
optimal_nearest_neighbours = knn_gscv.best_params_['n_neighbors']
optimal_knn_regressor = KNeighborsRegressor(n_neighbors = optimal_nearest_neighbours)
optimal_knn_regressor.fit(X_train,y_train)
knn_mse = mean_squared_error(optimal_knn_regressor.predict(X_test),y_test)
return knn_mse
def train_all_models(X_train,y_train,X_honest,y_honest,X_test,y_test,sigma,s,folds = 5):
#honest_CART = CART(X_train,y_train,X_honest,y_honest,X_test,y_test,honest = True)
#dishonest_CART = CART(X_train,y_train,X_honest,y_honest,X_test,y_test,honest = False)
CART_MSE = CART(X_train,y_train,X_honest,y_honest,X_test,y_test,honest = False)
CART_CCP_MSE,CART_CCP_exact_recovery_indicator = CART_CCP(X_train,y_train,X_honest,y_honest,X_test,y_test,sigma,s,folds)
CART_early_stopping_MSE,CART_early_stopping_exact_recovery_indicator = CART_early_stopping(X_train,y_train,X_honest,y_honest,X_test,y_test,sigma,s)
knn_mse = KNN(X_train,y_train,X_test,y_test,folds = folds)
model_mses = [CART_MSE,CART_CCP_MSE,CART_early_stopping_MSE,knn_mse]
model_recovery_probability = [CART_CCP_exact_recovery_indicator,CART_early_stopping_exact_recovery_indicator]
#honest_CART_CCP,dishonest_CART_CCP = CART_CCP(X_train,y_train,X_honest,y_honest,X_test,y_test,sigma,k = 5)
return model_mses,model_recovery_probability
#CART_MSE,CART_CCP_MSE,CART_early_stopping_MSE,knn_mse
#,honest_CART_CCP
#,,dishonest_CART_CCP
``` |
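An illustrative call of the KNN helper above on synthetic data; it assumes this file is importable as train_models together with the repo's generate_data.py and tree_structure_analysis.py modules that it imports at the top:
```python
# Illustrative call of the CV-tuned KNN baseline (synthetic data only).
import numpy as np
from train_models import KNN

rng = np.random.RandomState(0)
X = rng.uniform(size=(300, 2))
y = np.sin(4 * X[:, 0]) + 0.1 * rng.randn(300)

X_train, y_train, X_test, y_test = X[:200], y[:200], X[200:], y[200:]
print(KNN(X_train, y_train, X_test, y_test, folds=5))  # test MSE of the tuned KNN
```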
{
"source": "aagarwal1999/194-web-app",
"score": 2
} |
#### File: data/covid_kaggle/covid_kaggle_dataset.py
```python
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
import kaggle
from pathlib import Path
class CovidDataset(Dataset):
def __init__(self, dataframe=None, tokenizer=None, source_len=None, summ_len=None, use_title=False):
self.tokenizer = tokenizer
self.data = dataframe
self.source_len = source_len
self.summ_len = summ_len
if use_title:
self.text = self.data.title
else:
self.text = self.data.abstract
self.ctext = self.data.text_body
def __len__(self):
return len(self.text)
def __getitem__(self, index):
ctext = str(self.ctext[index])
ctext = ' '.join(ctext.split())
text = str(self.text[index])
text = ' '.join(text.split())
# prepend the T5 task prefix to the source text before tokenization
source = self.tokenizer.batch_encode_plus(['summarize: ' + ctext], max_length= self.source_len, pad_to_max_length=True,return_tensors='pt')
target = self.tokenizer.batch_encode_plus([text], max_length= self.summ_len, pad_to_max_length=True,return_tensors='pt')
source_ids = source['input_ids'].squeeze()
source_mask = source['attention_mask'].squeeze()
target_ids = target['input_ids'].squeeze()
target_mask = target['attention_mask'].squeeze()
return {
'source_ids': source_ids.to(dtype=torch.long),
'source_mask': source_mask.to(dtype=torch.long),
'target_ids': target_ids.to(dtype=torch.long),
'target_ids_y': target_ids.to(dtype=torch.long)
}
```
#### File: data/data_old/scraping.py
```python
import requests
from bs4 import BeautifulSoup
import json
from newspaper import Article
import pandas as pd
from urllib.parse import urlparse
# from outfile import run
import csv
# scraping function
# def medicalnews_rss():
# try:
# r = requests.get('https://medicalxpress.com/rss')
# return print('The scraping job succeeded: ', r.status_code)
# except Exception as e:
# print('The scraping job failed. See exception: ')
# print(e)
# print('Starting scraping')
# medicalnews_rss()
# print('Finished scraping')
# def medicalnews_rss():
# try:
# r = requests.get('https://medicalxpress.com/rss')
# soup = BeautifulSoup(r.content, features='xml')
# return print(soup)
# except Exception as e:
# print('The scraping job failed. See exception: ')
# print(e)
def save_function(article_list):
with open('medrss.txt', 'w') as outfile:
json.dump(article_list, outfile)
def save_text(article_list):
text_data=[]
for a in article_list:
article = Article(a['link'])
article.download()
article.parse()
article.nlp()
text_data.append([a['title'], article.text, urlparse(a['link']).netloc])
df = pd.DataFrame(text_data, columns = ['Title', 'Text', 'Domain'])
# filename = 'data_links_data.csv' ## TODO: clean this up
filename = "".join(args.input.split('.')[:-1]) + '_data.csv'
df.to_csv(filename)
def test_article(article_text):
# crude validity check: reject very short articles and obvious ad copy
# (`adwords` is assumed to be an iterable of ad-related strings defined elsewhere)
valid = True
words = article_text.split()
if len(words) < 10:
valid = False
elif any(ad in article_text for ad in adwords):
valid = False
return valid
def medicalnews_rss(source_links_list):
article_list = []
# try:
for rss_link in source_links_list:
try:
# import pdb; pdb.set_trace()
r = requests.get(rss_link[0])
soup = BeautifulSoup(r.content, features='xml')
articles = soup.findAll('item')
for a in articles:
title = a.find('title').text
link = a.find('link').text
published = a.find('pubDate').text
article = {
'title': title,
'link': link,
'published': published
}
article_list.append(article)
except:
print('This link is currently not accessible: ', rss_link[0])
# processedlist = run()
# save_function(article_list)
return save_text(article_list)
# except Exception as e:
# print('The scraping job failed. See exception: ')
# print(e)
if __name__ == "__main__":
# data = pd.read_csv("list_urls.csv")
# links = data[data.columns[1]]
# source_links_list = links.values.tolist()
# source_links = ['https://www.medscape.com/cx/rssfeeds/2700.xml', 'https://www.medicaldaily.com/rss', 'https://www.medpagetoday.com/rss/headlines.xml', ]
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, default='link_files/data_links.csv')
args = parser.parse_args()
# import pdb; pdb.set_trace()
with open(args.input) as f:
reader = csv.reader(f)
data = list(reader)
print(data)
medicalnews_rss(data)
```
#### File: models/t5/t5_finetune.py
```python
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
# Importing the T5 modules from huggingface/transformers
from transformers import T5Tokenizer, T5ForConditionalGeneration
import wandb
from torch import cuda
device = 'cuda' if cuda.is_available() else 'cpu'
from transformers import T5ForConditionalGeneration, T5Tokenizer
class CustomDataset(Dataset):
def __init__(self, dataframe, tokenizer, source_len, summ_len, use_title=False):
self.tokenizer = tokenizer
self.data = dataframe
self.source_len = source_len
self.summ_len = summ_len
if use_title:
self.text = self.data.title
else:
self.text = self.data.abstract
self.ctext = self.data.text_body
def __len__(self):
return len(self.text)
def __getitem__(self, index):
ctext = str(self.ctext[index])
ctext = ' '.join(ctext.split())
text = str(self.text[index])
text = ' '.join(text.split())
source = self.tokenizer.batch_encode_plus([ctext], max_length= self.source_len, pad_to_max_length=True,return_tensors='pt')
target = self.tokenizer.batch_encode_plus([text], max_length= self.summ_len, pad_to_max_length=True,return_tensors='pt')
source_ids = source['input_ids'].squeeze()
source_mask = source['attention_mask'].squeeze()
target_ids = target['input_ids'].squeeze()
target_mask = target['attention_mask'].squeeze()
return {
'source_ids': source_ids.to(dtype=torch.long),
'source_mask': source_mask.to(dtype=torch.long),
'target_ids': target_ids.to(dtype=torch.long),
'target_ids_y': target_ids.to(dtype=torch.long)
}
def train(epoch, tokenizer, model, device, loader, optimizer):
model.train()
for idx ,data in enumerate(loader, 0):
y = data['target_ids'].to(device, dtype = torch.long)
y_ids = y[:, :-1].contiguous()
# lm_labels = y[:, 1:].clone().detach()
# lm_labels[y[:, 1:] == tokenizer.pad_token_id] = -100
ids = data['source_ids'].to(device, dtype = torch.long)
mask = data['source_mask'].to(device, dtype = torch.long)
loss = model(input_ids=ids, attention_mask=mask, labels=y_ids).loss
# loss = outputs[0]
# import pdb; pdb.set_trace()
if idx %10 == 0:
wandb.log({"Training Loss": loss.item()})
# if _%500==0:
# print(f'Epoch: {epoch}, Loss: {loss.item()}')
print("Epoch: ", epoch, " Batch: ", idx, " Loss: ", loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
if idx == 1000:
break
# xm.optimizer_step(optimizer)
# xm.mark_step()
def validate(epoch, tokenizer, model, device, loader):
model.eval()
predictions = []
actuals = []
with torch.no_grad():
for _, data in enumerate(loader, 0):
y = data['target_ids'].to(device, dtype = torch.long)
ids = data['source_ids'].to(device, dtype = torch.long)
mask = data['source_mask'].to(device, dtype = torch.long)
generated_ids = model.generate(
input_ids = ids,
attention_mask = mask,
max_length=150,
num_beams=2,
repetition_penalty=2.5,
length_penalty=1.0,
early_stopping=True
)
preds = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in generated_ids]
target = [tokenizer.decode(t, skip_special_tokens=True, clean_up_tokenization_spaces=True)for t in y]
if _%100==0:
print("Completed ", _)
# print(f'Completed {_}')
predictions.extend(preds)
actuals.extend(target)
if _ == 10: # evaluate 10 files for now
break
return predictions, actuals
def main():
wandb.init(project="med_transformers_summarization")
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--max_raw_len", default=512, type=int)
parser.add_argument("--epochs", default=2, type=int)
parser.add_argument("--title", action="store_true", default=False)
parser.add_argument("--batch", default=4, type=int)
args = parser.parse_args()
config = wandb.config # Initialize config
config.TRAIN_BATCH_SIZE = 1 # input batch size for training (default: 64)
config.VALID_BATCH_SIZE = args.batch # input batch size for testing (default: 1000)
config.TRAIN_EPOCHS = args.epochs # number of epochs to train (default: 10)
config.VAL_EPOCHS = 1
config.LEARNING_RATE = 1e-4 # learning rate (default: 0.01)
config.SEED = 42 # random seed (default: 42)
config.MAX_LEN = args.max_raw_len
config.SUMMARY_LEN = 150
# Set random seeds and deterministic pytorch for reproducibility
torch.manual_seed(config.SEED) # pytorch random seed
np.random.seed(config.SEED) # numpy random seed
torch.backends.cudnn.deterministic = True
# tokenzier for encoding the text
tokenizer = T5Tokenizer.from_pretrained("t5-base")
# Importing and Pre-Processing the domain data
# Selecting the needed columns only.
# Adding the summarzie text in front of the text. This is to format the dataset similar to how T5 model was trained for summarization task.
df = pd.read_csv('./kaggle_covid-19_open_csv_format.csv',encoding='latin-1')
if args.title:
df = df[['title', 'text_body']]
else:
df = df[['abstract','text_body']]
print("*********")
print("Dataset has {} rows before clean up. ".format(len(df.index)))
df = df.dropna() # drop NaN rows
print("Dataset has {} rows after clean up. ".format(len(df.index)))
print("*********")
df.text_body = 'summarize: ' + df.text_body
print(df.head())
use_title = "title" if args.title else "abstract"
print("----------------- Training configs -----------------")
print("Max length of raw text: ", config.MAX_LEN)
print("Total training epochs: ", config.TRAIN_EPOCHS)
print("Training with {}".format(use_title))
print("Batch size: ", config.TRAIN_BATCH_SIZE)
print("----------------------------------------------------")
# Creation of Dataset and Dataloader
# Defining the train size. So 80% of the data will be used for training and the rest will be used for validation.
train_size = 0.8
train_dataset=df.sample(frac=train_size,random_state = config.SEED)
val_dataset=df.drop(train_dataset.index).reset_index(drop=True)
train_dataset = train_dataset.reset_index(drop=True)
print("FULL Dataset: {}".format(df.shape))
print("TRAIN Dataset: {}".format(train_dataset.shape))
print("TEST Dataset: {}".format(val_dataset.shape))
# Creating the Training and Validation dataset for further creation of Dataloader
training_set = CustomDataset(train_dataset, tokenizer, config.MAX_LEN, config.SUMMARY_LEN, use_title=args.title)
val_set = CustomDataset(val_dataset, tokenizer, config.MAX_LEN, config.SUMMARY_LEN, use_title=args.title)
# Defining the parameters for creation of dataloaders
train_params = {
'batch_size': config.TRAIN_BATCH_SIZE,
'shuffle': True,
'num_workers': 0
}
val_params = {
'batch_size': config.VALID_BATCH_SIZE,
'shuffle': False,
'num_workers': 0
}
# Creation of Dataloaders for testing and validation. This will be used down for training and validation stage for the model.
training_loader = DataLoader(training_set, **train_params)
val_loader = DataLoader(val_set, **val_params)
# Defining the model. We are using t5-base model and added a Language model layer on top for generation of Summary.
# Further this model is sent to device (GPU/TPU) for using the hardware.
import os
if os.path.isdir('local-t5-base'):
model = T5ForConditionalGeneration.from_pretrained("local-t5-base")
else:
model = T5ForConditionalGeneration.from_pretrained("t5-base")
model = model.to(device)
# Defining the optimizer that will be used to tune the weights of the network in the training session.
optimizer = torch.optim.Adam(params = model.parameters(), lr=config.LEARNING_RATE)
# Log metrics with wandb
wandb.watch(model, log="all")
# Training loop
print('Initiating Fine-Tuning for the model on our dataset')
for epoch in range(config.TRAIN_EPOCHS):
train(epoch, tokenizer, model, device, training_loader, optimizer)
from datetime import datetime
now = datetime.now()
dt_string = now.strftime("%m-%d-%H-%M")
model.save_pretrained("./output/{}_t5_{}/model/".format(dt_string, use_title))
tokenizer.save_pretrained("./output/{}_t5_{}/tokenizer/".format(dt_string, use_title))
# Validation loop and saving the resulting file with predictions and acutals in a dataframe.
# Saving the dataframe as predictions.csv
print('Now generating summaries on our fine tuned model for the validation dataset and saving it in a dataframe')
for epoch in range(config.VAL_EPOCHS):
predictions, actuals = validate(epoch, tokenizer, model, device, val_loader)
final_df = pd.DataFrame({'Generated Text': predictions,'Actual Text': actuals})
final_df.to_csv("./output/{}_t5_{}/predictions.csv".format(dt_string, use_title))
print('Output Files generated for review')
# torch.save(model.state_dict(), 'cur_best.pt')
if __name__ == '__main__':
main()
```
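After fine-tuning, the directories written by save_pretrained above can be reloaded for inference. A hedged sketch; the paths are placeholders for whatever ./output/<timestamp>_t5_<abstract|title>/ folder the training run actually produced:
```python
# Inference sketch with a fine-tuned checkpoint (paths below are placeholders).
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

model_dir = "./output/EXAMPLE_t5_abstract/model/"
tokenizer_dir = "./output/EXAMPLE_t5_abstract/tokenizer/"
tokenizer = T5Tokenizer.from_pretrained(tokenizer_dir)
model = T5ForConditionalGeneration.from_pretrained(model_dir).eval()

text = "summarize: " + "Full text of a paper goes here..."
input_ids = tokenizer(text, return_tensors="pt", truncation=True, max_length=512).input_ids
with torch.no_grad():
    output_ids = model.generate(input_ids, max_length=150, num_beams=2, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```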
#### File: 194-web-app/prod_pipeline/metrics.py
```python
from nltk.tokenize import sent_tokenize, word_tokenize
import re
from collections import Counter
import numpy as np
from storage.models.production_data import ProductionData
from storage.models.dataset import Dataset
from data.covid_kaggle.load_dataframe import CovidKaggleDataExplorer
from pathlib import Path
class Metrics:
def __init__(self, path_to_medical_dict):
f = open(path_to_medical_dict, "r")
self.medical_dict = set(f.read().split("\n"))
def compute_metrics(self, dataset):
count = len(dataset)
avg_word_count = 0
avg_sentence_count = 0
avg_num_medical_terms = 0
avg_sentence_length = 0
avg_sentence_variance = 0
medical_words_counter = Counter()
for text in dataset:
sentences = sent_tokenize(text)
words = word_tokenize(text)
avg_sentence_count += len(sentences)
avg_word_count += len(words)
sent_length_arr = [len(word_tokenize(sent)) for sent in sentences]
avg_sentence_length += np.mean(sent_length_arr)
avg_sentence_variance += np.std(sent_length_arr)
non_punct = re.compile('.*[A-Za-z0-9].*')
medical_words = [w.lower() for w in words if non_punct.match(w) and w.lower() in self.medical_dict]
avg_num_medical_terms += len(medical_words)
counts = Counter(medical_words)
medical_words_counter.update(counts)
most_common_medical_words = [key for key, val in medical_words_counter.most_common() if len(key) > 4][:25]
avg_num_medical_terms /= count
avg_sentence_count /= count
avg_word_count /= count
avg_sentence_length /= count
avg_sentence_variance /= count
return {
"most_common_words": ",".join(most_common_medical_words),
"avg_num_medical_terms": avg_num_medical_terms,
"avg_sentence_count": avg_sentence_count,
"avg_sentence_length": avg_sentence_length,
"avg_sentence_variance": avg_sentence_variance,
"avg_word_count": avg_word_count,
"count": count
}
def insert_metrics_into_database(session, production_only=False):
medical_dict_path = Path(__file__).parent.parent / 'data' / 'medical_wordlist' / 'wordlist.txt'
metrics = Metrics(medical_dict_path)
data = session.query(ProductionData).with_entities(ProductionData.data).all()
metrics_production = metrics.compute_metrics([datum[0] for datum in data])
num_rows_updated = session.query(Dataset).filter_by(name="production").update(metrics_production)
if num_rows_updated == 0:
metrics_production.update({"name": "production"})
new_metric = Dataset(**metrics_production)
session.add(new_metric)
if production_only:
return metrics_production
dataset_explorer = CovidKaggleDataExplorer()
data = dataset_explorer.get_data()
metrics_training = metrics.compute_metrics(data)
num_rows_updated = session.query(Dataset).filter_by(name="training").update(metrics_training)
if num_rows_updated == 0:
metrics_training.update({"name": "training"})
new_metric = Dataset(**metrics_training)
session.add(new_metric)
return {
"production": metrics_production,
"training": metrics_training
}
```
#### File: 194-web-app/prod_pipeline/prod.py
```python
from storage.models.production_data import ProductionData
from storage.models.dataset import Dataset
from .metrics import insert_metrics_into_database
import datetime
class ProdOperations:
def __init__(self, session):
self.session = session
# load models
def summarize(self, text):
# predict model 1
# predict model 2
time = datetime.date.today()
one_line_summary = "hello"
one_paragraph_summary = "hi"
new_data_pont = ProductionData(data=text, time=time, one_line_summary=one_line_summary, one_paragraph_summary=one_paragraph_summary)
self.session.add(new_data_pont)
return one_line_summary, one_paragraph_summary
def get_recent_prod_data(self, num_items=10):
prod_data = self.session.query(ProductionData).order_by(ProductionData.time.desc()).limit(num_items).all()
return [{"data": prod_datum.data,
"time": prod_datum.time ,
"one_line_summary": prod_datum.one_line_summary,
"one_paragraph_summary": prod_datum.one_paragraph_summary }
for prod_datum in prod_data]
def get_daily_prod_data_calls(self):
curr_time = datetime.date.today()
num_calls = self.session.query(ProductionData).filter(ProductionData.time == curr_time).count()
print(num_calls)
return num_calls
def get_all_metrics(self):
row2dict = lambda r: {c.name: str(getattr(r, c.name)) for c in r.__table__.columns}
metrics_prod = row2dict(self.session.query(Dataset).filter_by(name='production').first())
metrics_train = row2dict(self.session.query(Dataset).filter_by(name='training').first())
del metrics_prod['id']
del metrics_prod['name']
del metrics_train['id']
del metrics_train['name']
return [{
"name": " ".join([word.capitalize() for word in key.split("_")]),
"training": val,
"production": metrics_prod[key]
} for key, val in metrics_train.items()]
def refresh_metrics(self):
insert_metrics_into_database(self.session, production_only=True)
``` |
{
"source": "aagelii/.Data---Kaggle-Competition-1",
"score": 3
} |
#### File: aagelii/.Data---Kaggle-Competition-1/pyESN.py
```python
import numpy as np
def correct_dimensions(s, targetlength):
"""checks the dimensionality of some numeric argument s, broadcasts it
to the specified length if possible.
Args:
s: None, scalar or 1D array
targetlength: expected length of s
Returns:
None if s is None, else numpy vector of length targetlength
"""
if s is not None:
s = np.array(s)
if s.ndim == 0:
s = np.array([s] * targetlength)
elif s.ndim == 1:
if not len(s) == targetlength:
raise ValueError("arg must have length " + str(targetlength))
else:
raise ValueError("Invalid argument")
return s
def identity(x):
return x
class ESN():
def __init__(self, n_inputs, n_outputs, n_reservoir=200,
spectral_radius=0.95, sparsity=0, noise=0.001, input_shift=None,
input_scaling=None, teacher_forcing=True, feedback_scaling=None,
teacher_scaling=None, teacher_shift=None,
out_activation=identity, inverse_out_activation=identity,
random_state=None, silent=True):
"""
Args:
n_inputs: nr of input dimensions
n_outputs: nr of output dimensions
n_reservoir: nr of reservoir neurons
spectral_radius: spectral radius of the recurrent weight matrix
sparsity: proportion of recurrent weights set to zero
noise: noise added to each neuron (regularization)
input_shift: scalar or vector of length n_inputs to add to each
input dimension before feeding it to the network.
input_scaling: scalar or vector of length n_inputs to multiply
with each input dimension before feeding it to the netw.
teacher_forcing: if True, feed the target back into output units
teacher_scaling: factor applied to the target signal
teacher_shift: additive term applied to the target signal
out_activation: output activation function (applied to the readout)
inverse_out_activation: inverse of the output activation function
random_state: positive integer seed, np.random.RandomState object,
or None to use numpy's builtin RandomState.
silent: suppress messages
"""
# check for proper dimensionality of all arguments and write them down.
self.n_inputs = n_inputs
self.n_reservoir = n_reservoir
self.n_outputs = n_outputs
self.spectral_radius = spectral_radius
self.sparsity = sparsity
self.noise = noise
self.input_shift = correct_dimensions(input_shift, n_inputs)
self.input_scaling = correct_dimensions(input_scaling, n_inputs)
self.teacher_scaling = teacher_scaling
self.teacher_shift = teacher_shift
self.out_activation = out_activation
self.inverse_out_activation = inverse_out_activation
self.random_state = random_state
# the given random_state might be either an actual RandomState object,
# a seed or None (in which case we use numpy's builtin RandomState)
if isinstance(random_state, np.random.RandomState):
self.random_state_ = random_state
elif random_state:
try:
self.random_state_ = np.random.RandomState(random_state)
except TypeError as e:
raise Exception("Invalid seed: " + str(e))
else:
self.random_state_ = np.random.mtrand._rand
self.teacher_forcing = teacher_forcing
self.silent = silent
self.initweights()
def initweights(self):
# initialize recurrent weights:
# begin with a random matrix centered around zero:
W = self.random_state_.rand(self.n_reservoir, self.n_reservoir) - 0.5
# delete the fraction of connections given by (self.sparsity):
W[self.random_state_.rand(*W.shape) < self.sparsity] = 0
# compute the spectral radius of these weights:
radius = np.max(np.abs(np.linalg.eigvals(W)))
# rescale them to reach the requested spectral radius:
self.W = W * (self.spectral_radius / radius)
# random input weights:
self.W_in = self.random_state_.rand(
self.n_reservoir, self.n_inputs) * 2 - 1
# random feedback (teacher forcing) weights:
self.W_feedb = self.random_state_.rand(
self.n_reservoir, self.n_outputs) * 2 - 1
def _update(self, state, input_pattern, output_pattern):
"""performs one update step.
i.e., computes the next network state by applying the recurrent weights
to the last state & and feeding in the current input and output patterns
"""
if self.teacher_forcing:
preactivation = (np.dot(self.W, state)
+ np.dot(self.W_in, input_pattern)
+ np.dot(self.W_feedb, output_pattern))
else:
preactivation = (np.dot(self.W, state)
+ np.dot(self.W_in, input_pattern))
return (np.tanh(preactivation)
+ self.noise * (self.random_state_.rand(self.n_reservoir) - 0.5))
def _scale_inputs(self, inputs):
"""for each input dimension j: multiplies by the j'th entry in the
input_scaling argument, then adds the j'th entry of the input_shift
argument."""
if self.input_scaling is not None:
inputs = np.dot(inputs, np.diag(self.input_scaling))
if self.input_shift is not None:
inputs = inputs + self.input_shift
return inputs
def _scale_teacher(self, teacher):
"""multiplies the teacher/target signal by the teacher_scaling argument,
then adds the teacher_shift argument to it."""
if self.teacher_scaling is not None:
teacher = teacher * self.teacher_scaling
if self.teacher_shift is not None:
teacher = teacher + self.teacher_shift
return teacher
def _unscale_teacher(self, teacher_scaled):
"""inverse operation of the _scale_teacher method."""
if self.teacher_shift is not None:
teacher_scaled = teacher_scaled - self.teacher_shift
if self.teacher_scaling is not None:
teacher_scaled = teacher_scaled / self.teacher_scaling
return teacher_scaled
def fit(self, inputs, outputs, inspect=False):
"""
Collect the network's reaction to training data, train readout weights.
Args:
inputs: array of dimensions (N_training_samples x n_inputs)
outputs: array of dimension (N_training_samples x n_outputs)
inspect: show a visualisation of the collected reservoir states
Returns:
the network's output on the training data, using the trained weights
"""
# transform any vectors of shape (x,) into vectors of shape (x,1):
if inputs.ndim < 2:
inputs = np.reshape(inputs, (len(inputs), -1))
if outputs.ndim < 2:
outputs = np.reshape(outputs, (len(outputs), -1))
# transform input and teacher signal:
inputs_scaled = self._scale_inputs(inputs)
teachers_scaled = self._scale_teacher(outputs)
if not self.silent:
print("harvesting states...")
# step the reservoir through the given input,output pairs:
states = np.zeros((inputs.shape[0], self.n_reservoir))
for n in range(1, inputs.shape[0]):
states[n, :] = self._update(states[n - 1], inputs_scaled[n, :],
teachers_scaled[n - 1, :])
# learn the weights, i.e. find the linear combination of collected
# network states that is closest to the target output
if not self.silent:
print("fitting...")
# we'll disregard the first few states:
transient = min(int(inputs.shape[1] / 10), 100)
# include the raw inputs:
extended_states = np.hstack((states, inputs_scaled))
# Solve for W_out:
self.W_out = np.dot(np.linalg.pinv(extended_states[transient:, :]),
self.inverse_out_activation(teachers_scaled[transient:, :])).T
# remember the last state for later:
self.laststate = states[-1, :]
self.lastinput = inputs[-1, :]
self.lastoutput = teachers_scaled[-1, :]
# optionally visualize the collected states
if inspect:
from matplotlib import pyplot as plt
# (^-- we depend on matplotlib only if this option is used)
plt.figure(
figsize=(states.shape[0] * 0.0025, states.shape[1] * 0.01))
plt.imshow(extended_states.T, aspect='auto',
interpolation='nearest')
plt.colorbar()
if not self.silent:
print("training error:")
# apply learned weights to the collected states:
pred_train = self._unscale_teacher(self.out_activation(
np.dot(extended_states, self.W_out.T)))
if not self.silent:
print(np.sqrt(np.mean((pred_train - outputs)**2)))
return pred_train
def predict(self, inputs, continuation=True):
"""
Apply the learned weights to the network's reactions to new input.
Args:
inputs: array of dimensions (N_test_samples x n_inputs)
continuation: if True, start the network from the last training state
Returns:
Array of output activations
"""
if inputs.ndim < 2:
inputs = np.reshape(inputs, (len(inputs), -1))
n_samples = inputs.shape[0]
if continuation:
laststate = self.laststate
lastinput = self.lastinput
lastoutput = self.lastoutput
else:
laststate = np.zeros(self.n_reservoir)
lastinput = np.zeros(self.n_inputs)
lastoutput = np.zeros(self.n_outputs)
inputs = np.vstack([lastinput, self._scale_inputs(inputs)])
states = np.vstack(
[laststate, np.zeros((n_samples, self.n_reservoir))])
outputs = np.vstack(
[lastoutput, np.zeros((n_samples, self.n_outputs))])
for n in range(n_samples):
states[
n + 1, :] = self._update(states[n, :], inputs[n + 1, :], outputs[n, :])
outputs[n + 1, :] = self.out_activation(np.dot(self.W_out,
np.concatenate([states[n + 1, :], inputs[n + 1, :]])))
return self._unscale_teacher(self.out_activation(outputs[1:]))
``` |
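A toy usage sketch of the ESN class above: teach it a noisy sine wave one step ahead and let it continue from the last training state (illustrative only, not the competition notebook's setup; assumes the file is saved as pyESN.py):
```python
# Toy one-step-ahead demo of the ESN class (illustrative only).
import numpy as np
from pyESN import ESN

rng = np.random.RandomState(42)
t = np.arange(1200)
signal = np.sin(0.05 * t) + 0.01 * rng.randn(len(t))

esn = ESN(n_inputs=1, n_outputs=1, n_reservoir=200,
          spectral_radius=0.95, sparsity=0.5, noise=0.001,
          random_state=42, silent=True)

esn.fit(signal[:999, None], signal[1:1000, None])   # learn to predict x[t+1] from x[t]
pred = esn.predict(signal[999:1199, None])          # continues from the last training state
print("test RMSE:", np.sqrt(np.mean((pred[:, 0] - signal[1000:1200]) ** 2)))
```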