{
"source": "joelvaneenwyk/mint-amazon-tagger",
"score": 2
}
#### File: mint-amazon-tagger/mintamazontagger/mintclient.py
```python
import atexit
import getpass
import logging
import os
from mintapi.api import Mint, MINT_ROOT_URL
from mintamazontagger.my_progress import no_progress_factory
from mintamazontagger.currency import micro_usd_to_usd_float
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
UPDATE_TRANS_ENDPOINT = '/updateTransaction.xevent'
class MintClient():
def __init__(
self,
email=None, password=None,
session_path=None, headless=False, mfa_method='sms',
wait_for_sync=False, mfa_input_callback=None,
progress_factory=no_progress_factory):
self.email = email
self.password = password
self.session_path = session_path
self.headless = headless
self.mfa_method = mfa_method
self.mfa_input_callback = mfa_input_callback
self.wait_for_sync = wait_for_sync
self.progress_factory = progress_factory
self.mintapi = None
def close(self):
if self.mintapi:
self.mintapi.close()
self.mintapi = None
def get_mintapi(self):
if self.mintapi:
return self.mintapi
email = self.email
password = self.password
if not email:
email = input('Mint email: ')
if not password:
password = getpass.getpass('Mint password: ')
if not email or not password:
logger.error('Missing Mint email or password.')
exit(1)
logger.info('You may be asked for an auth code at the command line! '
'Be sure to press ENTER after typing the 6 digit code.')
login_progress = self.progress_factory('Logging into Mint', 0)
        # The cwd when installed on a user's system is typically not writable.
# HACK: Pass through desired download location once that's supported.
cwd = os.getcwd()
os.chdir(os.path.expanduser("~"))
mint_client = Mint.create(email, password,
mfa_method=self.mfa_method,
mfa_input_callback=self.mfa_input_callback,
session_path=self.session_path,
headless=self.headless,
wait_for_sync=self.wait_for_sync)
os.chdir(cwd)
login_progress.finish()
def close_mint_client():
if mint_client:
mint_client.close()
atexit.register(close_mint_client)
self.mintapi = mint_client
return mint_client
def get_categories(self):
# Create a map of Mint category name to category id.
logger.info('Creating Mint Category Map.')
mint_api = self.get_mintapi()
categories = dict([
(cat_dict['name'], cat_id)
for (cat_id, cat_dict)
in mint_api.get_categories().items()])
return categories
def get_transactions(self, start_date):
start_date_str = start_date.strftime('%m/%d/%y')
mint_api = self.get_mintapi()
logger.info('Get all Mint transactions since {}.'.format(
start_date_str))
transactions = mint_api.get_transactions_json(
start_date=start_date_str,
include_investment=False,
skip_duplicates=True)
return transactions
def send_updates(self, updates, progress, ignore_category=False):
mint_client = self.get_mintapi()
num_requests = 0
for (orig_trans, new_trans) in updates:
if len(new_trans) == 1:
# Update the existing transaction.
trans = new_trans[0]
modify_trans = {
'task': 'txnedit',
'txnId': '{}:0'.format(trans.id),
'note': trans.note,
'merchant': trans.merchant,
'token': mint_client.token,
}
if not ignore_category:
modify_trans = {
**modify_trans,
'category': trans.category,
'catId': trans.category_id,
}
logger.debug(
'Sending a "modify" transaction request: {}'.format(
modify_trans))
response = mint_client.post(
'{}{}'.format(
MINT_ROOT_URL,
UPDATE_TRANS_ENDPOINT),
data=modify_trans).text
progress.next()
logger.debug('Received response: {}'.format(response))
num_requests += 1
else:
# Split the existing transaction into many.
# If the existing transaction is a:
# - credit: positive amount is credit, negative debit
# - debit: positive amount is debit, negative credit
itemized_split = {
'txnId': '{}:0'.format(orig_trans.id),
'task': 'split',
'data': '', # Yup this is weird.
'token': mint_client.token,
}
all_credit = all(not trans.is_debit for trans in new_trans)
for (i, trans) in enumerate(new_trans):
amount = trans.amount
# If it's a split credit, everything should be positive
if all_credit and amount < 0:
amount = -amount
amount = micro_usd_to_usd_float(amount)
itemized_split['amount{}'.format(i)] = amount
# Yup. Weird:
itemized_split['percentAmount{}'.format(i)] = amount
itemized_split['merchant{}'.format(i)] = trans.merchant
# Yup weird. '0' means new?
itemized_split['txnId{}'.format(i)] = 0
if not ignore_category:
itemized_split['category{}'.format(i)] = trans.category
itemized_split['categoryId{}'.format(i)] = (
trans.category_id)
else:
itemized_split['category{}'.format(i)] = (
orig_trans.category)
itemized_split['categoryId{}'.format(i)] = (
orig_trans.category_id)
logger.debug(
'Sending a "split" transaction request: {}'.format(
itemized_split))
response = mint_client.post(
'{}{}'.format(
MINT_ROOT_URL,
UPDATE_TRANS_ENDPOINT),
data=itemized_split)
json_resp = response.json()
# The first id is always the original transaction (now
# parent transaction id).
new_trans_ids = json_resp['txnId'][1:]
assert len(new_trans_ids) == len(new_trans)
for itemized_id, trans in zip(new_trans_ids, new_trans):
# Now send the note for each itemized transaction.
itemized_note = {
'task': 'txnedit',
'txnId': '{}:0'.format(itemized_id),
'note': trans.note,
'token': mint_client.token,
}
note_response = mint_client.post(
'{}{}'.format(
MINT_ROOT_URL,
UPDATE_TRANS_ENDPOINT),
data=itemized_note)
logger.debug(
'Received note response: {}'.format(
note_response.text))
progress.next()
logger.debug('Received response: {}'.format(response.text))
num_requests += 1
progress.finish()
return num_requests
```
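A minimal usage sketch for the client above. The credentials and date range here are hypothetical, and running it requires the `mintapi` dependency plus a real Mint account, so treat it as illustrative rather than a test:

```python
from datetime import date, timedelta

from mintamazontagger.mintclient import MintClient

# Hypothetical credentials; email/password may also be omitted and entered
# interactively, since get_mintapi() falls back to input()/getpass().
client = MintClient(
    email='user@example.com',
    password='hunter2',
    headless=True,
    wait_for_sync=False)
try:
    categories = client.get_categories()                       # {category name: category id}
    recent = client.get_transactions(date.today() - timedelta(days=90))
    print('{} categories, {} transactions'.format(len(categories), len(recent)))
finally:
    client.close()
```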
{
"source": "joelverhagen/Knapcode.CommonLibrary.NET",
"score": 3
}
#### File: ComLib.Apps.FluentScript/scripts/example_3_functions2.py
```python
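# NOTE: Despite the .py extension, this sample is FluentScript (ComLib's scripting DSL), not Python.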
# Example 1: hours
def hours, hour, hrs, hr( amount )
{
return new Time(0, amount, 0, 0)
}
# Example 2: minutes
def minutes, minute, mins, min( amount )
{
return new Time( 0, 0, amount, 0 )
}
time = 3 hours + 2 hr + 40 minutes
print time is #{time}
println()
# enable use of units.
enable units
total = 5 inches + 3 feet + 2 yards
print total is #{total.Value} inches
println()
v = 0.9.8.8
print fluentscript version is #{v.Text()}
```
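For readers more familiar with Python, a rough standard-library analogue of the time arithmetic in the snippet above. The unit aliases and the `3 hours + 2 hr + 40 minutes` syntax are FluentScript features; `timedelta` is simply the closest Python equivalent:

```python
from datetime import timedelta

def hours(amount):
    return timedelta(hours=amount)

def minutes(amount):
    return timedelta(minutes=amount)

# Mirrors `time = 3 hours + 2 hr + 40 minutes` from the FluentScript example.
time = hours(3) + hours(2) + minutes(40)
print("time is {}".format(time))  # 5:40:00
```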
{
"source": "JoelVG/pybraille",
"score": 3
}
#### File: pybraille/tests/test_braille.py
```python
from pybraille.main import convertText, convertFile
def testConvertText():
assert convertText("hello") == "⠓⠑⠇⠇⠕"
def testConvertFile():
assert convertFile("tests/sample.txt") == "⠠⠞⠓⠊⠎ ⠙⠊⠗⠑⠉⠞⠕⠗⠽ ⠉⠕⠝⠞⠁⠊⠝⠎ ⠞⠑⠎⠞ ⠋⠊⠇⠑⠎⠲"
```
{
"source": "JoelVinayKumar/e-Tenders",
"score": 2
}
#### File: JoelVinayKumar/e-Tenders/pheasant.py
```python
from flask import Flask, request, session, redirect, url_for, abort, render_template, flash
from bs4 import BeautifulSoup
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import requests
import smtplib
app= Flask(__name__)
app.config['DEBUG'] = True
app.config['SECRET_KEY'] ='super-secret-key'
app.config['USERNAME'] = 'admin'
app.config['PASSWORD'] = '<PASSWORD>'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///practicum.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
smtpObj = smtplib.SMTP('smtp.sendgrid.net',587)
smtpObj.login("apikey", "")
db=SQLAlchemy(app)
class Login(db.Model):
__tablename__ = 'users'
email = db.Column(db.String(100), primary_key=True)
username = db.Column(db.String(100),unique=True)
password = db.Column(db.String(24))
subscription = db.Column(db.String(100))
def __init__(self, email,username, password, subscription):
self.email = email
self.username = username
self.password = password
self.subscription= subscription
def __repr__(self):
return '<Entry %r %r %r %r>' % (self.email,self.username, self.password, self.subscription)
class Tenders(db.Model):
__tablename__ = 'tenders'
S_No = db.Column(db.Integer, primary_key=True,unique=False)
ePub = db.Column(db.DateTime())
BidSub = db.Column(db.DateTime())
TenderOpDate = db.Column(db.DateTime())
Title = db.Column(db.String(100))
Org = db.Column(db.String(100))
def __init__(self, S_No,ePub, BidSub, TenderOpDate, Title, Org):
self.S_No = S_No
self.ePub = ePub
self.BidSub = BidSub
self.TenderOpDate= TenderOpDate
self.Title= Title
self.Org= Org
def __repr__(self):
return '<Entry %r %r %r %r %r %r>' % (self.S_No,self.ePub, self.BidSub, self.TenderOpDate, self.Title, self.Org)
db.create_all()
def authenticate(e, p):
details= Login.query.filter_by(email=e).filter_by(password=p).all()
if(len(details)>0):
return True
else:
return False
# X=[]
# def fun(i):
# f2=open("out/"+i+".html")
# soup=BeautifulSoup(f2.read(),'lxml')
# f2.close()
# tb = soup.find_all("table",{"class":"list_table","id":"table"})
# tl = tb[0].find_all("tr")
# for x in range(1,len(tl)):
# L={}
# f = tl[x].find_all("td")
# L['id']= f[0].text
# L['ePublishedDate']= datetime.strptime(f[1].text, '%d-%b-%Y %I:%M %p')
# L['BidSubmissionDate']= datetime.strptime(f[2].text, '%d-%b-%Y %I:%M %p')
# L['TenderOpeningDate']= datetime.strptime(f[3].text, '%d-%b-%Y %I:%M %p')
# L['Title']= f[4].text
# L['Organisation']= f[5].text
# print("The length of dictionary is "+str(len(X)))
# # print("https://eprocure.gov.in"+f[4].find("a")['href'])
# new_tender=Tenders(f[0].text,datetime.strptime(f[1].text, '%d-%b-%Y %I:%M %p'),datetime.strptime(f[2].text, '%d-%b-%Y %I:%M %p'),datetime.strptime(f[3].text, '%d-%b-%Y %I:%M %p'),f[4].text,f[5].text)
# # print(new_tender)
# db.session.add(new_tender)
# db.session.commit()
# X.append(L)
# for i in range(1,21):
# fun(str(i))
def expired_tenders():
    time_now = datetime.now()
    for a in Tenders.query.all():
        if a.BidSub < time_now:
            # Delete via the SQLAlchemy session (not the Flask `session` proxy).
            db.session.delete(a)
    db.session.commit()
Orgz=[]
def org():
Q=[]
k= Tenders.query.order_by(Tenders.Org).all()
for a in k:
Q.append(a.Org)
for b in Q:
if b not in Orgz:
Orgz.append(b)
return Orgz
# NOTE: this helper is shadowed by the /tenders view function of the same name below.
def tenders():
    return Tenders.query.all()
def user_emails():
    emails = []
    for u in Login.query.all():
        # Collect the address itself so membership checks like
        # `email in user_emails()` work as intended.
        emails.append(u.email)
    return emails
def mail_body(a):
t = Tenders.query.filter_by(Org=a).order_by(Tenders.BidSub.desc()).all()
html_mailer="""
MIME-Version: 1.0
Content-type: text/html
Subject: SMTP HTML e-mail test
<table>\
<thead>\
<tr>\
<th>S.No</th>\
<th>e-Published Date</th>\
<th>Bid Submission Closing Date</th>\
<th>Tender Opening Date</th>\
<th>Title and Ref.No./Tender Id</th>\
<th>Organisation Name</th>\
</tr>\
</thead>\
<tbody>"""
print(t)
for a in t:
html_mailer+="<tr>"
html_mailer+="<td>"+str(a.S_No)+"</td>"+"<td>"+str(a.ePub)+"</td>"+"<td>"+str(a.BidSub)+"</td>"+"<td>"+str(a.TenderOpDate)+"</td>"+"<td>"+str(a.Title)+"</td>"+"<td>"+str(a.Org)+"</td>"
html_mailer+="</tr></tbody></table>"
return html_mailer
@app.route('/test')
def test():
return mail_body('Bharat Petroleum Corporation Limited')
def send_all_mails():
    users = Login.query.order_by(Login.email).all()
    if users:
        for user in users:
            # Build the digest for this user's own subscription (not the first user's).
            body = str(mail_body(user.subscription))
            smtpObj.sendmail("<EMAIL>", str(user.email), body)
            print("Mail sent to " + user.email)
    else:
        print("Sorry. No users found")
@app.route('/send')
def send():
send_all_mails()
msg= "All mails are sent to users successfully!"
return redirect('/')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/tenders')
def tenders():
all = Tenders.query.all()
return render_template('tenders.html',all= all)
@app.route('/sign_up',methods=['GET','POST'])
def sign_up():
error = None
if request.method== 'GET':
return render_template('sign_up.html', orgs=org())
else:
uname = request.form.get('username')
email = request.form.get('email')
pwd = request.form.get('pwd')
rpwd = request.form.get('rpwd')
sub = request.form.get('sel')
if pwd==rpwd:
            if email in user_emails():
                error = "Email already in our records."
                return render_template('sign_up.html', error=error)
else:
new_user = Login(email,uname,pwd,sub)
db.session.add(new_user)
db.session.commit()
success= "New account created for "+request.form.get('username')+"\nCheck your inbox."
# msg = Message('Message from Pheasant', sender = '<EMAIL>', recipients = [email])
# msg.body = "Hello,"+str(uname)+".\nWe've created an account for you.\nThank you for subscribing to"+str(sub)+".\nWe will keep you posted daily."
# mail.send(msg)
return render_template('sign_up.html',success=success)
else:
error= "Sorry, passwords don't match."
return render_template('sign_up.html',error=error)
@app.route('/login',methods=['GET','POST'])
def login():
if request.method == 'POST':
if authenticate(request.form['email'], request.form['pwd']):
session['logged_in'] = True
a= request.form['email']
session['log_user'] = Login.query.filter_by(email=a).one().username
return redirect('/')
else:
error='Invalid credentials'
return render_template('login.html',error=error)
if request.method == 'GET':
return render_template('login.html')
@app.route('/logout')
def logout():
    # Use .get() so logging out without an active session does not raise a KeyError.
    if session.get('logged_in'):
        session['logged_in'] = False
    return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
```
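A minimal smoke-test sketch for the login flow above. It assumes the older Flask / Flask-SQLAlchemy stack this excerpt targets, that the referenced Jinja templates exist, that the SQLite database is writable, and that the module-level SMTP connection to SendGrid succeeds (or is stubbed out); none of that is guaranteed from the excerpt alone:

```python
from pheasant import app, db, Login

with app.app_context():
    db.create_all()
    # Seed one user if it is not already present (email is the primary key).
    if Login.query.get('alice@example.com') is None:
        db.session.add(Login('alice@example.com', 'alice', 'secret', 'Some Organisation'))
        db.session.commit()

client = app.test_client()
resp = client.post('/login', data={'email': 'alice@example.com', 'pwd': 'secret'})
# authenticate() matches the seeded row, so the view redirects to the home page.
assert resp.status_code == 302
```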
{
"source": "joelvisroman/dataviva-site",
"score": 3
}
#### File: joelvisroman/dataviva-site/config.py
```python
import os
'''Used for finding environment variables through configuration;
if a default is not given, the site will raise an exception.'''
def get_env_variable(var_name, default=-1):
try:
return os.environ[var_name]
except KeyError:
if default != -1:
return default
error_msg = "Set the %s os.environment variable" % var_name
raise Exception(error_msg)
''' Base directory of where the site is held '''
basedir = os.path.abspath(os.path.dirname(__file__))
UPLOAD_FOLDER = os.path.join(basedir, 'dataviva/static/uploads/')
''' CSRF (cross site forgery) for signing POST requests to server '''
CSRF_EN = True
''' Secret key should be set in environment var '''
SECRET_KEY = get_env_variable(
"DATAVIVA_SECRET_KEY", "default-dataviva.mg-secr3t")
''' Default debugging to True '''
DEBUG = True
DEBUG_WITH_APTANA = True
SQLALCHEMY_ECHO = True
GZIP_DATA = get_env_variable("DATAVIVA_GZIP_DATA", True)
''' Whether or not to allow User Account Activity '''
ACCOUNTS = get_env_variable("DATAVIVA_ACCOUNTS", True)
'''
Details for connecting to the database, credentials set as environment
variables.
'''
SQLALCHEMY_DATABASE_URI = "mysql://{0}:{1}@{2}/{3}".format(
get_env_variable("DATAVIVA_DB_USER", "root"),
get_env_variable("DATAVIVA_DB_PW", ""),
get_env_variable("DATAVIVA_DB_HOST", "localhost"),
get_env_variable("DATAVIVA_DB_NAME", "dataviva"))
''' If user prefers to connect via socket set env var '''
if "DATAVIVA_DB_SOCKET" in os.environ:
SQLALCHEMY_DATABASE_URI += "?unix_socket=" + \
get_env_variable("DATAVIVA_DB_SOCKET")
''' If an env var for production is set turn off all debugging support '''
if "DATAVIVA_PRODUCTION" in os.environ:
SQLALCHEMY_ECHO = False
DEBUG = False
JSONIFY_PRETTYPRINT_REGULAR = False
SESSION_COOKIE_DOMAIN = ".dataviva.info"
''' Available languages '''
LANGUAGES = {
'en': 'English',
'pt': 'Português'
}
''' For full text search '''
WHOOSH_BASE = os.path.join(basedir, 'dataviva/utils/search_indices')
'''
API base URL
'''
API_BASE_URL = get_env_variable("DATAVIVA_API_BASE_URL")
'''
OAuth tokens set in environment variables from their respective sources
'''
GOOGLE_OAUTH_ID = get_env_variable("DATAVIVA_OAUTH_GOOGLE_ID")
GOOGLE_OAUTH_SECRET = get_env_variable("DATAVIVA_OAUTH_GOOGLE_SECRET")
TWITTER_OAUTH_ID = get_env_variable("DATAVIVA_OAUTH_TWITTER_ID")
TWITTER_OAUTH_SECRET = get_env_variable("DATAVIVA_OAUTH_TWITTER_SECRET")
FACEBOOK_OAUTH_ID = get_env_variable("DATAVIVA_OAUTH_FACEBOOK_ID")
FACEBOOK_OAUTH_SECRET = get_env_variable("DATAVIVA_OAUTH_FACEBOOK_SECRET")
AWS_ACCESS_KEY = get_env_variable('DATAVIVA_OAUTH_AWS_ID')
AWS_SECRET_KEY = get_env_variable('DATAVIVA_OAUTH_AWS_SECRET')
''' S3 Buckets '''
S3_BUCKET = get_env_variable('S3_BUCKET', 'dataviva-dev')
S3_HOST = get_env_variable('S3_HOST', 'https://dataviva-site-production.s3.amazonaws.com')
'''
Mail credentials to send automatic emails to users
'''
MAIL_SERVER = get_env_variable("DATAVIVA_MAIL_SERVER", 'smtp.gmail.com')
MAIL_PORT = get_env_variable("DATAVIVA_MAIL_PORT", 587)
MAIL_USE_TLS = get_env_variable("DATAVIVA_MAIL_USE_TLS", False)
MAIL_USE_SSL = get_env_variable("DATAVIVA_MAIL_USE_SSL", False)
MAIL_USERNAME = get_env_variable("DATAVIVA_MAIL_USERNAME", '<EMAIL>')
MAIL_PASSWORD = get_env_variable("DATAVIVA_MAIL_PASSWORD", "")
'''
Administrator email
'''
ADMINISTRATOR_EMAIL = '<EMAIL>'
'''
Pagination
'''
ITEMS_PER_PAGE = 10
BOOTSTRAP_VERSION = 3
```
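A quick illustration of how `get_env_variable` behaves with and without a default. Note that importing `config` executes the whole module, so the mandatory `DATAVIVA_*` variables (API base URL, OAuth ids, and so on) must already be set in the environment; the variable names below are made up for the example:

```python
import os

from config import get_env_variable

os.environ['DATAVIVA_EXAMPLE'] = 'hello'
print(get_env_variable('DATAVIVA_EXAMPLE'))               # 'hello'
print(get_env_variable('DATAVIVA_MISSING', 'fallback'))   # 'fallback'
try:
    get_env_variable('DATAVIVA_MISSING')                  # no default -> raises
except Exception as error:
    print(error)  # "Set the DATAVIVA_MISSING environment variable"
```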
#### File: api/hedu/services.py
```python
from dataviva.api.hedu.models import Ybu, Ybc_hedu, Yu, Yuc, Yc_hedu, Ybuc
from dataviva.api.attrs.models import University as uni, Course_hedu, Bra
from dataviva import db
from sqlalchemy.sql.expression import func, desc, not_
class University:
def __init__(self, university_id):
self._hedu = None
self._hedu_sorted_by_enrolled = None
self._hedu_sorted_by_entrants = None
self._hedu_sorted_by_graduates = None
self.university_id = university_id
if university_id is None:
self.max_year_query = db.session.query(func.max(Yu.year))
self.hedu_query = Yu.query.filter(Yu.year == self.max_year_query)
else:
self.max_year_query = db.session.query(
func.max(Yu.year)).filter_by(university_id=university_id)
self.hedu_query = Yu.query.filter(
Yu.university_id == self.university_id,
Yu.year == self.max_year_query)
def __hedu__(self):
if not self._hedu:
hedu_data = self.hedu_query.first_or_404()
self._hedu = hedu_data
return self._hedu
def __hedu_list__(self):
if not self._hedu:
hedu_data = self.hedu_query.all()
self._hedu = hedu_data
return self._hedu
def __hedu_sorted_by_enrolled__(self):
if not self._hedu_sorted_by_enrolled:
self._hedu_sorted_by_enrolled = self.__hedu_list__()
self._hedu_sorted_by_enrolled.sort(
key=lambda hedu: hedu.enrolled, reverse=True)
return self._hedu_sorted_by_enrolled
def __hedu_sorted_by_entrants__(self):
if not self._hedu_sorted_by_entrants:
self._hedu_sorted_by_entrants = self.__hedu_list__()
self._hedu_sorted_by_entrants.sort(
key=lambda hedu: hedu.entrants, reverse=True)
return self._hedu_sorted_by_entrants
def __hedu_sorted_by_graduates__(self):
if not self._hedu_sorted_by_graduates:
self._hedu_sorted_by_graduates = self.__hedu_list__()
self._hedu_sorted_by_graduates.sort(
key=lambda hedu: hedu.graduates, reverse=True)
return self._hedu_sorted_by_graduates
def name(self):
return self.__hedu__().university.name()
def university_type(self):
return self.__hedu__().university.school_type()
def enrolled(self):
return self.__hedu__().enrolled
def entrants(self):
return self.__hedu__().entrants
def graduates(self):
return self.__hedu__().graduates
def profile(self):
return self.__hedu__().university.desc_pt
def year(self):
return self.max_year_query.first()[0]
def highest_enrolled_number(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.enrolled
def highest_entrants_number(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.entrants
def highest_graduates_number(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.graduates
def highest_enrolled_by_university(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_university_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.university.name()
else:
return None
class UniversityMajors(University):
def __init__(self, university_id):
University.__init__(self, university_id)
self.max_year_query = db.session.query(func.max(Yuc.year))
self.hedu_query = Yuc.query.filter(
Yuc.university_id == self.university_id,
Yuc.year == self.max_year_query,
func.length(Yuc.course_hedu_id) == 6)
def major_with_more_enrollments(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.course_hedu.name()
def major_with_more_entrants(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.course_hedu.name()
def major_with_more_graduates(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.course_hedu.name()
class Major:
def __init__(self, course_hedu_id, bra_id):
self._hedu = None
self._hedu_sorted_by_enrolled = None
self._hedu_sorted_by_entrants = None
self._hedu_sorted_by_graduates = None
self._hedu_major_rank = None
self.course_hedu_id = course_hedu_id
self.bra_id = bra_id
if course_hedu_id is None and bra_id is None:
self.max_year_query = db.session.query(func.max(Yc_hedu.year))
self.hedu_query = Ybc_hedu.query.filter(Ybc_hedu.year == self.max_year_query)
else:
self.max_year_query = db.session.query(
func.max(Yc_hedu.year)).filter_by(course_hedu_id=course_hedu_id)
if bra_id != '':
self.hedu_query = Ybc_hedu.query.filter(
Ybc_hedu.course_hedu_id == self.course_hedu_id,
Ybc_hedu.bra_id == self.bra_id,
Ybc_hedu.year == self.max_year_query)
else:
self.hedu_query = Yc_hedu.query.filter(
Yc_hedu.course_hedu_id == self.course_hedu_id,
Yc_hedu.year == self.max_year_query)
def __hedu__(self):
if not self._hedu:
hedu_data = self.hedu_query.first_or_404()
self._hedu = hedu_data
return self._hedu
def __hedu_list__(self):
if not self._hedu:
hedu_data = self.hedu_query.all()
self._hedu = hedu_data
return self._hedu
def __hedu_sorted_by_enrolled__(self):
if not self._hedu_sorted_by_enrolled:
self._hedu_sorted_by_enrolled = self.__hedu_list__()
self._hedu_sorted_by_enrolled.sort(
key=lambda hedu: hedu.enrolled, reverse=True)
return self._hedu_sorted_by_enrolled
def __hedu_sorted_by_entrants__(self):
if not self._hedu_sorted_by_entrants:
self._hedu_sorted_by_entrants = self.__hedu_list__()
self._hedu_sorted_by_entrants.sort(
key=lambda hedu: hedu.entrants, reverse=True)
return self._hedu_sorted_by_entrants
def __hedu_sorted_by_graduates__(self):
if not self._hedu_sorted_by_graduates:
self._hedu_sorted_by_graduates = self.__hedu_list__()
self._hedu_sorted_by_graduates.sort(
key=lambda hedu: hedu.graduates, reverse=True)
return self._hedu_sorted_by_graduates
def name(self):
return self.__hedu__().course_hedu.name()
def enrolled(self):
return self.__hedu__().enrolled
def entrants(self):
return self.__hedu__().entrants
def graduates(self):
return self.__hedu__().graduates
def profile(self):
return self.__hedu__().course_hedu.desc_pt
def year(self):
return self.__hedu__().year
def highest_enrolled_number(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.enrolled
def highest_entrants_number(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.entrants
def highest_graduates_number(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.graduates
def location_name(self):
return Bra.query.filter(Bra.id == self.bra_id).first().name()
def highest_enrolled_by_major(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_major_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.course_hedu.name()
else:
return None
class MajorUniversities(Major):
def __init__(self, course_hedu_id, bra_id):
Major.__init__(self, course_hedu_id, bra_id)
self.course_hedu_id = course_hedu_id
self.max_year_query = db.session.query(
func.max(Yuc.year)).filter_by(course_hedu_id=course_hedu_id)
if bra_id == '':
self.hedu_query = Yuc.query.filter(
Yuc.course_hedu_id == self.course_hedu_id,
Yuc.year == self.max_year_query)
else:
self.hedu_query = Ybuc.query.filter(
Ybuc.course_hedu_id == self.course_hedu_id,
Ybuc.bra_id == self.bra_id,
Ybuc.year == self.max_year_query)
def university_with_more_enrolled(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.university.name()
def university_with_more_entrants(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.university.name()
def university_with_more_graduates(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.university.name()
class MajorMunicipalities(Major):
def __init__(self, course_hedu_id, bra_id):
Major.__init__(self, course_hedu_id, bra_id)
self.course_hedu_id = course_hedu_id
self.max_year_query = db.session.query(
func.max(Ybc_hedu.year)).filter_by(course_hedu_id=course_hedu_id)
if bra_id == '':
self.hedu_query = Ybc_hedu.query.filter(
Ybc_hedu.course_hedu_id == self.course_hedu_id,
Ybc_hedu.year == self.max_year_query,
not_(Ybc_hedu.bra_id.like('0xx%')),
func.length(Ybc_hedu.bra_id) == 9)
else:
self.hedu_query = Ybc_hedu.query.filter(
Ybc_hedu.course_hedu_id == self.course_hedu_id,
Ybc_hedu.year == self.max_year_query,
Ybc_hedu.bra_id.like(self.bra_id+'%'),
not_(Ybc_hedu.bra_id.like('0xx%')),
func.length(Ybc_hedu.bra_id) == 9)
def municipality_with_more_enrolled(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.bra.name()
def municipality_with_more_enrolled_state(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.bra.abbreviation
def municipality_with_more_entrants(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.bra.name()
def municipality_with_more_entrants_state(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.bra.abbreviation
def municipality_with_more_graduates(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.bra.name()
def municipality_with_more_graduates_state(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.bra.abbreviation
class LocationUniversity:
def __init__(self, bra_id):
self._hedu_sorted_by_enrolled = None
self._hedu = None
self.bra_id = bra_id
self.max_year_query = db.session.query(
func.max(Ybu.year)).filter_by(bra_id=bra_id)
self.hedu_query = Ybu.query.join(uni).filter(
Ybu.bra_id == self.bra_id,
Ybu.year == self.max_year_query)
def __hedu__(self):
if not self._hedu:
hedu_data = self.hedu_query.one()
self._hedu = hedu_data
return self._hedu
def __hedu_list__(self):
if not self._hedu:
hedu_data = self.hedu_query.all()
self._hedu = hedu_data
return self._hedu
def __hedu_sorted_by_enrolled__(self):
if not self._hedu_sorted_by_enrolled:
self._hedu_sorted_by_enrolled = self.__hedu_list__()
self._hedu_sorted_by_enrolled.sort(
key=lambda hedu: hedu.enrolled, reverse=True)
return self._hedu_sorted_by_enrolled
def year(self):
return self.max_year_query.first()[0]
def highest_enrolled_by_university(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_university_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.university.name()
else:
return None
class LocationMajor(LocationUniversity):
def __init__(self, bra_id):
LocationUniversity.__init__(self, bra_id)
self._hedu = None
self.bra_id = bra_id
self.max_year_query = db.session.query(
func.max(Ybc_hedu.year)).filter_by(bra_id=bra_id)
self.hedu_query = Ybc_hedu.query.join(Course_hedu).filter(
Ybc_hedu.bra_id == self.bra_id,
Ybc_hedu.course_hedu_id_len == 6,
Ybc_hedu.year == self.max_year_query)
def highest_enrolled_by_major(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_major_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.course_hedu.name()
else:
return None
```
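All of the service classes above follow the same structure: run the query once, memoize the rows, and keep separately memoized copies sorted by each metric (enrolled, entrants, graduates). A stripped-down, self-contained sketch of that pattern; the names here are illustrative, not part of the dataviva API:

```python
class MetricService:
    """Caches a result set and lazily materialises per-metric sorted views."""

    def __init__(self, rows):
        self._rows = rows      # stands in for hedu_query.all()
        self._sorted = {}      # metric name -> cached sorted list

    def sorted_by(self, metric):
        if metric not in self._sorted:
            self._sorted[metric] = sorted(
                self._rows, key=lambda row: row[metric], reverse=True)
        return self._sorted[metric]

    def highest(self, metric):
        ranked = self.sorted_by(metric)
        return ranked[0][metric] if ranked else None


service = MetricService([
    {'enrolled': 120, 'entrants': 40, 'graduates': 25},
    {'enrolled': 300, 'entrants': 90, 'graduates': 60},
])
print(service.highest('enrolled'))   # 300
print(service.highest('graduates'))  # 60
```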
#### File: api/stats/cache.py
```python
from dataviva import view_cache as cache
import pickle
def object_cache(key, value=None):
if not value:
tmp = cache.get(key)
if not tmp:
return None
return pickle.loads(tmp)
else:
cache.set(key, pickle.dumps(value))
return value
def profile_cache_serialized(ptype, attrs):
key = "profile_" + str(attrs)
obj = object_cache(key)
if not obj:
if type(attrs) is list:
obj = ptype(*attrs)
else:
obj = ptype(attrs)
obj = obj.serialize()
object_cache(key, obj)
return obj
```
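The pickle-backed helper above has one noteworthy convention: calling it without a `value` (or with any falsy value) is treated as a read, so falsy objects can never be cached. A standalone sketch of the same read/write convention against a plain dict, which stands in for dataviva's `view_cache`, to make that behaviour concrete:

```python
import pickle

_fake_cache = {}

def object_cache(key, value=None):
    if not value:
        tmp = _fake_cache.get(key)
        if not tmp:
            return None
        return pickle.loads(tmp)
    _fake_cache[key] = pickle.dumps(value)
    return value

object_cache('profile_[1]', {'name': 'UFMG'})   # write
print(object_cache('profile_[1]'))              # read -> {'name': 'UFMG'}
print(object_cache('missing'))                  # miss -> None
object_cache('empty', [])                       # falsy value: treated as a read!
print(object_cache('empty'))                    # still None
```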
#### File: api/stats/helper.py
```python
import pickle
import re
from dataviva.utils import table_helper, query_helper
from dataviva.api.stats.util import compute_allowed
from dataviva.utils.cached_query import cached_query
from dataviva.api.stats.cache import object_cache
from dataviva import db
import dataviva.api.rais.models as rais
import dataviva.api.secex.models as secex
import dataviva.api.hedu.models as hedu
import dataviva.api.attrs.models as attrs
from dataviva.api.stats.util import get_profiles
from sqlalchemy import func
from sqlalchemy import desc, asc
from dataviva.api.stats.cache import profile_cache_serialized
from dataviva.api.stats.util import gen_table_list
from dataviva import __year_range__
__latest_year__ = {k: v[-1] for k,v in __year_range__.items()}
# TODO: Add SECEX once API is updated
possible_tables = {
"bra_id" : [rais.Yb_rais, secex.Ymb, hedu.Yb_hedu, hedu.Ybu, attrs.Bra],
"cnae_id" : [rais.Yi],
"cbo_id" : [rais.Yo],
"university_id" : [hedu.Yu, hedu.Ybu],
"course_hedu_id" : [hedu.Yc_hedu],
"hs_id": [secex.Ymp]
}
allowed_when_not = compute_allowed(gen_table_list(possible_tables))
max_depth = {
"bra_id": 9,
"course_hedu_id": 6,
"cnae_id": 6,
"cbo_id": 4,
"hs_id": 6,
"wld_id": 5,
}
no_length_column = { attrs.Bra: 9 }
CAROUSEL_NS = "carousel:"
filters_map = {
secex.Ymb : [secex.Ymb.year == __latest_year__['secex'], secex.Ymb.month == 0],
secex.Ymp : [secex.Ymp.year == __latest_year__['secex'], secex.Ymp.month == 0],
rais.Yb_rais : [rais.Yb_rais.year == __latest_year__['rais']],
rais.Yi : [rais.Yi.year == __latest_year__['rais']],
rais.Yo : [rais.Yo.year == __latest_year__['rais'], rais.Yo.cbo_id != u'xxxx'],
hedu.Ybu : [hedu.Ybu.year == __latest_year__['hedu'], hedu.Ybu.bra_id != '0xx000007'],
hedu.Yu : [hedu.Yu.year == __latest_year__['hedu']],
hedu.Yb_hedu : [hedu.Yb_hedu.year == __latest_year__['hedu'], hedu.Yb_hedu.bra_id != '0xx000007'],
hedu.Yc_hedu : [hedu.Yc_hedu.year == __latest_year__['hedu']],
}
def carousel_maker(title, typestr, metric, shows, limit=10, offset=0, sort="desc"):
result = stats_list(metric, shows, limit=limit, offset=offset, sort=sort)
posters = get_profiles(result, typestr)
return {
"title": title,
"type": typestr,
"posters": posters
}
def make_key(*args, **kwargs):
return str(kwargs)
def stats_list(metric, shows, limit=None, offset=None, sort="desc", depth=None, listify=False):
if type(shows) is str:
shows = [shows]
raw = compute_stats(metric, shows, limit=limit, offset=offset, sort=sort, depth=depth)
return raw["data"]
def cities_by_pop(value):
Ybs = attrs.Ybs
filters = [Ybs.stat_id == 'pop', Ybs.stat_val >= value, Ybs.year == __latest_year__['stats'], func.char_length(Ybs.bra_id) == 9]
res = Ybs.query.filter(*filters).with_entities(Ybs.bra_id).all()
if res:
return [row[0] for row in res]
return res
def compute_stats(metric, shows, limit=None, offset=None, sort="desc", depth=None, filters=[]):
cache_key = CAROUSEL_NS + "".join(([metric] + shows) + ([str(limit), str(offset),sort,str(depth)]))
prev = cached_query(cache_key)
if prev:
return pickle.loads(prev)
kwargs = {metric:"dummy"}
kwargs[shows[0]] = 'show'
for show in shows[1:]:
kwargs[show] = "dummy"
table = table_helper.select_best_table(kwargs, allowed_when_not, possible_tables)
if not table:
raise Exception("No Valid Table Available!")
    # NOTE: this reassignment discards the `filters` keyword argument accepted above.
    filters = []
show_columns = [getattr(table, show) for show in shows]
metric_col = getattr(table, metric)
i = 0
for show_column in show_columns:
show=shows[i]
if table in no_length_column:
depth_val = depth or max_depth[show]
filters.append(func.char_length(show_column) == depth_val )
elif show in max_depth:
depth_val = depth or max_depth[show]
filters.append(getattr(table, show + table_helper.LEN) == depth_val )
i+=1
if table in filters_map:
filters += filters_map[table]
growth_regex = re.match('(num_emp)_growth(_5)?', metric)
    VAL_THRESHOLD = 10000
    if growth_regex:
        orig_col_name = growth_regex.group(1)
        orig_col = getattr(table, orig_col_name)
        filters.append(orig_col >= VAL_THRESHOLD)
elif metric == "wage_avg" and len(shows) == 1 and shows[0] == "bra_id":
# when looking at wage_avg for cities, only look at places
# with >= 50k people
cities = cities_by_pop(50000)
filters.append(table.bra_id.in_(cities))
columns = show_columns + [metric_col]
results = query_helper.query_table(table, columns, filters, order=metric, limit=limit, sort=sort, offset=offset)
cached_query(cache_key, pickle.dumps(results))
return results
def top_occupations(year, bra_id):
cache_key = CAROUSEL_NS + "top_occupations" + str(year) + bra_id
prev = object_cache(cache_key)
if prev:
return pickle.loads(prev)
table = rais.Ybo
filters = [table.bra_id == bra_id, table.year == year]
raw = query_helper.query_table(table, [table.cbo_id], filters, order=table.wage_avg, limit=10, sort="desc")
cbos = [x[0] for x in raw["data"]]
    # NOTE: `raisd` is not imported in this excerpt; the rais "d"-dimension models
    # module needs to be imported for this lookup to work.
    table = raisd.Ybod
filters = [table.bra_id == bra_id, table.year == year, table.cbo_id.in_(cbos), table.d_id.in_(["A", "B"])]
columns = [table.cbo_id, table.d_id, table.num_jobs, table.wage_avg, table.wage_growth]
results = query_helper.query_table(table, columns, filters, order=table.wage_avg)
object_cache(cache_key, pickle.dumps(results))
return results
def generic_join_breakdown(namespace, params, left_table, right_table, join_criteria, columns, order_col="ratio", filters=[],
limit=10, sort_order="desc", offset=0, col_select=None):
cache_key = CAROUSEL_NS + namespace + "_" + str(params)
prev = object_cache(cache_key)
if prev:
return pickle.loads(prev)
order = desc(order_col) if sort_order != "asc" else asc(order_col)
results = left_table.query.join(right_table, join_criteria) \
.with_entities(*columns) \
.filter(*filters) \
.order_by(order) \
.limit(limit) \
.offset(offset) \
.all()
if not col_select:
raise Exception("Please specify the column to select for results")
results = [row.__dict__[col_select] for row in results]
object_cache(cache_key, pickle.dumps(results))
return results
def make_items(data, Kind):
items = [{"value": item[-1], "poster": Kind.query.get(item[0])} for item in data]
return items
```
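The `growth_regex` branch in `compute_stats` only fires for the employment-growth metrics; a quick demonstration of what the pattern matches and which base column the threshold filter is applied to:

```python
import re

for metric in ['num_emp_growth', 'num_emp_growth_5', 'num_emp', 'wage_avg']:
    match = re.match(r'(num_emp)_growth(_5)?', metric)
    # group(1) is the base column the >= 10000 threshold is applied to.
    print(metric, '->', match.group(1) if match else 'no threshold filter')
```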
#### File: apps/ask/views.py
```python
from sqlalchemy import and_, or_, func
from datetime import datetime
from flask import Blueprint, request, make_response, render_template, flash, g, session, redirect, url_for, jsonify, abort, current_app
from flask.ext.babel import gettext
from dataviva import db, lm, view_cache
# from config import SITE_MIRROR
from dataviva.apps.user.models import User
from dataviva.apps.ask.models import Question, Reply, Status, Vote, TYPE_QUESTION, TYPE_REPLY, Flag
from dataviva.apps.ask.forms import AskForm, ReplyForm, SearchForm
from dataviva.utils.cached_query import cached_query, api_cache_key
import urllib2, urllib
mod = Blueprint('ask', __name__, url_prefix='/<lang_code>/ask')
RESULTS_PER_PAGE = 10
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', g.locale)
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.route('/questions/', methods=['GET', 'POST'], defaults={'page': 1})
def question_list(page):
# get URL parameters for results per page and ordering options
order = request.args.get('order', 'votes') # options = 'votes' or 'newest'
type = request.args.get('type', 'all') # options = 'all' or 'question' or 'comment' or 'contact'
offset = request.args.get('offset', 0)
search_term = request.args.get('q', None)
if search_term:
search_term = search_term.encode('utf-8')
limit = 25
lang = request.args.get('lang', None) or g.locale
# lets find the questions to load in the page
# only the approved questions
approved = Status.query.filter_by(name='Approved').first()
questions = Question.query.filter_by(status = approved)
# if the user has submitted a search, filter by that term
if search_term:
like_str = "%{0}%".format(search_term)
questions = questions.filter(or_(Question.question.like(like_str),Question.body.like(like_str),Question.status_notes.like(like_str)))
if type == "question":
questions = questions.filter_by(type_id='1')
elif type == "comment":
questions = questions.filter_by(type_id='2')
elif type == "contact":
questions = questions.filter_by(type_id='3')
# if we are ordering the questions by newest get them ordered chronologically
if order == "newest":
if g.locale == "pt":
questions = questions.order_by(Question.timestamp.desc(),Question.language.desc())
else:
questions = questions.order_by(Question.timestamp.desc(),Question.language)
questions = questions.order_by(Question.timestamp.desc())
questions = questions.limit(limit).offset(offset)
questions = [q.serialize() for q in questions.all()]
# otherwise we are ordering the questions by votes
else:
questions = questions.limit(limit).offset(offset)
ids = [q.id for q in questions]
# raise Exception(ids)
votes_subq = db.session.query(Vote, func.count('*').label('vote_count')).group_by(Vote.type_id).subquery()
if lang == "pt":
questions = db.session.query(Question, votes_subq.c.vote_count) \
.outerjoin(votes_subq, and_(Question.id==votes_subq.c.type_id, votes_subq.c.type==TYPE_QUESTION)) \
.filter(Question.status == approved) \
.filter(Question.id.in_(ids)) \
.filter(Question.language==lang) \
.order_by(votes_subq.c.vote_count.desc(),Question.language.desc())
else:
questions = db.session.query(Question, votes_subq.c.vote_count) \
.outerjoin(votes_subq, and_(Question.id==votes_subq.c.type_id, votes_subq.c.type==TYPE_QUESTION)) \
.filter(Question.status == approved) \
.filter(Question.id.in_(ids)) \
.filter(Question.language==lang) \
.order_by(votes_subq.c.vote_count.desc(),Question.language)
# .limit(limit).offset(offset)
questions = [q[0].serialize() for q in questions]
ret = jsonify({"activities":questions})
ret.headers.add('Last-Modified', datetime.now())
ret.headers.add('Expires', '-1')
ret.headers.add('Cache-Control', 'must-revalidate, private')
return ret
@mod.route('/question/<slug>/vote/')
@mod.route('/question/<slug>/vote/<user>/')
def question_vote(slug, user=None):
q = Question.query.filter_by(slug=slug).first_or_404()
    # NOTE: the SITE_MIRROR import is commented out above, so this branch would raise
    # a NameError if a mirror user id were ever passed in.
    if user and request.remote_addr == SITE_MIRROR.split(":")[1][2:]:
g.user = User.query.get(user)
elif g.user is None or not g.user.is_authenticated:
return jsonify({"error": gettext("You need to be logged in to vote.")})
elif user is None and g.user is None:
abort(404)
# if user is None:
# try:
# opener = urllib2.urlopen("{0}ask/question/{1}/vote/{2}/".format(SITE_MIRROR,slug,g.user.id),None,5)
# except:
# return jsonify({"error": gettext("The server is not responding. Please try again later.")})
vote = q.votes.filter_by(user=g.user).first()
if vote:
db.session.delete(vote)
db.session.commit()
return jsonify({"success": -1})
else:
new_vote = Vote(user=g.user, type=TYPE_QUESTION, type_id=q.id)
db.session.add(new_vote)
db.session.commit()
return jsonify({"success": 1})
@mod.route('/reply/<int:id>/vote/')
@mod.route('/reply/<int:id>/vote/<user>/')
def reply_vote(id, user=None):
reply = Reply.query.get_or_404(id)
# if user and request.remote_addr == SITE_MIRROR.split(":")[1][2:]:
# g.user = User.query.get(user)
if g.user is None or not g.user.is_authenticated:
return jsonify({"error": gettext("You need to be logged in to vote.")})
# elif user is None and g.user is None:
# abort(404)
# if user is None:
# try:
# opener = urllib2.urlopen("{0}ask/reply/{1}/vote/{2}/".format(SITE_MIRROR,id,g.user.id),None,5)
# except:
# return jsonify({"error": gettext("The server is not responding. Please try again later.")})
vote = reply.votes.filter_by(user=g.user).first()
if vote:
db.session.delete(vote)
db.session.commit()
return jsonify({"success": -1})
else:
new_vote = Vote(user=g.user, type=TYPE_REPLY, type_id=reply.id)
db.session.add(new_vote)
db.session.commit()
return jsonify({"success": 1})
@mod.route('/reply/<int:id>/flag/')
@mod.route('/reply/<int:id>/flag/<user>/')
def reply_flag(id, user=None):
reply = Reply.query.get_or_404(id)
# if user and request.remote_addr == SITE_MIRROR.split(":")[1][2:]:
# g.user = User.query.get(user)
if g.user is None or not g.user.is_authenticated:
return jsonify({"error": gettext("You need to be logged in to flag replies.")})
# elif user is None and g.user is None:
# abort(404)
# if user is None:
# try:
# opener = urllib2.urlopen("{0}ask/reply/{1}/flag/{2}/".format(SITE_MIRROR,id,g.user.id),None,5)
# except:
# return jsonify({"error": gettext("The server is not responding. Please try again later.")})
flag = reply.flags.filter_by(user=g.user).first()
if flag:
db.session.delete(flag)
db.session.commit()
return jsonify({"success": -1})
else:
new_flag = Flag(user=g.user, reply_id=reply.id)
db.session.add(new_flag)
db.session.commit()
return jsonify({"success": 1})
```
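The vote-ordered listing in `question_list` hinges on an outer join against a grouped vote-count subquery. A self-contained sketch of that pattern with simplified stand-in models; `Question` and `Vote` here are minimal illustrations (the dataviva models also carry a polymorphic `type` column), and `declarative_base` is taken from `sqlalchemy.orm` as in SQLAlchemy 1.4+:

```python
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, func
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Question(Base):
    __tablename__ = 'question'
    id = Column(Integer, primary_key=True)
    text = Column(String)

class Vote(Base):
    __tablename__ = 'vote'
    id = Column(Integer, primary_key=True)
    question_id = Column(Integer, ForeignKey('question.id'))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([
    Question(id=1, text='How is ECI computed?'),
    Question(id=2, text='Where does the data come from?'),
    Vote(question_id=2), Vote(question_id=2), Vote(question_id=1),
])
session.commit()

# Count votes per question, then outer-join so unvoted questions still appear.
votes_subq = (session.query(Vote.question_id, func.count('*').label('vote_count'))
              .group_by(Vote.question_id)
              .subquery())
ranked = (session.query(Question, votes_subq.c.vote_count)
          .outerjoin(votes_subq, Question.id == votes_subq.c.question_id)
          .order_by(votes_subq.c.vote_count.desc())
          .all())
for question, count in ranked:
    print(question.text, count or 0)
```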
#### File: apps/build_graph/views.py
```python
import re
from flask import Blueprint, render_template, g, jsonify, request
from dataviva.apps.general.views import get_locale
from dataviva.apps.embed.models import Build, App
from dataviva.api.rais.services import Industry as CnaeService
from dataviva.api.secex.services import Product as SecexService
from dataviva.api.hedu.services import University as HeduService
from dataviva.api.sc.services import Basic_course as ScService
from dataviva.translations.dictionary import dictionary
from sqlalchemy import not_
mod = Blueprint('build_graph', __name__,
template_folder='templates',
url_prefix='/<lang_code>/build_graph')
@mod.before_request
def before_request():
g.page_type = mod.name
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
def parse_filter_id(filter_id):
if filter_id != 'all':
return '<%>'
else:
return filter_id
@mod.route('/')
@mod.route('/<dataset>/<filter0>/<filter1>/<filter2>')
def index(dataset=None, filter0=None, filter1=None, filter2=None):
view = request.args.get('view')
graph = request.args.get('graph')
compare = request.args.get('compare')
metadata = None
build_query = Build.query.join(App).filter(
Build.dataset == dataset,
Build.filter1.like(parse_filter_id(filter1)),
Build.filter2.like(parse_filter_id(filter2)),
Build.slug2_en == view,
App.type == graph)
if graph:
build = build_query.first_or_404()
build.set_bra(filter0)
if filter1 != 'all':
build.set_filter1(filter1)
if filter2 != 'all':
build.set_filter2(filter2)
service_id = filter1 if filter1 != u'all' else None
year = ' - ' if dataset else ''
if dataset == 'rais':
year += str(CnaeService(service_id).get_year())
elif dataset == 'secex':
year += str(SecexService(service_id).year())
elif dataset == 'hedu':
year += str(HeduService(service_id).year())
elif dataset == 'sc':
year += str(ScService(service_id).course_year())
title = re.sub(r'\s\(.*\)', r'', build.title())
metadata = {
'view': title,
'graph': dictionary()[graph],
'dataset': dictionary()[dataset] + year,
}
return render_template(
'build_graph/index.html', dataset=dataset, filter0=filter0, filter1=filter1, filter2=filter2,
graph=graph, view=view, compare=compare, metadata=metadata)
def parse_filter(filter):
if filter != 'all':
return '<%s>' % filter
else:
return filter
@mod.route('/views/<dataset>/<bra>/<filter1>/<filter2>')
def views(dataset, bra, filter1, filter2):
'''/views/secex/hs/wld'''
build_query = Build.query.filter(
Build.dataset == dataset,
Build.filter1 == parse_filter(filter1),
Build.filter2 == parse_filter(filter2))
    if bra != 'all':
        # filter() returns a new query; assign it back or the condition is silently lost.
        build_query = build_query.filter(not_(Build.bra.like('all')))
views = {}
for build in build_query.all():
if bra == 'all' and build.app.type == 'compare':
break
if bra:
build.set_bra(bra)
if filter1 != 'all':
build.set_filter1(request.args.get('filter1'))
if filter2 != 'all':
build.set_filter2(request.args.get('filter2'))
title = re.sub(r'\s\(.*\)', r'', build.title())
id = build.slug2_en
if id not in views:
views[id] = {
'id': id,
'name': title,
'graphs': {},
}
views[id]['graphs'][build.app.type] = {
'url': build.url(),
'name': build.app.name()
}
return jsonify(views=views)
```
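The two helpers above (`parse_filter_id` and `parse_filter`) translate URL segments into either a catch-all `LIKE` pattern or the exact placeholder string stored on `Build` rows. Assuming the module can be imported (it pulls in the full dataviva application), they behave like this:

```python
from dataviva.apps.build_graph.views import parse_filter, parse_filter_id

print(parse_filter('hs'))      # '<hs>'  -- exact placeholder match
print(parse_filter('all'))     # 'all'
print(parse_filter_id('hs'))   # '<%>'   -- LIKE pattern matching any placeholder
print(parse_filter_id('all'))  # 'all'
```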
#### File: apps/embed/views.py
```python
import requests
from datetime import datetime
from sqlalchemy import func
from flask import Blueprint, request, render_template, g, Response, make_response, jsonify
from flask.ext.babel import gettext
from dataviva import db, datavivadir, __year_range__, view_cache
from dataviva.api.attrs.models import Bra, Cnae, Hs, Cbo, Wld, University, Course_hedu, Course_sc, Search
from dataviva.apps.general.views import get_locale
from dataviva.apps.data.forms import DownloadForm
from dataviva.apps.user.models import Starred
from dataviva.apps.embed.models import Build, UI, App, Crosswalk_oc, Crosswalk_pi
from dataviva.apps.general.models import Short
from dataviva.utils.gzip_data import gzip_data
from dataviva.utils.cached_query import cached_query
from dataviva.utils.title_format import title_format
import json
import urllib2
import urllib
from config import FACEBOOK_OAUTH_ID, basedir, GZIP_DATA
import os
import zipfile
mod = Blueprint('embed', __name__,
template_folder='templates',
url_prefix='/<lang_code>/embed')
@mod.before_request
def before_request():
g.page_type = mod.name
g.color = "#af1f24"
g.sabrina = {
"outfit": "lab",
"face": "smirk",
"hat": "glasses"
}
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
def filler(dataset, filter1, filter2):
'''Since the "builds" are held in the database with placeholders for
attributes i.e. <cbo>, <hs>, <cnae> we need to convert the IDs given
in the URL to these placeholders. i.e.
- a0111 = <cnae>
- 010101 = <hs>
- all = all
'''
filler1 = filter1
if filler1 != "all":
if dataset == "rais":
filler1 = "cnae"
elif dataset == "secex":
filler1 = "hs"
elif dataset == "hedu":
filler1 = "university"
filler2 = filter2
if filler2 != "all":
if dataset == "rais":
filler2 = "cbo"
elif dataset == "secex":
filler2 = "wld"
elif dataset == "hedu":
filler2 = "course_hedu"
elif dataset == "sc":
filler2 = "course_sc"
return filler1, filler2
def is_xhr():
return request.is_xhr
@mod.route("/")
@mod.route("/<app_name>/<dataset>/<bra_id>/<filter1>/<filter2>/<output>/")
# NOTE: this decorator line was mangled by the dataset's redaction step; it is
# presumably a caching decorator keyed by api_cache_key("apps:embed"), whose import
# is also missing from this excerpt.
<EMAIL>(key_prefix=api_cache_key("apps:embed"), unless=is_xhr)
def embed(app_name="tree_map", dataset="rais", bra_id="4mg",
filter1="all", filter2="all", output="cbo"):
prefix = "apps:embed:xhr:"
lang = request.args.get('lang', None) or g.locale
global_vars = {x[0]: x[1] for x in request.args.items()}
imports = False
if "size" in global_vars:
if global_vars["size"] == "import_val":
imports = True
if "y" in global_vars:
if global_vars["y"] == "import_val":
imports = True
if "axes" in global_vars:
if global_vars["axes"] == "import_val":
imports = True
if (g.user is None or not g.user.is_authenticated) and request.is_xhr:
cache_id = prefix + request.path + lang
if imports:
cache_id = cache_id + "imports"
cached_q = cached_query(cache_id)
if cached_q:
ret = make_response(cached_q)
ret.headers['Content-Encoding'] = 'gzip'
ret.headers['Content-Length'] = str(len(ret.data))
return ret
build_filter1, build_filter2 = filler(dataset, filter1, filter2)
'''Grab attrs for bra and filters
'''
if bra_id == "all":
bra_attr = Wld.query.get_or_404("sabra")
else:
bra_attr = [Bra.query.get_or_404(b) for b in bra_id.split("_")]
filter1_attr = filter1
filter2_attr = filter2
if filter1 != "all":
filter1_attr = globals()[build_filter1.capitalize()].query.get_or_404(
filter1)
if filter2 != "all":
filter2_attr = globals()[build_filter2.capitalize()].query.get_or_404(
filter2)
if build_filter1 != "all":
build_filter1 = "<{}>".format(build_filter1)
if build_filter2 != "all":
build_filter2 = "<{}>".format(build_filter2)
'''This is an instance of the Build class for the selected app,
determined by the combination of app_type, dataset, filters and output.
'''
current_app = App.query.filter_by(type=app_name).first_or_404()
current_build = Build.query.filter_by(
app=current_app, dataset=dataset, filter1=build_filter1, filter2=build_filter2, output=output).first_or_404()
current_build.set_filter1(filter1_attr)
current_build.set_filter2(filter2_attr)
current_build.set_bra(bra_attr)
'''Every possible build, required by the embed page for building the build
dropdown.
'''
# all_builds = Build.query.all()
# all_builds.sort(key=lambda x: x.dataset)
# for build in all_builds:
# build.set_filter1(filter1_attr)
# build.set_filter2(filter2_attr)
# build.set_bra(bra_attr)
'''Get URL query parameters from reqest.args object to return to the view.
'''
if "controls" not in global_vars:
global_vars["controls"] = "true"
'''If user is logged in see if they have starred this app.'''
starred = 0
app_id = "/".join([app_name, dataset, bra_id, filter1, filter2, output])
if g.user and g.user.is_authenticated:
is_starred = Starred.query.filter_by(
user=g.user, app_id=app_id).first()
starred = 1 if is_starred else -1
if imports:
current_build.set_import()
if request.is_xhr:
ret = jsonify({
"current_build": current_build.serialize(),
# "all_builds": [b.json() for b in all_builds],
"starred": starred
})
ret.data = gzip_data(ret.data)
ret.headers['Content-Encoding'] = 'gzip'
ret.headers['Content-Length'] = str(len(ret.data))
if starred == 0 and cached_q is None:
cached_query(cache_id, ret.data)
else:
current_build.set_import()
year_range_dict = __year_range__.copy()
if current_build.app.type in ['network', 'rings', 'scatter']:
year_range_dict["secex"] = ["2000-1", "2017-12"]
year_range = json.dumps(year_range_dict)
ret = make_response(render_template("embed/embed.html",
# apps = App.query.all(),
# all_builds = all_builds,
starred=starred,
form=DownloadForm(),
current_build=current_build,
global_vars=json.dumps(
global_vars),
facebook_id=FACEBOOK_OAUTH_ID,
year_range=year_range))
ret.data = gzip_data(ret.data)
ret.headers['Content-Encoding'] = 'gzip'
ret.headers['Content-Length'] = str(len(ret.data))
ret.headers.add('Last-Modified', datetime.now())
ret.headers.add(
'Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0')
ret.headers.add('Pragma', 'no-cache')
return ret
@mod.route('/star/<app_name>/<data_type>/<bra_id>/<filter1>/<filter2>/<output>/', methods=['GET', 'POST'])
def app_star(app_name, data_type, bra_id, filter1, filter2, output):
app_id = "/".join([app_name, data_type, bra_id, filter1, filter2, output])
# if request.method == 'POST' and request.remote_addr == SITE_MIRROR.split(":")[1][2:]:
# g.user = User.query.get(request.form["user"])
if g.user is None or not g.user.is_authenticated:
return jsonify({"error": gettext("You need to be logged in to star visualizations.")})
starred = Starred.query.filter_by(user=g.user, app_id=app_id).first()
if request.method == 'POST':
# if "user" not in request.form:
# form_json = {"user": g.user.id, "title": request.form['title'].encode('utf-8')}
# try:
# opener = urllib2.urlopen("{0}{1}".format(SITE_MIRROR,request.path[1:]),urllib.urlencode(form_json),5)
# except:
# return jsonify({"error": gettext("The server is not responding.
# Please try again later.")})
if starred:
db.session.delete(starred)
db.session.commit()
return jsonify({"success": -1})
else:
app_name = request.form['title'].encode('utf-8')
timestamp = datetime.utcnow()
new_star = Starred(
user=g.user, app_id=app_id, app_name=app_name, timestamp=timestamp)
db.session.add(new_star)
db.session.commit()
return jsonify({"success": 1})
if starred:
return jsonify({"success": 1})
else:
return jsonify({"success": -1})
def get_builds(bra_attr, dataset, profile1, filter1, profile2, filter2, kwargs):
builds = Build.query.filter_by(
dataset=dataset, filter1=profile1, filter2=profile2).all()
build_list = []
for b in builds:
# -- when looking at all Brazil, skip Occugrid/Rings
if bra_attr and ((b.output == 'bra' and len(bra_attr.id) == 9) or (bra_attr.id == "sabra" and b.id in [48, 51])):
continue
if bra_attr:
b.set_bra(bra_attr)
if filter1 != 'all':
b.set_filter1(filter1)
if filter2 != 'all':
b.set_filter2(filter2)
build_list.append(b.json(**kwargs))
return build_list
@mod.route('/recommend/', methods=['GET', 'POST'])
@mod.route('/recommend/<app_name>/<dataset>/<bra_id>/<filter1>/<filter2>/<output>/', methods=['GET', 'POST'])
# NOTE: as above, this decorator line was mangled by redaction; presumably a caching
# decorator keyed by api_cache_key("apps:recommend").
<EMAIL>(key_prefix=api_cache_key("apps:recommend"))
def recommend(app_name=None, dataset=None, bra_id="4mg", filter1=None, filter2=None, output=None):
recommended = {}
build_filter1, build_filter2 = filler(dataset, filter1, filter2)
'''Grab attrs for bra and filters
'''
bra_all = [Wld.query.get_or_404("sabra")]
if bra_id == "all":
bra_attr = bra_all
else:
bra_attr = [Bra.query.get_or_404(b) for b in bra_id.split("_")]
filter1_attr = filter1
filter2_attr = filter2
profile = False
if filter1 != "all":
filter1_attr = globals()[build_filter1.capitalize()].query.get_or_404(
filter1)
if output == build_filter1:
profile = filter1_attr
recommended["crosswalk"] = crosswalk_recs(
dataset, build_filter1, filter1)
if filter2 != "all":
filter2_attr = globals()[build_filter2.capitalize()].query.get_or_404(
filter2)
if output == build_filter2:
profile = filter2_attr
recommended["crosswalk"] = crosswalk_recs(
dataset, build_filter2, filter2)
if profile == False and output == "bra":
profile = bra_attr[0]
if profile and output != "school":
if g.locale == "pt":
title = u"Perfil <{0}_para> <{0}>".format(output)
else:
title = u"Profile for <{0}>".format(output)
recommended["profile"] = {
"title": title_format(title, profile),
"url": profile.url()
}
if build_filter1 != "all":
build_filter1 = "<{}>".format(build_filter1)
if build_filter2 != "all":
build_filter2 = "<{}>".format(build_filter2)
kwargs = {k: v for k, v in request.args.items()}
if app_name == "geo_map" and len(bra_id) < 9:
custom = Build.query.filter_by(
app_id=3, dataset=dataset, filter1=build_filter1, filter2=build_filter2, output=output).first()
custom.set_bra(bra_attr)
custom.set_filter1(filter1_attr)
custom.set_filter2(filter2_attr)
recommended["custom"] = custom.json(**kwargs)
for bra in bra_attr:
recommended['builds'] = get_builds(
bra, dataset, build_filter1, filter1_attr, build_filter2, filter2_attr, kwargs)
if bra_id != "all" and output != "bra":
recommended['builds'] += get_builds(
bra_all[0], dataset, build_filter1, filter1_attr, build_filter2, filter2_attr, kwargs)
return jsonify(recommended)
def get_geo_location(ip):
req = urllib2.Request("http://freegeoip.net/json/" + ip)
opener = urllib2.build_opener()
try:
f = opener.open(req)
except:
return None
json_resp = json.loads(f.read())
city = json_resp["city"]
# city = "Viana"
state = json_resp["region_name"]
# state = "Espírito Santo"
# state = "Maranhão"
# first try to find the exact city within the state
bra_state = Bra.query.filter_by(name_pt=state).filter(
func.char_length(Bra.id) == 3).first()
bra_cities = Bra.query.filter_by(name_pt=city).filter(
func.char_length(Bra.id) == 9)
if bra_state:
if bra_cities.count() == 1:
return bra_cities.first()
elif bra_cities.count() > 1:
return bra_cities.filter(Bra.id.like(bra_state.id+'%')).first()
return None
return None
@mod.route('/download/', methods=['GET', 'POST'])
def download():
import tempfile
import subprocess
import random
import base64
form = DownloadForm()
data = form.data.data
format = form.output_format.data
title = form.title.data
downloadToken = form.downloadToken.data
max_length = 250 - (len(downloadToken) + 1)
title_safe = title[:max_length]
filenameDownload = title_safe + "-" + downloadToken
if format == "png":
mimetype = 'image/png'
elif format == "pdf":
mimetype = 'application/pdf'
elif format == "svg":
mimetype = 'application/octet-stream'
elif format == "csv":
mimetype = "text/csv;charset=UTF-16"
elif format == "url2csv":
mimetype = "text/csv;charset=UTF-16"
response_data = data.encode("utf-16")
content_disposition = "attachment;filename=%s.%s" % (title_safe, format)
content_disposition = content_disposition.replace(",", "_")
download_file = make_response(Response(response_data,
mimetype=mimetype,
headers={"Content-Disposition": content_disposition}))
with open(os.path.join(basedir, "dataviva/static/downloads/" + title_safe + "." + format), "wb") as fo:
fo.write(response_data)
zf = zipfile.ZipFile(os.path.join(
basedir, "dataviva/static/downloads/" + filenameDownload + ".zip"), mode='w')
try:
zf.write(os.path.join(basedir, "dataviva/static/downloads/" +
title_safe + "." + format), title_safe + "." + format)
finally:
zf.close()
os.remove(os.path.join(basedir, "dataviva/static/downloads/" + title_safe + "." + format))
return "/static/downloads/" + filenameDownload + ".zip"
@mod.route('/info/<app_name>/')
def info(app_name="tree_map"):
return render_template("embed/info.html", app_name=app_name)
@mod.route('/coords/<id>/')
def coords(id="all"):
if GZIP_DATA:
fileext = ".gz"
filetype = "gzip"
else:
fileext = ""
filetype = "json"
if id == "all":
file_name = "bra_states.json"+fileext
else:
file_name = ("{0}_munic.json"+fileext).format(id)
cached_q = cached_query(file_name)
if cached_q:
ret = make_response(cached_q)
else:
path = datavivadir+"/static/json/coords/{0}".format(file_name)
gzip_file = open(path).read()
cached_query(file_name, gzip_file)
ret = make_response(gzip_file)
ret.headers['Content-Encoding'] = filetype
ret.headers['Content-Length'] = str(len(ret.data))
return ret
@mod.route('/networks/<type>/')
def networks(type="hs"):
if GZIP_DATA:
fileext = ".gz"
filetype = "gzip"
else:
fileext = ""
filetype = "json"
file_name = ("network_{0}.json"+fileext).format(type)
cached_q = cached_query(file_name)
if cached_q:
ret = make_response(cached_q)
else:
path = datavivadir+"/static/json/networks/{0}".format(file_name)
gzip_file = open(path).read()
cached_query(file_name, gzip_file)
ret = make_response(gzip_file)
ret.headers['Content-Encoding'] = filetype
ret.headers['Content-Length'] = str(len(ret.data))
return ret
@mod.route('/shorten/', methods=['GET', 'POST'])
def shorten_url():
if request.method == 'POST':
response = request.form['url'] if 'url' in request.form else request.json['url']
long_url = urllib.unquote(response.encode('utf-8')).decode('utf-8')
short = Short.query.filter_by(long_url=long_url).first()
if short is None:
slug = Short.make_unique_slug(long_url)
short = Short(slug=slug, long_url=long_url)
db.session.add(short)
db.session.commit()
return jsonify({"slug": short.slug})
return jsonify({"error": "No URL given."})
def crosswalk_recs(dataset, filter, id):
crosswalk = []
attr_swap = {"hs": "cnae", "cnae": "hs",
"cbo": "course_hedu", "course_hedu": "cbo"}
crosswalk_table = {
"hs": "pi", "cnae": "pi", "cbo": "oc", "course_hedu": "oc"}
if filter in attr_swap and id != "all":
table = globals()["Crosswalk_{}".format(crosswalk_table[filter])]
col = getattr(table, "{}_id".format(filter))
results = table.query.filter(col == id)
ids = [row.get_id(dataset) for row in results]
if ids:
ids = Search.query.filter(Search.id.in_(ids)).filter(
Search.kind == attr_swap[filter]).all()
ids = [a.id for a in ids]
table = globals()[attr_swap[filter].capitalize()]
attrs = table.query.filter(table.id.in_(ids)).all()
crosswalk = [
{"title": a.name(), "url": a.url(), "type": attr_swap[filter]} for a in attrs]
return crosswalk
@mod.route('/image', methods=['GET'])
def image():
    url = request.args.get('link')
    code = requests.get(url).status_code
return Response(str(code), status=200)
```
#### File: apps/location/views.py
```python
from flask import Blueprint, render_template, g, abort, request
from dataviva import db
from dataviva.apps.general.views import get_locale
from dataviva.api.attrs.services import Location as LocationService, LocationGdpRankings, \
LocationGdpPerCapitaRankings, LocationPopRankings, LocationAreaRankings, LocationMunicipalityRankings, Bra
from dataviva.api.secex.models import Ymb
from dataviva.api.secex.services import Location as LocationBodyService, LocationWld, LocationEciRankings
from dataviva.api.rais.services import LocationIndustry, LocationOccupation, \
LocationJobs, LocationDistance, LocationOppGain
from dataviva.api.hedu.services import LocationUniversity, LocationMajor
from dataviva.api.sc.services import LocationSchool, LocationBasicCourse
from dataviva.api.attrs.services import All
from dataviva.api.secex.services import Product
from dataviva.api.rais.services import Industry
from dataviva.api.rais.services import Occupation
from dataviva.api.hedu.services import University
from dataviva.api.sc.services import Basic_course
from dataviva.api.hedu.services import Major
from dataviva.api.sc.services import AllScholar
from dataviva.api.sc.services import AllBasicCourse
from dataviva.api.attrs.models import Wld
from sqlalchemy import desc, func
from random import randint
from decimal import *
import sys
reload(sys)
sys.setdefaultencoding('utf8')
mod = Blueprint('location', __name__,
template_folder='templates',
url_prefix='/<lang_code>/location',
static_folder='static')
tabs = {
'general': [],
'opportunities': [
'product-space-scatter',
'activities-space-network',
'activities-space-scatter',
],
'wages': [
'jobs-industry-tree_map',
'new-api-jobs-industry-tree_map',
'jobs-industry-stacked',
'new-api-jobs-industry-stacked',
'jobs-occupation-tree_map',
'new-api-jobs-occupation-tree_map',
'jobs-occupation-stacked',
'new-api-jobs-occupation-stacked',
'wage-industry-tree_map',
'new-api-wage-industry-tree_map',
'wage-industry-stacked',
'new-api-wage-industry-stacked',
'wage-occupation-tree_map',
'new-api-wage-occupation-tree_map',
'wage-occupation-stacked',
'new-api-wage-occupation-stacked'
],
'trade-partner': [
'trade-balance-location-line',
'new-api-trade-balance-location-line',
'exports-products-tree_map',
'new-api-exports-products-tree_map',
'exports-products-stacked',
'new-api-exports-products-stacked',
'exports-destination-tree_map',
'new-api-exports-destination-tree_map',
'exports-destination-stacked',
'new-api-exports-destination-stacked',
'imports-products-tree_map',
'new-api-imports-products-tree_map',
'imports-products-stacked',
'new-api-imports-products-stacked',
'imports-origin-tree_map',
'new-api-imports-origin-tree_map',
'imports-origin-stacked',
'new-api-imports-origin-stacked',
'new-api-exports-port-tree_map',
'new-api-imports-port-tree_map',
'new-api-exports-port-line',
'new-api-imports-port-line'
],
'education': [
'higher-education-university-tree_map',
'new-api-higher-education-university-tree_map',
'education-course-tree_map',
'new-api-education-course-tree_map',
'professional-education-school-tree_map',
'new-api-professional-education-school-tree_map',
'professional-education-course-tree_map',
'new-api-professional-education-course-tree_map',
'basic-education-administrative-dependencie-tree_map',
'new-api-basic-education-administrative-dependencie-tree_map',
'basic-education-level-tree_map',
'new-api-basic-education-level-tree_map',
'basic-education-municipality-tree_map',
'new-api-basic-education-municipality-tree_map',
'basic-education-municipality-tree_map',
],
'health': [
'equipments-municipality-map',
'equipments-municipality-tree_map',
'equipments-municipality-stacked',
'equipments-type-tree_map',
'equipments-type-bar',
'equipments-type-stacked',
'equipments-sus-bond-bar',
'establishments-municipality-map',
'establishments-municipality-tree_map',
'establishments-municipality-stacked',
'establishments-unit-type-tree_map',
'establishments-unit-type-stacked',
'establishments-facilities-bar',
'beds-municipality-map',
'beds-municipality-tree_map',
'beds-municipality-stacked',
'beds-bed-type-tree_map',
'beds-bed-type-stacked',
'beds-bed-type-bar',
'beds-sus-bond-bar',
'professionals-municipality-map',
'professionals-municipality-tree_map',
'professionals-municipality-stacked',
'professionals-provider-unit-tree_map',
'professionals-provider-unit-stacked',
'professionals-occupation-tree_map',
'professionals-occupation-stacked',
]
}
@mod.before_request
def before_request():
g.page_type = 'category'
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
def location_depth(bra_id):
locations = {
1: "region",
3: "state",
5: "mesoregion",
7: "microregion",
9: "municipality"
}
return locations[len(bra_id)]
def handle_region_bra_id(bra_id):
return {
"1": "1",
"2": "2",
"3": "5",
"4": "3",
"5": "4"
}[bra_id]
def _location_service(depth, location):
if depth == 'region':
return handle_region_bra_id(location.id)
if depth == 'mesoregion':
return str(location.id_ibge)[:2] + str(location.id_ibge)[-2:]
if depth == 'microregion':
return str(location.id_ibge)[:2] + str(location.id_ibge)[-3:]
else:
return location.id_ibge
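# Illustration (the ids below are assumed, not taken from the data): a bra_id of
# length 9 resolves to depth "municipality" and _location_service returns the raw
# id_ibge, while a 1-character region id is first remapped by handle_region_bra_id.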
@mod.route('/<bra_id>/graphs/<tab>', methods=['POST'])
def graphs(bra_id, tab):
if bra_id == 'all':
location = Wld.query.filter_by(id='sabra').first()
location.id = 'all'
depth = None
id_ibge = None
is_municipality = False
else:
location = Bra.query.filter_by(id=bra_id).first()
depth = location_depth(bra_id)
id_ibge = _location_service(depth, location)
is_municipality = True if depth == 'municipality' else False
return render_template('location/graphs-' + tab + '.html', location=location, depth=depth, id_ibge=id_ibge, graph=None, is_municipality=is_municipality)
@mod.route('/all', defaults={'tab': 'general'})
@mod.route('/all/<tab>')
def all(tab):
location_service_brazil = All()
product_service = Product(product_id=None)
industry_service = Industry(cnae_id=None)
occupation_service = Occupation(occupation_id=None)
university_service = University(university_id=None)
basic_course_service = Basic_course(course_sc_id=None)
major_service = Major(course_hedu_id=None, bra_id=None)
scholar_service = AllScholar()
basic_course_service = AllBasicCourse()
location = Wld.query.filter_by(id='sabra').first_or_404()
location.id = 'all'
is_municipality = False
menu = request.args.get('menu')
url = request.args.get('url')
graph = {}
if menu:
graph['menu'] = menu
if url:
        url_prefix = menu.split('-')[-1] + '/' if menu and (menu.startswith('new-api-') or tab == 'health') else 'embed/'
graph['url'] = url_prefix + url
profile = {}
header = {
'bg_class_image': 'bg-all',
'gdp': location_service_brazil.gdp(),
'population': location_service_brazil.population(),
'gdp_per_capita': location_service_brazil.gdp_per_capita(),
'eci': 0.151,
'year_yb': location_service_brazil.year_yb(),
'year_ybs': location_service_brazil.year_ybs()
}
body = {
'product_year': product_service.year(),
'total_imports': product_service.all_imported(),
'total_exports': product_service.all_exported(),
'all_trade_balance': product_service.all_trade_balance(),
'industry_year': industry_service.get_year(),
'main_industry_by_num_jobs_name': industry_service.main_industry_by_num_jobs_name(),
'total_jobs': industry_service.total_jobs(),
'university_year': university_service.year(),
}
    if body.get('total_exports') is None and body.get('total_imports') is None and body.get('total_jobs') is None and \
            body.get('highest_enrolled_by_university') is None and body.get('highest_enrolled_by_basic_course') is None and \
            body.get('highest_enrolled_by_major') is None:
abort(404)
if tab not in tabs:
abort(404)
if menu and menu not in tabs[tab]:
abort(404)
else:
return render_template('location/index.html',
header=header, body=body, profile=profile, location=location, is_municipality=is_municipality, tab=tab, graph=graph)
@mod.route('/<bra_id>', defaults={'tab': 'general'})
@mod.route('/<bra_id>/<tab>')
def index(bra_id, tab):
location = Bra.query.filter_by(id=bra_id).first_or_404()
is_municipality = location and len(location.id) == 9
menu = request.args.get('menu')
url = request.args.get('url')
if bra_id == 'all':
depth = None
id_ibge = None
else:
depth = location_depth(bra_id)
id_ibge = _location_service(depth, location)
if depth == 'municipality':
is_municipality = True
if location:
location_id = location.id
else:
location_id = None
graph = {}
if menu:
graph['menu'] = menu
if url:
        url_prefix = menu.split('-')[-1] + '/' if menu and (menu.startswith('new-api-') or tab == 'health') else 'embed/'
graph['url'] = url_prefix + url
depth = location_depth(bra_id)
if depth == 'region':
id_ibge = handle_region_bra_id(location.id)
elif depth == 'mesoregion':
id_ibge = str(location.id_ibge)[:2] + str(location.id_ibge)[-2:]
elif depth == 'microregion':
id_ibge = str(location.id_ibge)[:2] + str(location.id_ibge)[-3:]
else:
id_ibge = location.id_ibge
if not is_municipality:
tabs['wages'] += [
'jobs-municipality-tree_map',
'new-api-jobs-municipality-tree_map',
'jobs-municipality-stacked',
'new-api-jobs-municipality-stacked',
'wages-municipality-tree_map',
'new-api-wages-municipality-tree_map',
'wages-municipality-stacked',
'new-api-wages-municipality-stacked'
]
tabs['trade-partner'] += [
'exports-municipality-tree_map',
'new-api-exports-municipality-tree_map',
'exports-municipality-stacked',
'new-api-exports-municipality-stacked',
'imports-municipality-tree_map',
'new-api-imports-municipality-tree_map',
'imports-municipality-stacked',
'new-api-imports-municipality-stacked',
]
tabs['education'] += [
'education-municipality-tree_map',
'new-api-education-municipality-tree_map',
'basic-education-municipality-tree_map',
'new-api-basic-education-municipality-tree_map',
]
location_service = LocationService(bra_id=bra_id)
location_gdp_rankings_service = LocationGdpRankings(
bra_id=bra_id, stat_id='gdp')
location_gdp_pc_rankings_service = LocationGdpPerCapitaRankings(
bra_id=bra_id)
location_pop_rankings_service = LocationPopRankings(bra_id=bra_id)
location_eci_rankings_service = LocationEciRankings(bra_id=bra_id)
location_area_rankings_service = LocationAreaRankings(bra_id=bra_id)
location_municipality_rankings_service = LocationMunicipalityRankings(bra_id=bra_id)
location_wld_service = LocationWld(bra_id=bra_id)
location_secex_service = LocationBodyService(bra_id=bra_id)
location_industry_service = LocationIndustry(bra_id=bra_id)
location_occupation_service = LocationOccupation(bra_id=bra_id)
location_jobs_service = LocationJobs(bra_id=bra_id)
location_distance_service = LocationDistance(bra_id=bra_id)
location_opp_gain_service = LocationOppGain(bra_id=bra_id)
location_university_service = LocationUniversity(bra_id=bra_id)
location_major_service = LocationMajor(bra_id=bra_id)
location_school_service = LocationSchool(bra_id=bra_id)
location_basic_course_service = LocationBasicCourse(bra_id=bra_id)
    ''' Basic SECEX query '''
max_year_query = db.session.query(
func.max(Ymb.year)).filter_by(bra_id=bra_id, month=12)
eci = Ymb.query.filter(
Ymb.bra_id == bra_id,
Ymb.month == 0,
Ymb.year == max_year_query) \
.order_by(desc(Ymb.year)).limit(1).first()
''' Background Image'''
if len(bra_id) == 1:
        counties = Bra.query.filter(Bra.id.like(bra_id + '%'), func.length(Bra.id) == 3).all()
        background_image = "bg-" + str(counties[randint(0, len(counties) - 1)].id) + "_" + str(randint(1, 2))
else:
background_image = "bg-" + location.id[:3] + "_" + str(randint(1, 2))
if len(bra_id) != 9 and len(bra_id) != 3:
header = {
'name': location_service.name(),
'gdp': location_service.gdp(),
'population': location_service.population(),
'gdp_per_capita': location_service.gdp() / location_service.population(),
'bg_class_image': background_image,
'year': location_service.year()
}
else:
header = {
'name': location_service.name(),
'gdp': location_service.gdp(),
'life_expectation': location_service.life_expectation(),
'population': location_service.population(),
'gdp_per_capita': location_service.gdp_per_capita(),
'hdi': location_service.hdi(),
'bg_class_image': background_image,
'year': location_service.year()
}
if eci is not None:
header['eci'] = eci.eci
header['eci_year'] = eci.year
body = {
'product_year': location_secex_service.year(),
'main_product_by_export_value_name': location_secex_service.main_product_by_export_value_name(),
'total_exports': location_secex_service.total_exports(),
'less_distance_by_product': location_secex_service.less_distance_by_product(),
'less_distance_by_product_name': location_secex_service.less_distance_by_product_name(),
'opportunity_gain_by_product': location_secex_service.opportunity_gain_by_product(),
'opportunity_gain_by_product_name': location_secex_service.opportunity_gain_by_product_name(),
'secex_year': location_secex_service.year(),
'industry_year': location_industry_service.year(),
'rais_year': location_jobs_service.year(),
'less_distance_by_occupation': location_distance_service.less_distance_by_occupation(),
'less_distance_by_occupation_name': location_distance_service.less_distance_by_occupation_name(),
'opportunity_gain_by_occupation': location_opp_gain_service.opportunity_gain_by_occupation(),
'opportunity_gain_by_occupation_name': location_opp_gain_service.opportunity_gain_by_occupation_name(),
'university_year': location_university_service.year(),
'basic_course_year': location_basic_course_service.year()
}
if len(bra_id) == 9:
profile = {
'number_of_municipalities': location_service.number_of_locations(len(bra_id)),
'bra_id': bra_id,
'state_name': location_service.location_name(3),
'mesoregion_name': location_service.location_name(5),
'gdp_rank': location_gdp_rankings_service.gdp_rank(),
'area': Decimal(location_service.area())
}
elif len(bra_id) == 7:
profile = {
'number_of_microregions': location_service.number_of_locations(len(bra_id)),
'bra_id': bra_id,
'state_name': location_service.location_name(3),
'mesoregion_name': location_service.location_name(5),
'number_of_municipalities': location_service.number_of_municipalities()
}
elif len(bra_id) == 5:
profile = {
'number_of_mesoregions': location_service.number_of_locations(len(bra_id)),
'bra_id': bra_id,
'state_name': location_service.location_name(3),
'eci_rank': location_eci_rankings_service.eci_rank()
}
elif len(bra_id) == 1:
profile = {
'number_of_regions': location_service.number_of_locations(len(bra_id)),
'bra_id': bra_id,
'gdp_pc_rank': location_gdp_pc_rankings_service.gdp_pc_rank(),
'pop_rank': location_pop_rankings_service.pop_rank(),
'region_states': location_service.states_in_a_region()
}
else:
profile = {
'number_of_states': location_service.number_of_locations(len(bra_id)),
'region_name': location_service.location_name(1),
'number_of_municipalities': location_service.number_of_locations(9),
'pop_rank': location_pop_rankings_service.pop_rank(),
'area_rank': location_area_rankings_service.area_rank(),
'neighbors': location_service.neighbors(),
'municipality_rank': location_municipality_rankings_service.municipality_rank()
}
    if body.get('total_exports') is None and body.get('total_imports') is None and body.get('total_jobs') is None and \
            body.get('highest_enrolled_by_university') is None and body.get('highest_enrolled_by_basic_course') is None and \
            body.get('highest_enrolled_by_major') is None:
abort(404)
if tab not in tabs:
abort(404)
if menu and menu not in tabs[tab]:
abort(404)
else:
return render_template('location/index.html',
header=header, body=body, profile=profile, location=location, is_municipality=is_municipality, tab=tab, graph=graph, id_ibge=id_ibge)
```
#### File: apps/major/views.py
```python
from flask import Blueprint, render_template, g, request, abort
from dataviva.apps.general.views import get_locale
from dataviva.api.hedu.models import Yc_hedu, Ybc_hedu
from dataviva.api.attrs.models import Bra, Course_hedu
from dataviva.api.hedu.services import Major, MajorUniversities, MajorMunicipalities
from dataviva import db
from sqlalchemy.sql.expression import func
mod = Blueprint('major', __name__,
template_folder='templates',
url_prefix='/<lang_code>/major',
static_folder='static')
def location_depth(bra_id):
locations = {
1: "region",
3: "state",
5: "mesoregion",
7: "microregion",
9: "municipality"
}
return locations[len(bra_id)]
def handle_region_bra_id(bra_id):
return {
"1": "1",
"2": "2",
"3": "5",
"4": "3",
"5": "4"
}[bra_id]
def _location_service(depth, location):
if depth == 'region':
return handle_region_bra_id(location.id)
if depth == 'mesoregion':
return str(location.id_ibge)[:2] + str(location.id_ibge)[-2:]
if depth == 'microregion':
return str(location.id_ibge)[:2] + str(location.id_ibge)[-3:]
else:
return location.id_ibge
@mod.before_request
def before_request():
g.page_type = 'category'
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
@mod.route('/<course_hedu_id>', defaults={'tab': 'general'})
@mod.route('/<course_hedu_id>/<tab>')
def index(course_hedu_id, tab):
bra_id = request.args.get('bra_id')
bra_id = bra_id if bra_id != 'all' else None
menu = request.args.get('menu')
url = request.args.get('url')
graph = {}
if menu:
graph['menu'] = menu
if url:
url_prefix = menu.split('-')[-1] + '/' if menu and menu.startswith('new-api-') else 'embed/'
graph['url'] = url_prefix + url
max_year_query = db.session.query(
func.max(Yc_hedu.year)).filter_by(course_hedu_id=course_hedu_id)
if bra_id:
major_service = Major(course_hedu_id, bra_id)
universities_service = MajorUniversities(course_hedu_id, bra_id)
municipalities_service = MajorMunicipalities(course_hedu_id, bra_id)
rank_query = Ybc_hedu.query.filter(
Ybc_hedu.year == max_year_query,
Ybc_hedu.bra_id == bra_id,
func.length(Ybc_hedu.course_hedu_id) == len(course_hedu_id))\
.order_by(Ybc_hedu.enrolled.desc())
else:
major_service = Major(course_hedu_id, '')
universities_service = MajorUniversities(course_hedu_id, '')
municipalities_service = MajorMunicipalities(course_hedu_id, '')
rank_query = Yc_hedu.query.filter(
Yc_hedu.year == max_year_query,
func.length(Yc_hedu.course_hedu_id) == len(course_hedu_id))\
.order_by(Yc_hedu.enrolled.desc())
rank = rank_query.all()
hedu_max_year = db.session.query(func.max(Yc_hedu.year)).first()[0]
if not bra_id:
header = {
'name': major_service.name(),
'enrolled': major_service.enrolled(),
'year': major_service.year(),
}
else:
header = {
'name': major_service.name(),
'enrolled': major_service.enrolled(),
'year': major_service.year(),
'bra_id': bra_id,
'location_name': major_service.location_name()
}
body = {
'university_with_more_enrolled': universities_service.university_with_more_enrolled(),
'highest_enrolled_number_by_university': universities_service.highest_enrolled_number(),
}
tabs = {
'general': [],
'enrollments': [
'enrollments-university-tree_map',
'new-api-enrollments-university-tree_map',
'enrollments-municipality-geo_map',
'new-api-enrollments-municipality-geo_map',
'enrollments-municipality-stacked',
'new-api-enrollments-municipality-stacked',
'enrollments-municipality-tree_map',
'new-api-enrollments-municipality-tree_map',
'enrollments-status-line',
'new-api-enrollments-status-line',
'enrollments-shift-stacked',
'new-api-enrollments-shift-stacked',
],
}
id_ibge = None
for index, maj in enumerate(rank):
if rank[index].course_hedu_id == course_hedu_id:
header['rank'] = index + 1
break
location = Bra.query.filter(Bra.id == bra_id).first()
if bra_id:
depth = location_depth(bra_id)
id_ibge = _location_service(depth, location)
is_municipality = True if depth == 'municipality' else False
major = Course_hedu.query.filter(Course_hedu.id == course_hedu_id).first()
if tab not in tabs:
abort(404)
if menu and menu not in tabs[tab]:
abort(404)
if header['enrolled'] is None or hedu_max_year != header['year']:
abort(404)
else:
return render_template('major/index.html', header=header, body=body, id_ibge=id_ibge, location=location, major=major, tab=tab, graph=graph)
@mod.route('/<course_hedu_id>/graphs/<tab>', methods=['POST'])
def graphs(course_hedu_id, tab):
bra = request.args.get('bra_id')
major = Course_hedu.query.filter_by(id=course_hedu_id).first_or_404()
location = Bra.query.filter_by(id=bra).first()
return render_template('major/graphs-'+tab+'.html', major=major, location=location, graph=None)
```
#### File: apps/map/views.py
```python
from flask import Blueprint, render_template, g, request, make_response
from dataviva.apps.general.views import get_locale
from dataviva.translations.dictionary import dictionary
from dataviva import datavivadir
from config import GZIP_DATA
from dataviva.utils.cached_query import cached_query
from dataviva.utils.graphs_services import location_service
from dataviva.apps.title.views import get_title
from dataviva.utils.graphs_services import *
import urllib
import json
mod = Blueprint('map', __name__,
template_folder='templates',
url_prefix='/<lang_code>/map',
static_folder='static')
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
@mod.before_request
def before_request():
g.page_type = mod.name
@mod.route('/<dataset>/<value>/', defaults={'id_ibge': ''})
@mod.route('/<dataset>/<value>/<id_ibge>')
def index(dataset, value, id_ibge):
filters = []
title_attrs = {}
services = {
'product': product_service,
'id_ibge': location_service,
'wld': wld_service,
'occupation': occupation_service,
'industry': industry_service,
'basic_course': sc_service,
}
for k, v in request.args.items():
if k not in ['values', 'filters', 'count', 'year']:
if v and k in services:
filters.append(services[k](v))
title_attrs[services[k](v)[0]] = services[k](v)[1]
else:
if k != 'colors':
filters.append((k, v))
title_attrs[k] = v
if id_ibge:
location = location_service(id_ibge)[0]
filters.append((location, id_ibge))
state = '' if location == 'region' else id_ibge[:2]
title_attrs[location] = id_ibge
else:
state = id_ibge
location = 'municipality'
filters = urllib.urlencode(filters)
title, subtitle = get_title(dataset, value, 'map', title_attrs)
return render_template('map/index.html',
dataset=dataset,
value=value,
state=state,
filters=filters,
title=title or '',
subtitle=subtitle or '',
dictionary=json.dumps(dictionary()))
@mod.route('/coords/', defaults={'id': 'all'})
@mod.route('/coords/<id>')
def coords(id):
if GZIP_DATA:
fileext = ".gz"
filetype = "gzip"
else:
fileext = ""
filetype = "json"
if id == "all":
file_name = "bra_all_states.json" + fileext
else:
file_name = ("coords-{0}.json" + fileext).format(id)
cached_q = cached_query(file_name)
if cached_q:
ret = make_response(cached_q)
else:
path = datavivadir + "/static/json/map/{0}".format(file_name)
gzip_file = open(path).read()
cached_query(file_name, gzip_file)
ret = make_response(gzip_file)
ret.headers['Content-Encoding'] = filetype
ret.headers['Content-Length'] = str(len(ret.data))
return ret
```
#### File: apps/scholar/models.py
```python
from dataviva import db
from sqlalchemy import ForeignKey
article_keyword_table = db.Table(
'scholar_article_keyword',
db.Column('article_id', db.Integer(), db.ForeignKey('scholar_article.id')),
db.Column('keyword_id', db.Integer(), db.ForeignKey('scholar_keyword.id'))
)
class Article(db.Model):
__tablename__ = 'scholar_article'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(400))
abstract = db.Column(db.Text())
file_url = db.Column(db.String(400))
theme = db.Column(db.String(250))
postage_date = db.Column(db.DateTime)
approval_status = db.Column(db.Boolean)
authors = db.relationship('AuthorScholar',
backref='scholar_article',
lazy='eager',
cascade='all, delete-orphan')
keywords = db.relationship('KeyWord',
secondary=article_keyword_table,
backref=db.backref('articles', lazy='dynamic'))
def authors_str(self):
author_names = [author.name for author in self.authors]
return ', '.join(author_names)
def keywords_str(self):
keyword_names = [keyword.name for keyword in self.keywords]
return ', '.join(keyword_names)
def date_str(self):
return self.postage_date.strftime('%d/%m/%Y')
def __repr__(self):
return '<Article %r>' % (self.title)
class AuthorScholar(db.Model):
__tablename__ = 'scholar_author'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(500))
article_id = db.Column(db.Integer, ForeignKey('scholar_article.id'))
def __init__(self, name=None):
self.name = name
def __repr__(self):
return '<AuthorScholar %r>' % (self.name)
class KeyWord(db.Model):
__tablename__ = 'scholar_keyword'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
def __init__(self, name=None):
self.name = name
def __repr__(self):
return '<Keyword %r>' % (self.name)
```
#### File: apps/search/models.py
```python
from dataviva import db
from flask import g
from sqlalchemy import ForeignKey
from dataviva.utils.title_case import title_case
class SearchProfile(db.Model):
__tablename__ = 'search_profile'
id = db.Column(db.String(50), primary_key=True)
name_pt = db.Column(db.String(50))
name_en = db.Column(db.String(50))
questions = db.relationship("SearchQuestion", backref='search_question')
def name(self):
lang = getattr(g, "locale", "en")
return title_case(getattr(self, "name_" + lang))
def __repr__(self):
        return '<SearchProfile %r>' % (self.name())
class SearchSelector(db.Model):
__tablename__ = 'search_selector'
id = db.Column(db.String(50), primary_key=True)
name_pt = db.Column(db.String(50))
name_en = db.Column(db.String(50))
def name(self):
lang = getattr(g, "locale", "en")
return title_case(getattr(self, "name_" + lang))
def __repr__(self):
        return '<SearchSelector %r>' % (self.name())
class SearchQuestion(db.Model):
__tablename__ = 'search_question'
id = db.Column(db.Integer, primary_key=True)
description_pt = db.Column(db.String(400))
description_en = db.Column(db.String(400))
answer = db.Column(db.String(400))
profile_id = db.Column(db.String(50), ForeignKey('search_profile.id'))
selectors = db.Column(db.String(400))
def description(self):
lang = getattr(g, "locale", "en")
return getattr(self, "description_" + lang)
def __repr__(self):
return '<SearchQuestion %r>' % (self.description())
```
#### File: apps/search/views.py
```python
from flask import Blueprint, render_template, g, redirect, url_for, flash, jsonify, request
from dataviva.apps.general.views import get_locale
from models import SearchQuestion, SearchSelector, SearchProfile
from dataviva import db
from forms import RegistrationForm
from flask.ext.login import login_required
from dataviva.apps.admin.views import required_roles
mod = Blueprint('search', __name__,
template_folder='templates',
url_prefix='/<lang_code>/search')
@mod.before_request
def before_request():
g.page_type = mod.name
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
@mod.route('/', methods=['GET'])
def index():
return redirect(url_for('search.admin'))
@mod.route('/selector/all', methods=['GET'])
def all_selectors():
result = SearchSelector.query.all()
selectors = []
for row in result:
selectors += [(row.id, row.name())]
return jsonify(selectors=selectors)
@mod.route('/question/all', methods=['GET'])
def all_questions():
questions_query = SearchQuestion.query.all()
questions = []
for row in questions_query:
questions += [(
row.id,
SearchProfile.query.filter_by(id=row.profile_id).first_or_404().name(),
row.description(),
row.selectors,
row.answer
)]
return jsonify(questions=questions)
@mod.route('/profile/<id>', methods=['GET'])
def profile_questions(id):
profile = SearchProfile.query.filter_by(id=id).first_or_404().name()
questions = {}
questions_query = SearchQuestion.query.filter_by(profile_id=id)
for row in questions_query.all():
questions[row.id] = {
'description': row.description(),
'selectors': row.selectors.split(','),
'answer': row.answer
}
return jsonify(
questions=questions,
profile=profile,
template=render_template('search/modal.html'))
@mod.route('/admin', methods=['GET'])
@login_required
@required_roles(1)
def admin():
questions = SearchQuestion.query.all()
return render_template('search/admin.html', questions=questions, lang=g.locale)
@mod.route('/admin/question/new', methods=['GET'])
def new():
form = RegistrationForm()
form.set_choices(g.locale)
return render_template('search/new.html', form=form, action=url_for('search.create'))
@mod.route('/admin/question/new', methods=['POST'])
@login_required
@required_roles(1)
def create():
form = RegistrationForm()
form.set_choices(g.locale)
if form.validate() is False:
return render_template('search/new.html', form=form)
else:
question = SearchQuestion()
question.profile_id = form.profile.data
question.description_en = form.description_en.data
question.description_pt = form.description_pt.data
question.answer = form.answer.data
question.selectors = form.selector.data.replace(' ', '') #remove spaces
        db.session.add(question)
        db.session.flush()
        db.session.commit()
message = u'Muito obrigado! Sua pergunta foi submetida com sucesso!'
flash(message, 'success')
return redirect(url_for('search.admin'))
@mod.route('/admin/question/<id>/edit', methods=['GET'])
@login_required
@required_roles(1)
def edit(id):
form = RegistrationForm()
form.set_choices(g.locale)
question = SearchQuestion.query.filter_by(id=id).first_or_404()
form.profile.data = question.profile_id
form.description_en.data = question.description_en
form.description_pt.data = question.description_pt
form.answer.data = question.answer
form.selector.data = ', '.join((question.selectors).split(','))
return render_template('search/edit.html', form=form, action=url_for('search.update', id=id))
@mod.route('/admin/question/<id>/edit', methods=['POST'])
@login_required
@required_roles(1)
def update(id):
form = RegistrationForm()
form.set_choices(g.locale)
id = int(id.encode())
if form.validate() is False:
return render_template('search/edit.html', form=form)
else:
question = SearchQuestion.query.filter_by(id=id).first_or_404()
profile_id = form.profile.data
question.profile_id = profile_id
question.description_en = form.description_en.data
question.description_pt = form.description_pt.data
question.answer = form.answer.data
question.selectors = form.selector.data.replace(' ', '') #remove spaces
db.session.commit()
message = u'Pergunta editada com sucesso!'
flash(message, 'success')
return redirect(url_for('search.admin'))
@mod.route('/admin/delete', methods=['POST'])
@login_required
@required_roles(1)
def admin_delete():
ids = request.form.getlist('ids[]')
if ids:
questions = SearchQuestion.query.filter(SearchQuestion.id.in_(ids)).all()
for question in questions:
db.session.delete(question)
db.session.commit()
return u"Pergunta(s) excluída(s) com sucesso!", 200
else:
return u'Selecione alguma pergunta para excluí-la.', 205
```
#### File: apps/session/views.py
```python
from dataviva import db
from dataviva.apps.session.login_providers import facebook, twitter, google
from dataviva.apps.general.views import get_locale
from dataviva.apps.user.models import User
from dataviva.utils.encode import sha512
from dataviva.translations.dictionary import dictionary
from flask import Blueprint, request, render_template, session, redirect, Response, url_for, g, flash
from flask.ext.login import login_user, logout_user
from forms import LoginForm
from urllib2 import Request, urlopen, URLError
import json
mod = Blueprint('session', __name__,
template_folder='templates',
url_prefix='/<lang_code>/session',
static_folder='static')
@mod.before_request
def before_request():
g.page_type = mod.name
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
@mod.route('/logout')
def logout():
session.pop('twitter_token', None)
session.pop('google_token', None)
session.pop('facebook_token', None)
logout_user()
return redirect(url_for('general.home'))
def errorLogin(html_form, status, form):
if status == 401:
message = dictionary()["confirmation_pending"]
if html_form:
flash(message, 'danger')
else:
return Response(message, status=401, mimetype='application/json')
return redirect('/user/confirm_pending/' + form.email.data)
elif status == 400:
message = dictionary()["invalid_email_or_password"]
if html_form:
flash(message, 'danger')
else:
return Response(message, status=400, mimetype='application/json')
return render_template('session/login.html', form=form)
@mod.route('/login', methods=['GET', 'POST'])
@mod.route('/login/<provider>', methods=['GET', 'POST'])
def login(provider=None):
form = LoginForm()
html_form = request.args.get('htmlForm')
if request.method == "POST":
if form.validate_on_submit():
            user = User.query.filter_by(email=form.email.data, password=sha512(form.password.data)).first()
if user:
if user.confirmed:
login_user(user, remember=True)
return redirect(request.referrer)
else:
return errorLogin(html_form=html_form, status=401, form=form)
return errorLogin(html_form=html_form, status=400, form=form)
if provider:
if provider == "google":
callback = url_for('session.google_authorized', _external=True)
return google.authorize(callback=callback)
if provider == "twitter":
callback = url_for('session.twitter_authorized',
next=request.referrer or None,
_external=True)
return twitter.authorize(callback=callback)
if provider == "facebook":
callback = url_for('session.facebook_authorized',
next=request.referrer or None,
_external=True)
return facebook.authorize(callback=callback)
return render_template('session/login.html', form=form)
@mod.route('/complete_login', methods=['GET', 'POST'])
def after_login(email, fullname, language, gender, image):
user = User.query.filter_by(email=email).first()
if user is None:
user = User()
user.nickname = email.split('@')[0]
user.agree_mailer = True
user.confirmed = True
user.email = email
user.fullname = fullname
user.language = language
user.gender = gender
user.image = image
db.session.add(user)
db.session.commit()
remember_me = False
if 'remember_me' in session:
remember_me = session['remember_me']
session.pop('remember_me', None)
login_user(user, remember=remember_me)
return redirect(request.args.get('next') or request.referrer)
"""
GOOGLE LOGIN
Here are the specific methods for logging in users with their
google accounts.
"""
@google.tokengetter
def get_access_token():
return session.get('google_token')
@mod.route('/google_authorized')
@google.authorized_handler
def google_authorized(resp):
access_token = resp['access_token']
session['google_token'] = access_token, ''
    # use a distinct name so flask's `request` proxy is not shadowed
    userinfo_request = Request('https://www.googleapis.com/oauth2/v1/userinfo?alt=json&access_token=' + access_token)
    try:
        res = urlopen(userinfo_request)
    except URLError, e:
        if getattr(e, 'code', None) == 401:
            # Unauthorized - bad token
            session.pop('google_token', None)
            raise Exception('error!')
            # return redirect(url_for('login'))
        raise
response = json.loads(res.read())
email = response["email"] if "email" in response else None
fullname = response["name"] if "name" in response else None
language = response["locale"] if "locale" in response else None
gender = response["gender"] if "gender" in response else None
image = response["picture"] if "picture" in response else None
return after_login(email=email, fullname=fullname, language=language, gender=gender, image=image)
"""
FACEBOOK LOGIN
Here are the specific methods for logging in users with their
facebook accounts.
"""
@facebook.tokengetter
def get_facebook_oauth_token():
return session.get('facebook_token')
@mod.route('/facebook_authorized')
@facebook.authorized_handler
def facebook_authorized(resp):
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
session['facebook_token'] = (resp['access_token'], '')
response = facebook.get('/me/?fields=picture,name,locale,email,gender').data
email = response["email"] if "email" in response else None
fullname = response["name"] if "name" in response else None
language = response["locale"] if "locale" in response else None
gender = response["gender"] if "gender" in response else None
image = response["picture"]["data"]["url"] if "picture" in response else None
return after_login(email=email, fullname=fullname, language=language, gender=gender, image=image)
"""
TWITTER LOGIN
Here are the specific methods for logging in users with their
twitter accounts.
"""
@twitter.tokengetter
def get_twitter_token():
return session.get('twitter_token')
@mod.route('/twitter-authorized')
@twitter.authorized_handler
def twitter_authorized(resp):
session['twitter_token'] = (
resp['oauth_token'],
resp['oauth_token_secret']
)
response = twitter.get('account/verify_credentials.json?include_email=true').data
email = response["email"] if "email" in response else None
fullname = response["name"] if "name" in response else None
language = response["lang"] if "lang" in response else None
gender = response["gender"] if "gender" in response else None
image = response["profile_image_url"] if "profile_image_url" in response else None
return after_login(email=email, fullname=fullname, language=language, gender=gender, image=image)
```
#### File: apps/trade_partner/views.py
```python
from flask import Blueprint, render_template, g, request, abort
from dataviva.apps.general.views import get_locale
from dataviva.api.secex.services import TradePartner, TradePartnerMunicipalities, TradePartnerProducts
from dataviva.api.secex.models import Ymw, Ymbw
from dataviva.api.attrs.models import Wld, Bra
from sqlalchemy.sql.expression import func
from dataviva.translations.dictionary import dictionary
from dataviva import db
mod = Blueprint('trade_partner', __name__,
template_folder='templates',
url_prefix='/<lang_code>/trade_partner')
@mod.before_request
def before_request():
g.page_type = 'category'
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
def location_depth(bra_id):
locations = {
1: "region", # todo
3: "state",
5: "mesoregion",
7: "microregion",
9: "municipality"
}
return locations[len(bra_id)]
def handle_region_bra_id(bra_id):
return {
"1": "1",
"2": "2",
"3": "5",
"4": "3",
"5": "4"
}[bra_id]
def location_service(depth, location):
if depth == 'region':
return handle_region_bra_id(location.id)
if depth == 'mesoregion':
return str(location.id_ibge)[:2] + str(location.id_ibge)[-2:]
if depth == 'microregion':
return str(location.id_ibge)[:2] + str(location.id_ibge)[-3:]
else:
return location.id_ibge
@mod.route('/<wld_id>/graphs/<tab>', methods=['POST'])
def graphs(wld_id, tab):
trade_partner = Wld.query.filter_by(id=wld_id).first_or_404()
location = Bra.query.filter_by(id=request.args.get('bra_id')).first()
bra_id = request.args.get('bra_id')
if not bra_id:
depth = None
id_ibge = None
else:
depth = location_depth(bra_id)
id_ibge = location_service(depth, location)
return render_template('trade_partner/graphs-' + tab + '.html', trade_partner=trade_partner, location=location, graph=None, id_ibge=id_ibge)
@mod.route('/<wld_id>', defaults={'tab': 'general'})
@mod.route('/<wld_id>/<tab>')
def index(wld_id, tab):
bra_id = request.args.get('bra_id')
menu = request.args.get('menu')
url = request.args.get('url')
graph = {}
if menu:
graph['menu'] = menu
if url:
url_prefix = menu.split(
'-')[-1] + '/' if menu and menu.startswith('new-api-') else 'embed/'
graph['url'] = url_prefix + url
if wld_id == 'sabra':
abort(404)
trade_partner = Wld.query.filter_by(id=wld_id).first_or_404()
location = Bra.query.filter_by(id=bra_id).first()
max_year_query = db.session.query(
func.max(Ymw.year)).filter_by(wld_id=wld_id)
bra_id = bra_id if bra_id != 'all' else None
if not bra_id:
depth = None
id_ibge = None
else:
depth = location_depth(bra_id)
id_ibge = location_service(depth, location)
if bra_id:
trade_partner_service = TradePartner(wld_id, bra_id)
municipalities_service = TradePartnerMunicipalities(wld_id, bra_id)
products_service = TradePartnerProducts(wld_id, bra_id)
export_rank_query = Ymbw.query.join(Wld).filter(
Ymbw.wld_id_len == len(wld_id),
Ymbw.bra_id == bra_id,
Ymbw.month == 0,
Ymbw.year == max_year_query).order_by(Ymbw.export_val.desc())
import_rank_query = Ymbw.query.join(Wld).filter(
Ymbw.wld_id_len == len(wld_id),
Ymbw.bra_id == bra_id,
Ymbw.month == 0,
Ymbw.year == max_year_query).order_by(Ymbw.import_val.desc())
else:
trade_partner_service = TradePartner(wld_id, None)
municipalities_service = TradePartnerMunicipalities(wld_id, None)
products_service = TradePartnerProducts(wld_id, None)
export_rank_query = Ymw.query.join(Wld).filter(
Ymw.wld_id_len == len(wld_id),
Ymw.month == 0,
Ymw.year == max_year_query).order_by(Ymw.export_val.desc())
import_rank_query = Ymw.query.join(Wld).filter(
Ymw.wld_id_len == len(wld_id),
Ymw.month == 0,
Ymw.year == max_year_query).order_by(Ymw.import_val.desc())
export_rank = export_rank_query.all()
import_rank = import_rank_query.all()
if not bra_id:
header = {
'name': trade_partner_service.country_name(),
'year': trade_partner_service.year(),
'total_exported': trade_partner_service.total_exported(),
'total_imported': trade_partner_service.total_imported(),
'wld_id': wld_id,
'bra_id': bra_id
}
else:
header = {
'name': trade_partner_service.country_name(),
'year': trade_partner_service.year(),
'total_exported': trade_partner_service.total_exported(),
'total_imported': trade_partner_service.total_imported(),
'wld_id': wld_id,
'bra_id': bra_id,
'location_name': trade_partner_service.location_name(),
'location_type': dictionary()['bra_' + str(len(bra_id))]
}
body = {
'highest_export_value': municipalities_service.highest_export_value(),
'product_with_more_exports': products_service.product_with_more_exports(),
'product_with_highest_export_value': products_service.highest_export_value(),
}
tabs = {
'general': [],
'international-trade': [
'trade-balance-partner-line',
'new-api-trade-balance-partner-line',
'exports-municipality-tree_map',
'new-api-exports-municipality-tree_map',
'exports-municipality-stacked',
'new-api-exports-municipality-stacked',
'new-api-expots-port-line',
'exports-destination-tree_map',
'new-api-exports-destination-tree_map',
'exports-destination-stacked',
'new-api-exports-destination-stacked',
'exports-destination-geo_map',
'imports-municipality-tree_map',
'new-api-imports-municipality-tree_map',
'imports-municipality-stacked',
'new-api-imports-municipality-stacked',
'new-api-imports-port-line',
'imports-origin-tree_map',
'new-api-imports-origin-tree_map',
'imports-origin-stacked',
'new-api-imports-origin-stacked',
'imports-origin-geo_map',
],
}
for index, trade_partner_ranking in enumerate(export_rank):
if export_rank[index].wld_id == wld_id:
header['export_rank'] = index + 1
break
for index, trade_partner_ranking in enumerate(import_rank):
if import_rank[index].wld_id == wld_id:
header['import_rank'] = index + 1
break
    if body['highest_export_value'] is None and body.get('highest_import_value') is None:
abort(404)
if tab not in tabs:
abort(404)
if menu and menu not in tabs[tab]:
abort(404)
return render_template('trade_partner/index.html',
body_class='perfil-estado',
header=header,
body=body,
trade_partner=trade_partner,
location=location,
tab=tab,
graph=graph,
id_ibge=id_ibge)
```
#### File: apps/university/views.py
```python
from flask import Blueprint, render_template, g, abort, request
from dataviva.apps.general.views import get_locale
from dataviva.api.hedu.services import University, UniversityMajors
from dataviva.api.attrs.models import University as UniversityModel
from dataviva.api.hedu.models import Yu
from dataviva import db
from sqlalchemy.sql.expression import func
mod = Blueprint('university', __name__,
template_folder='templates',
url_prefix='/<lang_code>/university',
static_folder='static')
@mod.before_request
def before_request():
g.page_type = 'category'
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
@mod.route('/<university_id>/graphs/<tab>', methods=['POST'])
def graphs(university_id, tab):
university = UniversityModel.query.filter_by(id=university_id).first_or_404()
return render_template('university/graphs-' + tab + '.html', university=university, graph=None)
@mod.route('/<university_id>', defaults={'tab': 'general'})
@mod.route('/<university_id>/<tab>')
def index(university_id, tab):
university = UniversityModel.query.filter_by(id=university_id).first_or_404()
university_service = University(university.id)
majors_service = UniversityMajors(university.id)
menu = request.args.get('menu')
url = request.args.get('url')
graph = {}
if menu:
graph['menu'] = menu
if url:
url_prefix = menu.split('-')[-1] + '/' if menu and menu.startswith('new-api-') else 'embed/'
graph['url'] = url_prefix + url
header = {
'type': university_service.university_type(),
'enrolled': university_service.enrolled(),
'graduates': university_service.graduates()
}
body = {
'major_with_more_enrollments': majors_service.major_with_more_enrollments(),
'highest_enrollment_number_by_major': majors_service.highest_enrolled_number(),
'year': '2016',
}
tabs = {
'general': [],
'enrollments': [
'category-major-tree_map',
'category-major-stacked',
'category-status-line',
'new-api-category-status-line',
'category-shift-stacked',
],
}
hedu_max_year = '2016'
if tab not in tabs:
abort(404)
if menu and menu not in tabs[tab]:
abort(404)
if header['enrolled'] is None:
abort(404)
else:
return render_template('university/index.html', university=university, header=header, body=body, tab=tab, graph=graph)
```
#### File: dataviva/translations/translate.py
```python
from dictionary import dictionary
''' Translates column names.
'''
def translate(column):
d = dictionary()
return d.get(column, column)
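# Example (assuming the dictionary maps column names to display labels):
#   translate('wage_avg')  -> the label stored under 'wage_avg'
#   translate('unknown')   -> 'unknown'  (falls back to the key itself)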
```
#### File: dataviva/utils/cached_query.py
```python
from flask import abort, current_app, make_response, Flask, jsonify, request, \
Response, session, g, get_flashed_messages
from dataviva import view_cache
''' Gets or sets the given ID in the cache. If no data is supplied,
it acts as a getter. '''
def cached_query(id, data=None, timeout=None):
if data is None:
return view_cache.get(id)
return view_cache.set(id, data, timeout=timeout)
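# Usage sketch:
#   cached_query('bra_states.json.gz')            # getter: cached value or None
#   cached_query('bra_states.json.gz', payload)   # setter: stores payload in view_cache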
def make_cache_key(*args, **kwargs):
path = request.path
lang = g.locale
cache_key = (path + lang).encode('utf-8')
if get_flashed_messages():
msgs = "|".join([msg[0] for msg in get_flashed_messages(with_categories=True)])
cache_key += "/"+msgs
return cache_key
def api_cache_key(namespace, *args, **kwargs):
def gen_key(**kwargs):
path = request.path
lang = g.locale
reqstr = ""
if request.args:
for k,v in request.args.items():
reqstr += "&{}={}".format(str(k), str(v))
key = namespace + ":" + path + lang + reqstr
cache_key = key.encode('utf-8')
if get_flashed_messages():
msgs = "|".join([msg[0] for msg in get_flashed_messages(with_categories=True)])
cache_key += "/"+msgs
return cache_key
return gen_key
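# The generated key is namespace + ":" + path + locale + query string, e.g. an
# (illustrative) request to /attrs/bra/?depth=9 under namespace "attrs" with
# locale "en" yields "attrs:/attrs/bra/en&depth=9".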
```
#### File: scripts/data_download/common.py
```python
import os
from sqlalchemy import create_engine
from datetime import datetime
import pandas as pd
import bz2
def get_env_variable(var_name, default=-1):
try:
return os.environ[var_name]
except KeyError:
if default != -1:
return default
error_msg = "Set the %s os.environment variable" % var_name
raise Exception(error_msg)
data_base_connection = "mysql://{0}:{1}@{2}/{3}".format(
    get_env_variable("DATAVIVA_DB_USER", "root"),
    get_env_variable("DATAVIVA_DB_PW", ""),
    get_env_variable("DATAVIVA_DB_HOST", "localhost"),
    get_env_variable("DATAVIVA_DB_NAME", "dataviva"))
engine = create_engine(data_base_connection, echo=False)
def get_colums(table, columns_deleted):
columns = []
column_rows = engine.execute(
"SELECT COLUMN_NAME FROM information_schema.columns WHERE TABLE_NAME='"+table+"' AND COLUMN_NAME NOT LIKE %s", "%_len")
columns_all=[row[0] for row in column_rows]
for column in columns_all:
if column not in columns_deleted:
columns.append(column)
return columns
def test_imput(sys, logging, Condition):
if len(sys.argv) != 4 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! use :\npython scripts/data_download/school_census/create_files.py en/pt output_path year"
exit()
inputs = {}
inputs['output_path'] = os.path.abspath(sys.argv[2])
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[2].split('/')[2]) + '-data-download.log' )),level=logging.DEBUG)
inputs['year'] = Condition('year='+str(sys.argv[3]), '-'+str(sys.argv[3]))
inputs['lang'] = sys.argv[1]
return inputs
def download(table_columns, table, conditions, name_file, new_file_path, logging, sys):
query = 'SELECT '+','.join(table_columns[table])+' FROM '+table+' WHERE '+' and '.join(conditions)
logging.info('Query for file ('+str(datetime.now().hour)+':'+str(datetime.now().minute)+':'+str(datetime.now().second)+'): \n '+name_file+'\n'+query)
print "Gerando ... " + new_file_path
f = pd.read_sql_query(query, engine)
f.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep=",", index=False, float_format="%.3f", encoding='utf-8')
logging.info("\nError:\n"+str(sys.stderr)+"\n-----------------------------------------------\n")
```
#### File: data_download/utils/rais.py
```python
from collections import namedtuple
from common import engine, get_colums
from dictionary import en, pt
from datetime import datetime
import pandas as pd
import os
import bz2
import sys
import logging
if len(sys.argv) != 5 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! use :\npython scripts/data_download/rais_create_files.py en/pt output_path year 1/2/3/4"
print " 1 = rais-2003-microregions-classes-families\n 2 = rais-2013-municipalities-classes\n 3 = rais-2013-municipalities-classes-main_groups\n 4 = rais-2013-municipalities-classes-families\n"
exit()
def save(years, lang, output_path, select, year):
    conditions = [' 1 = 1', ' 1 = 1', ' 1 = 1', ' 1 = 1']  # 4 conditions
table_columns = {}
columns_deleted=['num_emp', 'hist', 'Gini', 'bra_id_len', 'cbo_id_len', 'cnae_id_len']
if lang == 'en':
dic_lang = en
else:
dic_lang = pt
if select != 2:
table = 'rais_ybio'
else:
table = 'rais_ybi'
if table not in table_columns.keys():
table_columns[table] = [ i+" as '"+dic_lang[i]+"'" for i in get_colums(table, columns_deleted)]
select_1 = ["SELECT " + ",".join(table_columns[table])+" FROM rais_ybio WHERE year=" + year + " and bra_id_len=7 and cnae_id_len=6 and cbo_id_len=4 ", "rais-" + year + "-microregions-classes-families" ]
select_2 = ["SELECT " + ",".join(table_columns[table])+" FROM rais_ybi WHERE year=" + year + " and bra_id_len=9 and cnae_id_len=6 and 1 = 1", "rais-" + year + "-municipalities-classes" ]
select_3 = ["SELECT " + ",".join(table_columns[table])+" FROM rais_ybio WHERE year=" + year + " and bra_id_len=9 and cnae_id_len=6 and cbo_id_len=1", "rais-" + year + "-municipalities-classes-main_groups" ]
select_4 = ["SELECT " + ",".join(table_columns[table])+" FROM rais_ybio WHERE year=" + year + " and bra_id_len=9 and cnae_id_len=6 and cbo_id_len=4", "rais-" + year + "-municipalities-classes-families" ]
query=[]
if select == str(1):
query = select_1
if select == str(2):
query = select_2
if select == str(3):
query = select_3
if select == str(4):
query = select_4
name_file = query[1]
print "Gerando ... : "+name_file
logging.info('Query for file ('+str(datetime.now().hour)+':'+str(datetime.now().minute)+':'+str(datetime.now().second)+'): \n '+name_file+'\n'+query[0])
f = pd.read_sql_query(query[0], engine)
    new_file_path = os.path.join(output_path, name_file+".csv.bz2")  # output path for the compressed CSV
f.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep=",", index=False, float_format="%.3f", encoding='utf-8')
logging.info("\nErro:\n"+str(sys.stderr)+"\n-----------------------------------------------\n")
Condition = namedtuple('Condition', ['condition', 'name'])
output_path = os.path.abspath(sys.argv[2])
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],'rais-data-download.log' )),level=logging.DEBUG)
years = [ Condition('year='+str(sys.argv[3]), '-'+str(sys.argv[3])) ]
save(years=years, lang=sys.argv[1], output_path=output_path, select=sys.argv[4], year=str(sys.argv[3]))
''' missing rais queries (kept for reference)
select_1 = "rais-2003-microregions-classes-families
SELECT year as 'Year',bra_id as 'BRA ID',cnae_id as 'CNAE ID',cbo_id as 'CBO ID',wage as 'Total Monthly Wages',num_jobs as 'Total Jobs',num_est as 'Total Establishments',wage_avg as 'Average Monthly Wage',age_avg as 'Average age',required as 'Estimated Employees',wage_growth as 'Nominal Wage Growth (1 year)',wage_growth_5 as 'Nominal Wage Growth (5 year)',num_emp_growth as 'Nominal Employee Growth (1 year)',num_emp_growth_5 as 'Nominal Employee Growth (5 year)'
FROM rais_ybio WHERE year=2013 and bra_id_len=7 and cnae_id_len=6 and cbo_id_len=4"
select_2 = "rais-2013-municipalities-classes
SELECT year as 'Year',bra_id as 'BRA ID',cnae_id as 'CNAE ID',wage as 'Total Monthly Wages',num_jobs as 'Total Jobs',num_est as 'Total Establishments',wage_avg as 'Average Monthly Wage',age_avg as 'Average age',rca as 'Domestic RCA',distance as 'Distance',opp_gain as 'Opportunity Gain',wage_growth as 'Nominal Wage Growth (1 year)',wage_growth_5 as 'Nominal Wage Growth (5 year)',num_emp_growth as 'Nominal Employee Growth (1 year)',num_emp_growth_5 as 'Nominal Employee Growth (5 year)'
FROM rais_ybi WHERE year=2013 and bra_id_len=9 and cnae_id_len=6 and 1 = 1"
select_3 = "rais-2013-municipalities-classes-main_groups
SELECT year as 'Year',bra_id as 'BRA ID',cnae_id as 'CNAE ID',cbo_id as 'CBO ID',wage as 'Total Monthly Wages',num_jobs as 'Total Jobs',num_est as 'Total Establishments',wage_avg as 'Average Monthly Wage',age_avg as 'Average age',required as 'Estimated Employees',wage_growth as 'Nominal Wage Growth (1 year)',wage_growth_5 as 'Nominal Wage Growth (5 year)',num_emp_growth as 'Nominal Employee Growth (1 year)',num_emp_growth_5 as 'Nominal Employee Growth (5 year)'
FROM rais_ybio WHERE year=2013 and bra_id_len=9 and cnae_id_len=6 and cbo_id_len=1"
select_4 = "rais-2013-municipalities-classes-families
SELECT year as 'Year',bra_id as 'BRA ID',cnae_id as 'CNAE ID',cbo_id as 'CBO ID',wage as 'Total Monthly Wages',num_jobs as 'Total Jobs',num_est as 'Total Establishments',wage_avg as 'Average Monthly Wage',age_avg as 'Average age',required as 'Estimated Employees',wage_growth as 'Nominal Wage Growth (1 year)',wage_growth_5 as 'Nominal Wage Growth (5 year)',num_emp_growth as 'Nominal Employee Growth (1 year)',num_emp_growth_5 as 'Nominal Employee Growth (5 year)'
FROM rais_ybio WHERE year=2013 and bra_id_len=9 and cnae_id_len=6 and cbo_id_len=4"
#
'''
```
#### File: dataviva-site/scripts/rebuild_search_indices.py
```python
import datetime
import whoosh
import flask_whooshalchemy
import os
import sys
from shutil import rmtree
program_start = datetime.datetime.utcnow()
basedir = os.path.abspath(__file__).split('/scripts/')[0]
if basedir not in sys.path:
sys.path.insert(0, basedir)
from dataviva import app
from config import WHOOSH_BASE
from dataviva.apps.blog.models import Post
from dataviva.apps.news.models import Publication
def log(message):
logtime = datetime.datetime.utcnow()
logdiff = logtime - program_start
print("{0} (+{1:.3f}): {2}".format(logtime.strftime("%Y-%m-%d %H:%M:%S"),
logdiff.total_seconds(),
message))
def rebuild_index(model):
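# Rewrites every row of the model into its Whoosh index: each field listed
# in __searchable__ plus the primary key is written via update_document().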
log("Rebuilding {0} index...".format(model.__name__))
primary_field = model.pure_whoosh.primary_key_name
searchables = model.__searchable__
index_writer = flask_whooshalchemy.whoosh_index(app, model)
entries = model.query.all()
entry_count = 0
with index_writer.writer() as writer:
for entry in entries:
index_attrs = {}
for field in searchables:
index_attrs[field] = unicode(getattr(entry, field))
index_attrs[primary_field] = unicode(getattr(entry, primary_field))
writer.update_document(**index_attrs)
entry_count += 1
log("Rebuilt {0} {1} search index entries.".format(
str(entry_count), model.__name__))
if __name__ == "__main__":
if os.path.exists(WHOOSH_BASE):
rmtree(WHOOSH_BASE)
model_list = [Post, Publication]
for model in model_list:
rebuild_index(model)
```
#### File: tests/application_tests/test_general_module.py
```python
from flask import url_for
from test_base import BaseTestCase
class GeneralModuleTests(BaseTestCase):
def test_should_redirect_when_access_root(self):
assert '302 FOUND' == self.client.get('/').status
def test_should_redirect_to_home_screen(self):
response = self.client.get('/')
self.assertRedirects(response, url_for('general.home'))
def test_english_home_screen_is_up_and_running(self):
response = self.client.get('/en/')
self.assert_200(response)
def test_portuguese_home_screen_is_up_and_running(self):
response = self.client.get('/pt/')
self.assert_200(response)
``` |
{
"source": "joelvitorniino/Google_search_API",
"score": 3
} |
#### File: joelvitorniino/Google_search_API/gvsearch.py
```python
from flask import Flask, request, jsonify
from googlesearch import search
import os
app = Flask(__name__)
@app.route("/search_all", methods=["GET"])
def get_all():
payload = request.args.get("q")
if payload is None:
return jsonify({"success": False, "error": "q parameter not provided"})
lang = request.args.get("lang") or "pt"
results = [c for c in search(f"{payload}", stop=30, lang=lang)]
return jsonify({"success": True, "links": results})
@app.route("/search_video", methods=["GET"])
def get_videos():
payload = request.args.get("q")
if payload is None:
return jsonify({"success": False, "error": "q parameter not provided"})
lang = request.args.get("lang") or "pt"
results = [
c for c in search(f"'{payload}' youtube", stop=30, lang=lang) if "watch?v=" in c
]
return jsonify({"success": True, "links": results})
if os.environ.get("ENV") == "development":
if __name__ == "__main__":
app.run(host="0.0.0.0")
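# Illustrative usage (assumed local run, not from the source): with ENV=development
# set, start the server and query e.g. GET /search_all?q=flask&lang=en or
# GET /search_video?q=flask; both endpoints return JSON of the form
# {"success": ..., "links": [...]} with up to 30 result URLs.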
``` |
{
"source": "joelwanner/smtax",
"score": 2
} |
#### File: src/actions/benchmark.py
```python
import os
import time
import interface.log as log
from actions.check import NetworkChecker
from generators.crafted import AmplificationNetwork, CoremeltNetwork
# TODO: Reduce redundancy
DEFAULT_N_FLOWS = 6
class Benchmark(object):
def __init__(self, output_path, ac_cls, n_runs):
self.ac_cls = ac_cls
self.n_runs = n_runs
self.out_path = output_path
self.logfile = output_path
# TODO: Improve output format
def run_files(self, directory):
with self.create_logfile() as logfile:
logfile.write("Runtimes\n-------------------\n")
# Run all benchmarks in directory
files = os.listdir(directory)
n = len(files)
runtimes = []
for i, filename in enumerate(files):
if filename.endswith(".txt"):
print()
log.print_header("BENCHMARK %d/%d" % (i + 1, n), filename)
runs = []
for k in range(self.n_runs):
checker = NetworkChecker.from_file(os.path.join(directory, filename), self.ac_cls,
DEFAULT_N_FLOWS, render=False, verbose=False)
start_time = time.time()
checker.check_attack(out_path=None)
runs.append(time.time() - start_time)
runtimes.extend(runs)
runtime_str = ", ".join(["%.3f" % r for r in runtimes])
logfile.write("[%s]\n" % runtime_str)
def run_examples(self, sizes):
with self.create_logfile() as logfile:
log.print_header("Server Amplification Attacks")
logfile.write("Server Amplification Attack\n")
self.__run_example(AmplificationNetwork, sizes, [2 * n for n in sizes], logfile)
log.print_header("Coremelt Attacks")
logfile.write("\nCoremelt Attack\n")
self.__run_example(CoremeltNetwork, sizes, [2 * n for n in sizes], logfile)
def __run_example(self, attack_cls, sizes, n_flows, logfile):
x = []
runtimes = []
for size, n in zip(sizes, n_flows):
runs = []
n_hosts = None
for k in range(self.n_runs):
print("Run %d/%d of network %s(%d)" % (k + 1, self.n_runs, attack_cls, size))
attack = attack_cls(size)
checker = self.ac_cls.from_network(attack, n)
nc = NetworkChecker(checker)
start_time = time.time()
nc.check_attack(out_path=None)
runs.append(time.time() - start_time)
if not n_hosts:
n_hosts = len(attack.topology.hosts)
runtime_str = ", ".join(["%.3f" % r for r in runs])
x.append(n_hosts)
runtimes.append("[%s]" % runtime_str)
x_str = ", ".join(["%d" % n for n in x])
y_str = ", ".join(runtimes)
logfile.write("x = [%s]\ny = [%s]\n" % (x_str, y_str))
def create_logfile(self):
if os.path.isdir(self.out_path):
i = 1
file = None
while not file or os.path.exists(file):
file = os.path.join(self.out_path, "benchmark%d.txt" % i)
i += 1
self.logfile = file
return open(file, 'w')
else:
return open(self.out_path, 'w')
```
#### File: src/actions/generate.py
```python
from generators.random import *
from generators.crafted import *
class Generator(object):
def __init__(self, path):
self.path = path
def __generate(self, cls, name, ids, params):
for i, p in enumerate(zip(*params)):
topology = cls(*p)
path = os.path.join(self.path, "%s%d.txt" % (name, ids[i]))
with open(path, "w") as file:
file.write(str(topology))
print("Generated %s" % path)
def generate_random(self, n_networks, n_hosts, connectivity):
self.__generate(RandomTopology, "random", range(n_networks), [[n_hosts]*n_networks, [connectivity]*n_networks])
def generate_crafted(self, sizes):
self.__generate(AmplificationNetwork, "amplification", sizes, [sizes])
self.__generate(CoremeltNetwork, "coremelt", sizes, [sizes])
```
#### File: src/generators/random.py
```python
import subprocess
import os
from shutil import copyfile
from copy import copy
import string
import random
from network.topology import *
BRITE_DIRECTORY = "brite/"
CONF_FILE = "tmp.conf"
CONF_DEFAULT = "default.conf"
TMP_FILE = "tmp"
SEED_FILE = "seed"
SEED_DEFAULT = "seed.default"
class RandomTopology(Topology):
def __init__(self, n, connectivity):
with open(os.path.join(BRITE_DIRECTORY, CONF_DEFAULT), 'r') as f:
config = f.read()
n_routers = n * 3 // 5
self.n_clients = n - n_routers
# Configure BRITE
# ------------------
config = config.replace("<N_ROUTERS>", str(n_routers))
config = config.replace("<CONNECTIVITY>", str(connectivity))
tmp_conf = os.path.join(BRITE_DIRECTORY, CONF_FILE)
with open(tmp_conf, 'w') as f:
f.write(config)
seed_path = os.path.join(BRITE_DIRECTORY, SEED_FILE)
if not os.path.exists(seed_path):
copyfile(os.path.join(BRITE_DIRECTORY, SEED_DEFAULT), seed_path)
if 'BRITE' in os.environ:
brite_bin = os.environ['BRITE']
else:
brite_bin = "brite"
args = [brite_bin, CONF_FILE, TMP_FILE, SEED_FILE]
dev_null = open(os.devnull, 'w')  # portable null device
print("> %s" % " ".join(args))
try:
# Run BRITE
# ------------------
subprocess.check_call(args, env=os.environ, cwd=BRITE_DIRECTORY, stdout=dev_null)
dev_null.close()
output = os.path.join(BRITE_DIRECTORY, TMP_FILE)
brite_file = "%s.brite" % output
hosts, links = self.parse_brite_file(brite_file)
os.remove(brite_file)
super().__init__(hosts, links)
except FileNotFoundError:
print("BRITE binary was not found. Add it to the path or set the BRITE environment variable")
super().__init__([], [])
except subprocess.CalledProcessError:
print("BRITE error")
super().__init__([], [])
finally:
os.remove(tmp_conf)
def parse_brite_file(self, path):
with open(path, 'r') as f:
s = f.read()
paragraphs = s.split('\n\n')
node_str = paragraphs[1]
edge_str = paragraphs[2]
node_id_map = {}
routers = []
links = []
autonomous_systems = {}
as_sizes = {}
# Parse BRITE nodes
for line in node_str.split('\n')[1:]:
attrs = line.split(' ')
i = int(attrs[0])
as_id = int(attrs[5])
if as_id not in as_sizes:
as_sizes[as_id] = 0
autonomous_systems[as_id] = []
internal_address = as_sizes[as_id]
as_sizes[as_id] += 1
name = "%s%d" % (string.ascii_uppercase[as_id], internal_address + 1)
r = random.randint(30, 100)
s = random.randint(10, 100)
if random.randint(0, 2) == 0:
a = random.randint(5, 40)
h = Server(name, r, s, a)
else:
h = Router(name, r, s)
routers.append(h)
node_id_map[i] = h
autonomous_systems[as_id].append(h)
hosts = copy(routers)
# Parse BRITE links
for line in edge_str.split('\n')[1:]:
if line:
attrs = line.split(' ')
src_id = int(attrs[1])
dest_id = int(attrs[2])
src_as = int(attrs[6])
dest_as = int(attrs[7])
capacity = int(float(attrs[5]))
src = node_id_map[src_id]
dest = node_id_map[dest_id]
# Inter-AS edges are assigned larger capacities
if src_as != dest_as:
capacity = random.randint(70, 200)
l = Link(src, dest, capacity)
links.append(l)
# Create clients
for i in range(self.n_clients):
client = Host(str(i + 1), random.randint(5, 40), random.randint(1, 10))
router = random.choice(routers)
hosts.append(client)
l = Link(client, router, random.randint(10, 40))
links.append(l)
return hosts, links
```
#### File: src/interface/log.py
```python
__SEPARATOR = "=========================================================="
__SUBSEP = "----------------------------------------------------------"
def __print_header(title, subtitle, separator):
print(separator)
print(title)
if subtitle:
print(subtitle)
print(separator)
def print_header(title, subtitle=None):
__print_header(title, subtitle, __SEPARATOR)
def print_subheader(title, subtitle=None):
__print_header(title, subtitle, __SUBSEP)
def print_sep():
print(__SEPARATOR)
def print_subsep():
print(__SUBSEP)
```
#### File: src/interface/parse.py
```python
import re
from network.topology import *
def parse_network(s):
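# Illustrative input accepted by parse_network (and, further below, parse_attack),
# not taken from the source:
#   hosts { a(10,5), _s(30,20,4), _r(50,50) }
#   links { a--s:15, s--r:40 }
#   attackers: [a]
#   victims: [s]
#   flows: 4
# Names prefixed with '_' become Servers (three attributes) or Routers (two);
# the flows/victims/attackers lines are only read by parse_attack.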
hosts = []
links = []
# TODO: add exception handling and descriptive error messages
host_search = re.search(r"hosts\s*{([^}]*)}", s)
if host_search:
host_block = host_search.groups()[0]
host_strs = re.findall(r"\s*([^\s]+\([^)]*\))", host_block)
for host_str in host_strs:
name = re.search(r"(.*)\(", host_str).groups()[0]
attr_search = re.search(r"\((.*)\)", host_str)
if not attr_search:
raise SyntaxError("Missing attributes for host " + host_str)
attrs = [s.strip() for s in attr_search.groups()[0].split(',')]
rc = int(attrs[0])
sc = int(attrs[1])
if len(attrs) > 2:
amp = int(attrs[2])
else:
amp = 1
if name[0] == '_':
if len(attrs) == 3:
h = Server(name[1:], rc, sc, amp)
else:
h = Router(name[1:], rc, sc)
else:
h = Host(name, rc, sc, amp)
hosts.append(h)
link_search = re.search(r"links\s*{([^}]*)}", s)
if link_search:
link_block = link_search.groups()[0]
link_strs = re.findall(r"\s*([^\s]+\s*--\s*[^\s]+:\s*\d+)", link_block)
for link_str in link_strs:
endpoints = re.search(r"([^\s]+)--([^\s]+)\s*:", link_str).groups()
c = int(re.search(r".*:\s*(\d+)", link_str).groups()[0])
h1 = None
h2 = None
for h in hosts:
if h.name == endpoints[0]:
h1 = h
elif h.name == endpoints[1]:
h2 = h
if h1 is None:
raise SyntaxError("Unknown host name '%s' specified in link '%s'" % (endpoints[0], link_str))
if h2 is None:
raise SyntaxError("Unknown host name '%s' specified in link '%s'" % (endpoints[1], link_str))
links.append(Link(h1, h2, c))
return Topology(hosts, links)
def parse_attack(ac_cls, s, n_flows):
network = Topology.from_string(s)
victims = None
attackers = None
flow_search = re.search(r"flows\s*:\s*(\d+)", s)
if flow_search:
n_flows = int(flow_search.groups()[0])
victim_search = re.search(r"victims\s*:\s*\[([^\s]*)\]", s)
if victim_search:
victims_str = victim_search.groups()[0]
names = [x.strip() for x in victims_str.split(',')] # may be hosts or links
victims = [h for h in network.hosts if h.name in names] +\
[l for l in network.links if l.__repr__() in names]
attackers_search = re.search(r"attackers\s*:\s*\[([^\s]*)\]", s)
if attackers_search:
attackers_str = attackers_search.groups()[0]
names = [x.strip() for x in attackers_str.split(',')]
attackers = [h for h in network.hosts if h.name in names]
return ac_cls(network, n_flows, victims, attackers)
```
#### File: src/interface/render.py
```python
import pydot
from network.topology import *
class NetworkRenderer(object):
accent_color = "#cc5555"
host_color = "#f0f0f0"
attacker_color = "#ffe6e6"
server_color = "#bfbfbf"
link_color = "#666666"
light_color = "#bbbbbb"
font_name = "Helvetica"
label_size = 10
node_fontsize = 8
def __init__(self, network):
self.network = network
self.graph = self.create_graph()
def write_dot(self, output):
with open(output + ".dot", "w") as f:
f.write(self.graph.to_string())
def render(self, output):
self.graph.write_pdf(output + ".pdf")
def __create_link_flow(self, h1, h2, f):
e = pydot.Edge(h1, h2)
e.set_fontname(self.font_name)
e.set_fontsize(self.label_size)
if f % 1 == 0: # integer flow
e.set_label(str(f))
else:
e.set_label("%.2f" % f)
e.set_fontcolor(self.accent_color)
e.set_color(self.accent_color)
return e
def create_graph(self):
g = pydot.Dot(graph_type='digraph')
node_map = {}
for h in self.network.topology.hosts:
label = "<<B>%s</B><br/>%d %d<br/>%d>" % (h.name, h.receiving_cap, h.sending_cap, h.amp_factor)
n = pydot.Node(h.name, label=label, style='filled', margin=-0.8, width=0.5, height=0.5,
fontname=self.font_name, fontsize=self.node_fontsize)
if type(h) is Server:
if self.network.victims and h in self.network.victims:
n.set_shape('doublecircle')
else:
n.set_shape('Mcircle')
n.set_fillcolor(self.server_color)
elif type(h) is Router:
if self.network.victims and h in self.network.victims:
n.set_shape('doubleoctagon')
else:
n.set_shape('octagon')
n.set_fillcolor(self.server_color)
else:
if self.network.victims and h in self.network.victims:
n.set_shape('doublecircle')
else:
n.set_shape('circle')
if self.network.attackers and h in self.network.attackers:
n.set_fillcolor(self.attacker_color)
else:
n.set_fillcolor(self.host_color)
g.add_node(n)
node_map[h] = n
for l in self.network.topology.links:
v1 = node_map[l.h1]
v2 = node_map[l.h2]
e = pydot.Edge(v1, v2, dir='none', label=str(l.capacity), color=self.link_color, fontcolor=self.link_color,
fontname=self.font_name, fontsize=self.label_size)
g.add_edge(e)
if self.network.flows:
f1 = sum([f.get(l.h1, l.h2) for f in self.network.flows])
f2 = sum([f.get(l.h2, l.h1) for f in self.network.flows])
if f1 > 0:
g.add_edge(self.__create_link_flow(v1, v2, f1))
if f2 > 0:
g.add_edge(self.__create_link_flow(v2, v1, f2))
return g
```
#### File: src/network/topology.py
```python
from network.route import *
class Host(object):
def __init__(self, name, r, s, a=1):
self.name = name
self.receiving_cap = r
self.sending_cap = s
self.amp_factor = a
self.links = []
def add_link(self, l):
self.links.append(l)
def __str__(self):
if self.amp_factor == 1:
return "%s(%d,%d)" % (self.name, self.receiving_cap, self.sending_cap)
else:
return "%s(%d,%d,%d)" % (self.name, self.receiving_cap, self.sending_cap, self.amp_factor)
def __repr__(self):
return self.name
class Server(Host):
def __init__(self, name, r, s, a):
super().__init__(name, r, s, a)
def __str__(self):
return "_" + super().__str__()
class Router(Server):
def __init__(self, name, r, s):
super().__init__(name, r, s, 1)
class Link(object):
def __init__(self, h1, h2, c):
self.h1 = h1
self.h2 = h2
self.capacity = c
def neighbor(self, h):
if h == self.h1:
return self.h2
elif h == self.h2:
return self.h1
else:
return None
def __repr__(self):
return "%s--%s" % (self.h1.name, self.h2.name)
def __str__(self):
return "%s:%d" % (self.__repr__(), self.capacity)
class Topology(object):
def __init__(self, hosts, links):
self.hosts = hosts
self.links = links
self.__routes = None
for l in links:
l.h1.add_link(l)
l.h2.add_link(l)
def get_routes(self):
if not self.__routes:
self.__routes = RoutingTable(self)
return self.__routes
def __str__(self):
host_str = ",\n\t".join([str(h) for h in self.hosts])
link_str = ",\n\t".join([str(l) for l in self.links])
return "hosts {\n\t%s\n}\nlinks {\n\t%s\n}" % (host_str, link_str)
@classmethod
def from_string(cls, s):
return parser.parse_network(s)
# TODO: remove workaround for circular dependencies
import interface.parse as parser
``` |
{
"source": "joelwembo/expert-python-dts-algorithms",
"score": 3
} |
#### File: Coding-Context/21-03-2022/jP-morga-int.py
```python
class Solution:
def solve(self, S, T):
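# Rolling DP over S: dp[i] holds the length of the shortest window ending at
# index i that contains T[0..j] as a subsequence; the answer is the minimal
# such window over all of T, or "" if T never appears as a subsequence of S.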
INF = float("inf")
N = len(S)
dp = [INF] * N
for i in range(N):
if S[i] == T[0]:
dp[i] = 1
for j in range(1, len(T)):
last = {}
dp2 = [INF] * N
for i in range(N):
if S[i] == T[j]:
prev_i = last.get(T[j - 1], None)
if prev_i is not None:
dp2[i] = dp[prev_i] + (i - prev_i)
last[S[i]] = i
dp = dp2
m = min(dp)
i = dp.index(m)
if m == INF:
return ""
return S[i - dp[i] + 1 : i + 1]
ob = Solution()
print(ob.solve("abcbfbghfb","fg"))
```
#### File: Coding-Context/23-03-2022/circle.py
```python
from math import pi
def circle_area(r):
if r < 0:
raise ValueError("The radius cannot be negative")
return pi*(r**2)
print(circle_area(10))
```
#### File: Exercices/chapter_02/p02_return_kth_to_last.py
```python
from chapter_02.linked_list import LinkedList
def kth_to_last(ll, k):
runner = current = ll.head
for _ in range(k):
if not runner:
return None
runner = runner.next
while runner:
current = current.next
runner = runner.next
return current
test_cases = (
# list, k, expected
((10, 20, 30, 40, 50), 1, 50),
((10, 20, 30, 40, 50), 5, 10),
)
def test_kth_to_last():
for linked_list_values, k, expected in test_cases:
ll = LinkedList(linked_list_values)
assert kth_to_last(ll, k).value == expected
if __name__ == "__main__":
test_kth_to_last()
```
#### File: Exercices/chapter_03/p05_sort_stack.py
```python
import unittest
from chapter_03.stack import Stack
class SortedStack(Stack):
def __init__(self):
super().__init__()
self.temp_stack = Stack()
def push(self, item):
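# Keep the smallest element on top: items smaller than `item` are moved to the
# temporary stack, `item` is pushed, then the moved items are pushed back.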
if self.is_empty() or item < self.peek():
super().push(item)
else:
while self.peek() is not None and item > self.peek():
self.temp_stack.push(self.pop())
super().push(item)
while not self.temp_stack.is_empty():
super().push(self.temp_stack.pop())
class Tests(unittest.TestCase):
def test_push_one(self):
queue = SortedStack()
queue.push(1)
assert len(queue) == 1
def test_push_two(self):
queue = SortedStack()
queue.push(1)
queue.push(2)
assert len(queue) == 2
def test_push_three(self):
queue = SortedStack()
queue.push(1)
queue.push(2)
queue.push(3)
assert len(queue) == 3
def test_pop_one(self):
queue = SortedStack()
queue.push(1)
assert queue.pop() == 1
def test_pop_two(self):
queue = SortedStack()
queue.push(1)
queue.push(2)
assert queue.pop() == 1
assert queue.pop() == 2
def test_pop_three(self):
queue = SortedStack()
queue.push(1)
queue.push(2)
queue.push(3)
assert queue.pop() == 1
assert queue.pop() == 2
assert queue.pop() == 3
def test_push_mixed(self):
queue = SortedStack()
queue.push(3)
queue.push(2)
queue.push(1)
queue.push(4)
assert queue.pop() == 1
assert queue.pop() == 2
assert queue.pop() == 3
assert queue.pop() == 4
```
#### File: Exercices/chapter_03/stack.py
```python
class Stack:
def __init__(self):
self.items = []
def is_empty(self):
return len(self.items) == 0
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
if self.items:
return self.items[-1]
return None
def __len__(self):
# in python the `len` function is preferred to `size` methods
return len(self.items)
def __bool__(self):
# lets us use the stack as a conditional
return bool(self.items)
```
#### File: Exercices/chapter_04/binary_tree.py
```python
class Node:
def __init__(self, key):
self.key = key
self.parent = None
self.left = None
self.right = None
class BinaryTree:
NodeCls = Node
def __init__(self):
self.root = None
def insert(self, key, parent):
new = self.NodeCls(key)
if parent is None:
if self.root is None:
self.root = new
return new
raise Exception("a root already exists")
if not parent.left:
parent.left = new
new.parent = parent
elif not parent.right:
parent.right = new
new.parent = parent
else:
raise Exception("a node cannot have more than two children")
return new
def example():
t = BinaryTree()
n1 = t.insert(1, None)
n2 = t.insert(2, n1)
n3 = t.insert(3, n1)
n4 = t.insert(4, n2)
t.insert(5, n2)
t.insert(7, n3)
t.insert(8, n4)
print(t.root.left.left.left.key)
if __name__ == "__main__":
example()
```
#### File: Exercices/chapter_04/p02_minimal_tree.py
```python
class Node:
def __init__(self, item):
self.right = None
self.left = None
self.val = item
def disp(self, nesting=0):
indent = " " * nesting * 2
output = f"{self.val}\n"
if self.left is not None:
output += f"{indent}L:"
output += self.left.disp(nesting + 1)
if self.right is not None:
output += f"{indent}R:"
output += self.right.disp(nesting + 1)
return output
def __str__(self):
return self.disp()
def array_to_binary_tree(array, start, end):
if start > end:
return None
mid = (
start + end
) // 2 # This must be floor division, otherwise you get a slice error
# TypeError: list indices must be integers or slices, not float
root = Node(array[mid])
root.left = array_to_binary_tree(array, start, mid - 1)
root.right = array_to_binary_tree(array, mid + 1, end)
return root
if __name__ == "__main__":
test_array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 15, 18, 22, 43, 144, 515, 4123]
print(array_to_binary_tree(test_array, 0, len(test_array) - 1))
```
#### File: Exercices/chapter_04/p07_build_order.py
```python
import pytest
def determine_build_order(projects, dependencies):
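# Kahn-style topological build: on each pass, build every project whose
# dependencies have all been built; if a full pass builds nothing, a cycle exists.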
dependency_tree = {p: set() for p in projects}
build_order = []
unbuilt_projects = set(projects)
for dependency, project in dependencies:
dependency_tree[project].add(dependency)
while unbuilt_projects:
something_built = False
for project in list(unbuilt_projects):
dependencies = dependency_tree[project]
if not unbuilt_projects.intersection(dependencies):
build_order.append(project)
unbuilt_projects.remove(project)
something_built = True
if not something_built:
raise NoValidBuildOrderError("No valid build order exists")
return build_order
class NoValidBuildOrderError(Exception):
pass
def test_determine_build_order():
projects = ["a", "b", "c", "d", "e", "f", "g"]
dependencies = [
("d", "g"),
("a", "e"),
("b", "e"),
("c", "a"),
("f", "a"),
("b", "a"),
("f", "c"),
("f", "b"),
]
build_order = determine_build_order(projects, dependencies)
for dependency, project in dependencies:
assert build_order.index(dependency) < build_order.index(project)
def test_impossible_build_order():
projects = ["a", "b"]
dependencies = [("a", "b"), ("b", "a")]
with pytest.raises(NoValidBuildOrderError):
determine_build_order(projects, dependencies)
```
#### File: Exercices/chapter_05/p03_flip_bit_to_win.py
```python
def flip_bit_to_win(number):
number_str = bin(number)[2:]
max_cnt, cnt, cnt0 = 0, 0, 0
i = len(number_str) # start index
while i:
if int(number_str[i - 1]):
cnt += 1
else:
if cnt0 == 0:
temp_i = i
cnt0 = 1
else: # second 0
max_cnt = cnt
i = temp_i # rewind
cnt0 = 0
cnt = 0
i -= 1
max_cnt = max(cnt, max_cnt)
return max_cnt + 1
def flip_bit_to_win_alt(num):
longest, current_segment, past_segment = 1, 0, 0
while num != 0:
if num & 1: # Current bit is 1
current_segment += 1
else: # Current bit is 0
past_segment = 0 if (num & 2) == 0 else current_segment  # if the next bit is also 0, the runs cannot merge
current_segment = 0
longest = max(current_segment + past_segment + 1, longest)
num >>= 1 # Move 1 bit to the right
return longest
test_cases = [(0b0, 1), (0b111, 4), (0b10011100111, 4), (0b11011101111, 8)]
testable_functions = [flip_bit_to_win, flip_bit_to_win_alt]
def test_flip_bit_to_win():
for fli_bit in testable_functions:
for num, expected in test_cases:
assert fli_bit(num) == expected
if __name__ == "__main__":
test_flip_bit_to_win()
```
#### File: Exercices/chapter_05/p07_pairwise_swap.py
```python
def pairwise_swap(number):
mask_10 = 0xAAAAAAAA # 32 bits
mask_01 = 0x55555555 # 32 bits
num_evn = number & mask_10
num_odd = number & mask_01
swp_num = (num_evn >> 1) | (num_odd << 1)
return swp_num
def test_pairwise_swap():
view_output = 1
for number, exp_output in zip([123, 781, 278], [183, 782, 553]):
swap_num = pairwise_swap(number)
if view_output:
print(f"Number: {bin(number)}")
print(f"Swapped: {bin(swap_num)}")
assert swap_num == exp_output
if __name__ == "__main__":
test_pairwise_swap()
```
#### File: Exercices/chapter_06/p10_poison.py
```python
import random
from typing import List, Optional
DAYS_FOR_RESULT = 7
class _TestStrip:
def __init__(self) -> None:
self.has_poison = False
self.day_poisoned: Optional[int] = None
class World:
def __init__(
self, num_test_strips: int, num_bottles: int, poisoned_bottle_num: int
) -> None:
self._num_test_strips = num_test_strips
self._test_strips = [_TestStrip() for i in range(num_test_strips)]
self._num_bottles = num_bottles
self._poisoned_bottle_num = poisoned_bottle_num
self._day = 0
@property
def num_bottles(self) -> int:
return self._num_bottles
@property
def num_test_strips(self) -> int:
return self._num_test_strips
@property
def day(self) -> int:
return self._day
@day.setter
def day(self, day: int) -> None:
if day < self._day:
raise ValueError("day cannot be decreased")
self._day = day
def add_drop(self, bottle_num: int, test_strip_num: int) -> None:
test_strip = self._test_strips[test_strip_num]
if bottle_num == self._poisoned_bottle_num and not test_strip.has_poison:
test_strip.has_poison, test_strip.day_poisoned = True, self.day
def positive_test_strips(self) -> List[int]:
res: List[int] = []
for test_strip_num, test_strip in enumerate(self._test_strips):
if (
test_strip.has_poison
and self.day - test_strip.day_poisoned >= DAYS_FOR_RESULT
):
res.append(test_strip_num)
return res
def find_poison(world: World) -> int:
for i in range(world.num_bottles):
for j in range(world.num_test_strips):
if i & (1 << j):
world.add_drop(bottle_num=i, test_strip_num=j)
world.day += DAYS_FOR_RESULT
return sum(1 << i for i in world.positive_test_strips())
def test_find_poison():
poisoned_bottle_num = random.randrange(1000)
world = World(
num_bottles=1000, num_test_strips=10, poisoned_bottle_num=poisoned_bottle_num
)
assert find_poison(world) == poisoned_bottle_num
return poisoned_bottle_num, world.day
def example():
poisoned_bottle_num, days = test_find_poison()
print("Found poison in bottle number", poisoned_bottle_num, "in", days, "days.")
if __name__ == "__main__":
example()
```
#### File: Exercices/chapter_07/p04_parking_lot.py
```python
import unittest
from random import randrange
import pytest
class Vehicle:
types = []
def __init__(self, model, size, number):
self.model = model
self.size = size
self.number = number
self.parked = False
def is_parked(self):
if self.parked:
print("Vehicle is parked")
return True
print("Vehicle is not parked")
return False
class Bike(Vehicle):
pass
class Scooter(Vehicle):
pass
class Car(Vehicle):
pass
class Bus(Vehicle):
pass
class ParkZone:
def __init__(self):
self.space_available = 10
self.parked = {}
def park(self, vehicle):
if self.is_space_available(vehicle.size):
token = self.register(vehicle)
self.space_available -= vehicle.size
vehicle.parked = True
print(vehicle.model, " has been parked.")
print("Token: ", token, ", Space available ", self.space_available)
return token
print("No space available")
return None
def is_space_available(self, size):
return (self.space_available - size) >= 0
def register(self, vehicle):
token = self.generate_token()
while token in self.parked:
token = self.generate_token()
self.parked[token] = vehicle
return token
def generate_token(self):
return randrange(1111, 9999)
def depark(self, token):
if token in self.parked:
parked_vehicle = self.parked[token]
parked_vehicle.parked = False
self.space_available += parked_vehicle.size
print(parked_vehicle.model, "has been deparked")
print("Space Available: ", self.space_available)
return self.parked.pop(token)
raise ValueError("Invalid token or vehicle not found")
def list_parked_vehicles(self):
print("------Parked Vehicles------")
for vehicle in self.parked.values():
print(vehicle.model, vehicle.size, vehicle.number)
def test_parking_lot():
bike = Bike("Suzuki Access", 1, "MH14AB1234")
assert not bike.is_parked()
park_zone = ParkZone()
token = park_zone.park(bike)
assert bike.is_parked()
assert park_zone.depark(token) == bike
assert not bike.is_parked()
car = Car("<NAME>", 5, "MU268A")
assert not car.is_parked()
car_token = park_zone.park(car)
assert car.is_parked()
with pytest.raises(ValueError, match="Invalid token or vehicle not found"):
park_zone.depark(token)
assert park_zone.depark(car_token) == car
assert not car.is_parked()
bus = Bus("Volvo", 5, "AN657")
park_zone.park(bus)
scooter = Scooter("<NAME>", 1, "GI653")
park_zone.park(scooter)
park_zone.list_parked_vehicles()
if __name__ == "__main__":
unittest.main()
```
#### File: Exercices/chapter_08/p01_triple_step.py
```python
def triple_hop(x):
if x < 0:
return 0
if x == 0:
return 1
if x == 1:
return 1
return triple_hop(x - 1) + triple_hop(x - 2) + triple_hop(x - 3)
def method_2(x):
memo = [-1] * (x + 1)
return triple_hop_recursive(x, memo)
def triple_hop_recursive(x, memo):
if x < 0:
return 0
memo[0] = 1
if x >= 1:
memo[1] = 1
if x >= 2:
memo[2] = memo[1] + memo[0]
if x > 2:
for i in range(3, x + 1):
memo[i] = memo[i - 1] + memo[i - 2] + memo[i - 3]
return memo[x]
if __name__ == "__main__":
print(triple_hop(1))
print(triple_hop(2))
print(triple_hop(3))
print(triple_hop(4))
print(triple_hop(5))
print(triple_hop(6))
print(method_2(1))
print(method_2(2))
print(method_2(3))
print(method_2(4))
print(method_2(5))
print(method_2(6))
```
#### File: Exercices/chapter_08/p05_recursive_multiply.py
```python
def multiply(a, b, answer):
if answer == 0 and a != 0 and b != 0:
answer = a
if a == 1:
return answer
if b == 1:
return answer
answer += a
return multiply(a, b - 1, answer)
# Solution 1
def min_product(a, b):
bigger = b if a < b else a # a < b ? b : a
smaller = a if a < b else b # a < b ? a : b
return min_product_helper(smaller, bigger)
def min_product_helper(smaller, bigger):
if smaller == 0:
return 0
if smaller == 1:
return bigger
# Compute half. If uneven, compute other half. If even, double it
s = smaller >> 1 # divide by 2
side1 = min_product(s, bigger)
side2 = side1
if smaller % 2 == 1:
side2 = min_product_helper(smaller - s, bigger)
return side1 + side2
# Solution 2
def min_product_2(a, b):
bigger = b if a < b else a # a < b ? b : a
smaller = a if a < b else b # a < b ? a : b
return min_product_2_helper(smaller, bigger, {})
def min_product_2_helper(smaller, bigger, memo):
if smaller == 0:
return 0
if smaller == 1:
return bigger
if smaller in memo:
return memo[smaller]
# Compute half. If uneven, compute other half. If even double it
s = smaller >> 1
side1 = min_product_2_helper(s, bigger, memo)
side2 = side1
if smaller % 2 == 1:
side2 = min_product_2_helper(smaller - s, bigger, memo)
# sum and cache
memo[smaller] = side1 + side2
return memo[smaller]
# Solution 3
def min_product_3(a, b):
bigger = b if a < b else a # a < b ? b : a
smaller = a if a < b else b # a < b ? a : b
return min_product_3_helper(smaller, bigger)
def min_product_3_helper(smaller, bigger):
if smaller == 0:
return 0
if smaller == 1:
return bigger
s = smaller >> 1
half_prod = min_product_3_helper(s, bigger)
if smaller % 2 == 0:
return half_prod + half_prod
return half_prod + half_prod + bigger
# solution 4 # non-recursive
def multiply_bit_based(a, b):
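# Shift-and-add multiplication: for every set bit i of b, accumulate a << i.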
b_bin = bin(b)
b_bin = b_bin[2:]
prod = 0
for i in range(len(b_bin)): # O(len_b)
if int(b_bin[-i - 1]):
prod = prod + (a << i)
return prod
test_cases = [(5, 6), (28, 89), (1234, 245334)]
testable_functions = [multiply_bit_based, min_product, min_product_2, min_product_3]
def test_min_product():
for min_prod in testable_functions:
for a, b in test_cases:
assert min_prod(a, b) == a * b
if __name__ == "__main__":
test_min_product()
```
#### File: Exercices/chapter_08/p07_permutations_without_dups.py
```python
def get_perms(string):
permutations = []
if string is None:
return None
if len(string) == 0:
# base case: the only permutation of the empty string is the empty string
permutations.append("")
return permutations
first = string[0] # get first letter in string
remainder = string[1:]
words = get_perms(remainder)
for word in words:
# insert the first character at every possible position of each permutation
for index in range(len(word) + 1):
s = insert_char_at(word, first, index)
permutations.append(s)
return permutations
def insert_char_at(word, char, i):
start = word[:i]
end = word[i:]
return start + char + end
# approach 2: Building from permutations of all n-1 character substrings
def get_perms_2(string):
result = []
get_perms_inner_2("", string, result)
return result
def get_perms_inner_2(prefix, remainder, result):
if len(remainder) == 0:
result.append(prefix)
length = len(remainder)
for i in range(length):
before = remainder[:i]
after = remainder[i + 1 :]
c = remainder[i]
get_perms_inner_2(prefix + c, before + after, result)
if __name__ == "__main__":
print(get_perms("str"))
print(get_perms_2("str"))
```
#### File: Exercices/chapter_08/p09_parens.py
```python
import unittest
def next_permutation(arr):
i = len(arr) - 1
while i > 0 and arr[i - 1] >= arr[i]:
i -= 1
if i <= 0:
return False
j = len(arr) - 1
while arr[j] <= arr[i - 1]:
j -= 1
arr[i - 1], arr[j] = arr[j], arr[i - 1]
arr[i:] = arr[len(arr) - 1 : i - 1 : -1]
return True
def is_matched_parentheses(ray):
lst = []
for c in ray:
if c == "(":
lst.append(c)
if c == ")":
if len(lst) < 1 or lst.pop() != "(":
return False
return True
def generate_parentheses_permutations_brute_force(number_of_pairs):
starting_list = (["("] * number_of_pairs) + [")"] * number_of_pairs
possibilities = ["".join(starting_list)]
while next_permutation(starting_list):
if is_matched_parentheses(starting_list):
possibilities.append("".join(starting_list))
return possibilities
def generate_parentheses_permutations_recursive_1(n):
def helper(
open_parentheses_remaining, closed_parentheses_remaining, current_string
):
if len(current_string) == n * 2:
result.append(current_string)
if open_parentheses_remaining > 0:
helper(
open_parentheses_remaining - 1,
closed_parentheses_remaining,
current_string + "(",
)
if closed_parentheses_remaining > open_parentheses_remaining:
helper(
open_parentheses_remaining,
closed_parentheses_remaining - 1,
current_string + ")",
)
result = []
helper(n, n, "")
return result
def add_paren(arr, left_rem, right_rem, string_arr, idx):
if left_rem < 0 or right_rem < left_rem: # invalid
return
if left_rem == 0 and right_rem == 0: # out of left and right parentheses
elem = "".join(string_arr)
arr.append(elem)
else:
string_arr[idx] = "(" # add left and recurse
add_paren(arr, left_rem - 1, right_rem, string_arr, idx + 1)
string_arr[idx] = ")" # add right and recurse
add_paren(arr, left_rem, right_rem - 1, string_arr, idx + 1)
def generate_parentheses_permutations_recursive_2(n):
results = []
string_arr = ["*"] * n * 2
add_paren(results, n, n, string_arr, 0)
return results
testable_functions = [
generate_parentheses_permutations_brute_force,
generate_parentheses_permutations_recursive_1,
generate_parentheses_permutations_recursive_2,
]
test_cases = [
(0, [""]),
(1, ["()"]),
(2, sorted(["()()", "(())"])),
(3, sorted(["((()))", "(()())", "(())()", "()(())", "()()()"])),
]
class TestSuite(unittest.TestCase):
def test_generate_parentheses_permutations(self):
for f in testable_functions:
for num, expected in test_cases:
assert sorted(f(num)) == expected, f"{f.__name__} {num} failed"
def example():
print(generate_parentheses_permutations_recursive_1(2))
print(generate_parentheses_permutations_brute_force(3))
print(generate_parentheses_permutations_recursive_2(3))
if __name__ == "__main__":
example()
```
#### File: Exercices/chapter_08/p14_boolean_evaluation.py
```python
import unittest
def string_to_bool(s: str) -> bool:
return s == "1"
def count_ways(exp: str, result: bool, memo) -> int:
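# Counts the ways to parenthesize `exp` (digits joined by &, | and ^) so that it
# evaluates to `result`, splitting at each operator and memoizing results keyed
# by (sub-expression, target value).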
if len(exp) == 0:
return 0
if len(exp) == 1:
return 1 if string_to_bool(exp) == result else 0
if exp + str(result) in memo:
return memo[exp + str(result)]
ways = 0
for i in range(1, len(exp), 2):
left = exp[:i]
right = exp[i + 1 :]
left_true = count_ways(left, True, memo)
left_false = count_ways(left, False, memo)
right_true = count_ways(right, True, memo)
right_false = count_ways(right, False, memo)
total = (left_true + left_false) * (right_true + right_false)
total_true = 0
if exp[i] == "|":
total_true = (
left_true * right_true
+ left_false * right_true
+ left_true * right_false
)
elif exp[i] == "&":
total_true = left_true * right_true
elif exp[i] == "^":
total_true = left_true * right_false + left_false * right_true
subways = total_true if result else (total - total_true)
ways += subways
memo[exp + str(result)] = ways
return ways
def evaluate(exp: str, result: bool) -> int:
memo = {}
return count_ways(exp, result, memo)
class Test(unittest.TestCase):
test_cases = [("1^0|0|1", False, 2), ("0&0&0&1^1|0", True, 10)]
testable_functions = [evaluate]
def test_evaluate(self):
for f in self.testable_functions:
for [expression, result, expected] in self.test_cases:
assert f(expression, result) == expected
if __name__ == "__main__":
unittest.main()
```
#### File: Exercices/chapter_10/p03_search_in_rotated_array.py
```python
from typing import Optional, Sequence
def index(nums: Sequence[int], target: int) -> Optional[int]:
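# Binary search in a rotated sorted array: compare nums[mid] and the target
# against nums[0] to tell which side of the rotation point each lies on,
# then narrow the range as in ordinary binary search.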
if not nums:
return None
# We cannot guarantee better than O(n) if there are duplicates.
if nums[0] == nums[-1]:
try:
return nums.index(target)
except ValueError:
return None
is_target_left_of_wraparound = target >= nums[0]
lo, hi = 0, len(nums) - 1
while lo <= hi:
mid = (lo + hi) // 2
is_mid_left_of_wraparound = nums[mid] >= nums[0]
if is_mid_left_of_wraparound and not is_target_left_of_wraparound:
lo = mid + 1
elif not is_mid_left_of_wraparound and is_target_left_of_wraparound:
hi = mid - 1
elif nums[mid] < target:
lo = mid + 1
elif nums[mid] > target:
hi = mid - 1
else:
assert nums[mid] == target
return mid
return None
def search_rotated(array: Sequence[int], num: int) -> Optional[int]:
if not array:
return None
return _recursive_search(array, num, 0, len(array) - 1)
def _recursive_search(array, num, start, end):
middle = (end - start) // 2 + start
if array[middle] == num:
return middle
if end - start <= 0:
return None
result = None
if array[start] < array[middle]: # left side is normal
if array[start] <= num < array[middle]:
result = _recursive_search(array, num, start, middle - 1)
else:
result = _recursive_search(array, num, middle + 1, end)
elif array[middle] < array[end]: # right side is normal
if array[middle] < num <= array[end]:
result = _recursive_search(array, num, middle + 1, end)
else:
result = _recursive_search(array, num, start, middle - 1)
elif array[start] == array[middle]:
if array[middle] != array[end]:
result = _recursive_search(array, num, middle + 1, end)
else:
result = _recursive_search(array, num, start, middle - 1)
if result is None:
result = _recursive_search(array, num, middle + 1, end)
return result
test_cases = [
# array, target, valid solutions
([15, 16, 19, 20, 25, 1, 3, 4, 5, 7, 10, 14], 5, 8),
([2, 3, 1, 2, 2, 2, 2, 2, 2, 2], 2, {0, 3, 4, 5, 6, 7, 8, 9}),
([2, 3, 1, 2, 2, 2, 2, 2, 2, 2], 3, 1),
([2, 3, 1, 2, 2, 2, 2, 2, 2, 2], 4, None),
([2, 3, 1, 2, 2, 2, 2, 2, 2, 2], 1, 2),
([2, 3, 1, 2, 2, 2, 2, 2, 2, 2], 8, None),
]
testable_functions = [index, search_rotated]
def test_index():
for array, target, expected in test_cases:
for method in testable_functions:
ind = method(array, target)
if isinstance(expected, set):
assert ind in expected
else:
error_msg = (
f"arr:{array} target:{target} calculated:{ind} expected:{expected}"
)
assert ind == expected, error_msg
if __name__ == "__main__":
test_index()
```
#### File: Exercices/chapter_10/p05_sparse_search.py
```python
def sparse_search(arr, item):
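# Binary search over a sorted array interspersed with empty strings: when the
# midpoint is empty, probe left and right until a non-empty entry is found.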
def inner_search(arr, item, low, high):
middle = ((high - low) // 2) + low
if arr[middle] == "":
left = middle - 1
right = middle + 1
while True:
if left < low and right > high:
return None
elif right <= high and arr[right] != "":
middle = right
break
elif left >= low and arr[left] != "":
middle = left
break
left -= 1
right += 1
if arr[middle] == item:
return middle
if arr[middle] > item:
return inner_search(arr, item, low, middle - 1)
if arr[middle] < item:
return inner_search(arr, item, middle + 1, high)
return inner_search(arr, item, 0, len(arr) - 1)
test_cases = [
((["a", "", "", "b", "", "c", "", "", "d", "", "", "", "", "e", ""], "d"), 8),
((["a", "", "", "b", "", "c", "", "", "d", "", "", "", "", "e", ""], "f"), None),
((["a", "", "", "b", "", "c", "", "", "d", "", "", "", "", "e", ""], "a"), 0),
]
testable_functions = [sparse_search]
def test_sorted_search():
for function in testable_functions:
for (n, m), expected in test_cases:
calculated = function(n, m)
error_msg = f"{function.__name__}: {calculated} != {expected}"
assert function(n, m) == expected, error_msg
if __name__ == "__main__":
test_sorted_search()
```
#### File: Exercices/chapter_16/p02_word_frequencies.py
```python
import string
def preprocess(book):
word_counts = {}
for word in book.split():
word = word.lower()
word = word.translate(str.maketrans("", "", string.punctuation))
if not word:
continue
if word not in word_counts:
word_counts[word] = 1
else:
word_counts[word] += 1
return word_counts
def get_frequency_repetitive(book, word):
word_counts = preprocess(book)
return word_counts.get(word.lower(), 0)
def get_frequency_single_query(book, word):
if book is None or word is None:
return 0
word = word.lower()
count = 0
for book_word in book.split():
# make lowercase, remove punctuation
book_word = book_word.lower()
book_word = book_word.translate(str.maketrans("", "", string.punctuation))
if book_word == word:
count += 1
return count
def example():
book = """Once upon a time there was this book.
This is a sentence. This is a much longer sentence.
This book is terribly short. But you get the idea.
You should see the word this 6 times in this example text.
"""
word = "book"
count = get_frequency_repetitive(book, word)
print(f'The word "{word}" appears {count} times.')
count = get_frequency_single_query(book, word)
print(f'The word "{word}" appears {count} times.')
if __name__ == "__main__":
example()
```
#### File: Exercices/chapter_16/p08_english_int.py
```python
ones = [
"One",
"Two",
"Three",
"Four",
"Five",
"Six",
"Seven",
"Eight",
"Nine",
"Ten",
"Eleven",
"Twelve",
"Thirteen",
"Fourteen",
"Fifteen",
"Sixteen",
"Seventeen",
"Eighteen",
"Nineteen",
]
twos = {
20: "Twenty",
30: "Thirty",
40: "Forty",
50: "Fifty",
60: "Sixty",
70: "Seventy",
80: "Eighty",
90: "Ninety",
}
threes = ["", "Thousand", "Million", "Billion"]
def get_chunks(n):
result = []
if 1 <= (n % 100) < 20:
result.append(ones[(n % 100) - 1])
elif 20 <= (n % 100) < 100:
if (n % 10) != 0:
result.append(ones[n % 10 - 1])
result.insert(0, twos.get((n % 100 - n % 10), ""))
if n >= 100:
result = [ones[n // 100 - 1], "Hundred"] + result
return result
def get_in_words(n):
if n == 0:
return "Zero"
int_in_words = []
index = 0
while n > 0:
temp = n % 1000
res = get_chunks(temp)
if res:
int_in_words = res + [threes[index]] + int_in_words
index += 1
n //= 1000
return " ".join(int_in_words)
def example():
nums = [
1,
10,
13,
19,
20,
23,
50,
73,
93,
100,
101,
110,
119,
195,
300,
504,
950,
974,
999,
1000,
10000,
909000,
1000000,
9000009,
19323984,
908900034,
100000000781,
]
for n in nums:
print(n, get_in_words(n))
if __name__ == "__main__":
example()
```
#### File: Exercices/chapter_17/p07_baby_names.py
```python
import collections
def count_baby_names(name_counts, synonyms):
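# Union-find over names: synonym pairs are merged into clusters and the counts
# of every name in a cluster are summed under the cluster's root name.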
parent = {}
for name in name_counts.keys():
parent[name] = name
def find(x):
if parent[x] != x:
parent[x] = find(parent[x])
return parent[x]
def union(x1, x2):
r1 = find(x1)
r2 = find(x2)
if r1 != r2:
parent[r1] = r2
res = collections.defaultdict(int)
for pair in synonyms:
union(pair[0], pair[1])
for key in parent.keys():
# find root of cluster
root = find(key)
res[root] += name_counts[key]
return dict(res)
test_cases = [
# name_counts, synonyms, expected_counts
[
{
"john": 10,
"jon": 3,
"davis": 2,
"kari": 3,
"johny": 11,
"carlton": 8,
"carleton": 2,
"jonathan": 9,
"carrie": 5,
},
[
("jonathan", "john"),
("jon", "johny"),
("johny", "john"),
("kari", "carrie"),
("carleton", "carlton"),
],
{"john": 33, "davis": 2, "carrie": 8, "carlton": 10},
]
]
def test_baby_names():
for name_counts, synonyms, expected_counts in test_cases:
assert count_baby_names(name_counts, synonyms) == expected_counts
if __name__ == "__main__":
test_baby_names()
```
#### File: Exercices/chapter_17/p09_kth_multiple.py
```python
def get_kth_multiple(k):
res = [1, 3, 5, 7]
is_number_seen = {1, 3, 5, 7}
if k <= 3:
return res[k]
for i in range(k - 3):
choices = []
for j in range(len(res)):
if 3 * res[j] not in is_number_seen:
choices.append(3 * res[j])
if 5 * res[j] not in is_number_seen:
choices.append(5 * res[j])
if 7 * res[j] not in is_number_seen:
choices.append(7 * res[j])
ans = min(choices)
res.append(ans)
is_number_seen.add(ans)
return res[-1]
def get_kth_multiple_via_heap(k):
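# Min-heap approach: repeatedly pop the smallest multiple and push its products
# with 3, 5 and 7, skipping values that have already been enqueued.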
res = []
is_number_seen = set()
import heapq
heap = [3, 5, 7]
heapq.heapify(heap)
for i in range(k):
next_el = heapq.heappop(heap)
# is_number_seen.add(next_el)
res.append(next_el)
if (next_el * 3) not in is_number_seen:
is_number_seen.add(next_el * 3)
heapq.heappush(heap, next_el * 3)
if (next_el * 5) not in is_number_seen:
is_number_seen.add(next_el * 5)
heapq.heappush(heap, next_el * 5)
if (next_el * 7) not in is_number_seen:
is_number_seen.add(next_el * 7)
heapq.heappush(heap, next_el * 7)
print(res)
return res[-1]
test_cases = [
# k, expected
(1, 3),
(2, 5),
(3, 7),
(1000, 82046671875),
]
testable_functions = [get_kth_multiple_via_heap]
def test_kth_multiple():
for f in testable_functions:
for k, expected in test_cases:
assert f(k) == expected
if __name__ == "__main__":
test_kth_multiple()
```
#### File: Exercices/chapter_17/p16_the_masseuse.py
```python
def find_best_schedule(appointments):
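# "House robber" style DP: dp[i] is the best total minutes obtainable from
# appointments[i:] given that two adjacent appointments cannot both be taken.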
n = len(appointments)
dp = [0] * (n + 1)
dp[-2] = appointments[-1]
max_so_far = -float("inf")
for i in reversed(range(n - 1)):
choices = []
# choice 1, take the ith element, then skip i+1, and take i+2.
choices.append((appointments[i] + dp[i + 2], i + 2))
# choice 2, don't take ith element, the answer sits at dp[i+1]
choices.append((dp[i + 1], i + 1))
dp[i] = max(choices)[0]
if dp[i] > max_so_far:
max_so_far = dp[i]
return max_so_far
def test_find_best_schedule():
appointments = [30, 15, 60, 75, 45, 15, 15, 45]
assert find_best_schedule(appointments) == 180
appointments = [30, 15, 60, 15, 45, 15, 45]
assert find_best_schedule(appointments) == 180
appointments = [30, 15, 15, 60]
assert find_best_schedule(appointments) == 90
```
#### File: Exercices/chapter_17/p18_shortest_supersequence.py
```python
import collections
def min_window(big_array, small_array):
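# Sliding-window search for the shortest span of big_array containing every
# element of small_array (with multiplicity); returns (left, right) indices,
# or None when no such window exists.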
n = len(small_array)
frequencies = collections.defaultdict(int)
for i in range(n):
frequencies[small_array[i]] += 1
# window invariant: 'contains all the chars in t'
min_win_len = float("inf")
left = 0
missing = n
min_win_left = -1
min_win_right = -1
for right, char in enumerate(big_array):
# insertion logic
if frequencies[char] > 0:
missing -= 1
# nevertheless, insert the element
frequencies[char] -= 1
if missing == 0:
while left <= right and missing == 0:
if right - left + 1 < min_win_len:
min_win_len = right - left + 1
min_win_left = left
min_win_right = right
if frequencies[big_array[left]] == 0:
# then you are making a blunder
missing += 1
frequencies[big_array[left]] += 1
left += 1
# break
else:
frequencies[big_array[left]] += 1
left += 1
if min_win_len == float("inf"):
return
return min_win_left, min_win_right
def test_min_window():
s = "75902135791158897"
t = "159"
assert min_window(s, t) == (7, 10)
``` |
{
"source": "joelwhitehouse/PythonExtensionPatterns",
"score": 3
} |
#### File: PythonExtensionPatterns/src/pidmon.py
```python
import sys
import time
import psutil
def memMon(pid, freq=1.0):
proc = psutil.Process(pid)
print(proc.memory_info())  # memory_info_ex() is deprecated in newer psutil versions
prev_mem = None
while True:
try:
mem = proc.memory_info().rss / 1e6
if prev_mem is None:
print('{:10.3f} [Mb]'.format(mem))
else:
print('{:10.3f} [Mb] {:+10.3f} [Mb]'.format(mem, mem - prev_mem))
prev_mem = mem
time.sleep(freq)
except KeyboardInterrupt:
try:
input(' Pausing memMon, <cr> to continue, Ctrl-C to end...')
except KeyboardInterrupt:
print('\n')
return
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: python pidmon.py <PID>')
sys.exit(1)
pid = int(sys.argv[1])
memMon(pid)
sys.exit(0)
``` |
{
"source": "joel-wilkins/balto-api",
"score": 3
} |
#### File: balto-api/models/genre.py
```python
from sqlalchemy.dialects.postgresql import UUID
from app import db
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
import uuid
class Genre(db.Model):
__tablename__ = 'genre'
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
genre = db.Column(db.String(120), nullable=False, unique=True)
def __init__(self, genre: str):
self.genre = genre
class GenreSchema(SQLAlchemySchema):
class Meta:
model = Genre
id = auto_field()
genre = auto_field()
```
#### File: balto-api/services/cast_member_service.py
```python
from models.cast_member import CastMember
from sqlalchemy import or_
import uuid
class CastMemberService():
db = None
def __init__(self, db):
self.db = db
def parse_from_string(self, cast_member_name: str) -> CastMember:
if (not cast_member_name):
return None
split_name = cast_member_name.split(' ')
first_name = ''
last_name = ''
if len(split_name) > 1:
first_name = split_name[0]
last_name = split_name[-1]
else:
first_name = last_name = cast_member_name
return CastMember(first_name, last_name, cast_member_name)
def get_id(self, cast_member: CastMember) -> uuid:
return self.db.session.query(CastMember.id).filter(
or_(
CastMember.first_name == cast_member.first_name,
CastMember.last_name == cast_member.last_name,
CastMember.full_name == cast_member.full_name
)
).scalar()
def get_all(self, query):
wildcarded_query = f'{query}%'
return self.db.session.query(CastMember).filter(
or_(
CastMember.first_name.ilike(wildcarded_query),
CastMember.last_name.ilike(wildcarded_query),
CastMember.full_name.ilike(wildcarded_query)
)
).all()
def insert(self, cast_member: CastMember) -> uuid:
self.db.session.add(cast_member)
self.db.session.commit()
return cast_member.id
```
#### File: balto-api/services/movie_service.py
```python
from models.movie import Movie
from sqlalchemy import func, or_
import uuid
from services.movie_director_service import MovieDirectorService
from services.movie_cast_member_service import MovieCastMemberService
from models.movie_cast_member import MovieCastMember
from models.movie_director import MovieDirector
from models.genre import Genre
from models.movie_origin import MovieOrigin
from models.director import Director
from models.cast_member import CastMember
class MovieService():
db = None
def __init__(self, db):
self.db = db
def insert(self, movie: Movie) -> uuid:
self.db.session.add(movie)
self.db.session.commit()
return movie.id
def get(self, movie_id):
return self.db.session.query(Movie).filter(Movie.id == movie_id) \
.scalar()
def get_all(self, page: int, page_size: int, query):
base_query = self.db.session.query(Movie)
if query and query != '':
base_query = self.__apply_wildcard_to_query(base_query, query)
return base_query.order_by(
Movie.release_year.desc(),
Movie.title
).paginate(page, page_size).items
def get_count(self, query):
base_query = self.db.session.query(Movie).distinct(Movie.id)
if query and query != '':
base_query = self.__apply_wildcard_to_query(base_query, query)
count_query = base_query.group_by(Movie).statement.with_only_columns(
[func.count(Movie.id)]
)
return self.db.session.execute(count_query).scalar()
def insert_from_args(self, args):
movie = Movie(
args['release_year'],
args['title'],
args['wikipedia_link'],
args['plot'],
args['origin']['id'],
args['genre']['id']
)
movie_id = self.insert(movie)
cast_member_service = MovieCastMemberService(self.db)
director_service = MovieDirectorService(self.db)
if len(args['cast']) > 0:
movie_cast_records = []
for cast in args['cast']:
movie_cast_records.append(
MovieCastMember(movie_id, cast['id'])
)
cast_member_service.insert_many(movie_cast_records)
if len(args['directors']) > 0:
movie_director_records = []
for director in args['directors']:
movie_director_records.append(
MovieDirector(movie_id, director['id'])
)
director_service.insert_many(movie_director_records)
return movie
def update(self, movie: Movie, args):
movie.genre_id = args['genre']['id']
movie.origin_id = args['origin']['id']
movie.plot = args['plot']
movie.release_year = args['release_year']
movie.title = args['title']
movie.wikipedia_link = args['wikipedia_link']
cast_member_service = MovieCastMemberService(self.db)
director_service = MovieDirectorService(self.db)
cast_member_service.delete_by_movie_id(movie.id, False)
director_service.delete_by_movie_id(movie.id, False)
self.db.session.commit()
if len(args['cast']) > 0:
movie_cast_records = []
for cast in args['cast']:
movie_cast_records.append(
MovieCastMember(movie.id, cast['id'])
)
cast_member_service.insert_many(movie_cast_records)
if len(args['directors']) > 0:
movie_director_records = []
for director in args['directors']:
movie_director_records.append(
MovieDirector(movie.id, director['id'])
)
director_service.insert_many(movie_director_records)
def delete(self, movie: Movie):
self.db.session.delete(movie)
self.db.session.commit()
def __apply_wildcard_to_query(self, base_query, query):
wildcarded_query = f'%{query}%'
return base_query.join(
Movie.genre,
Movie.cast,
Movie.directors,
Movie.origin
).filter(
or_(
Movie.title.ilike(wildcarded_query),
Genre.genre.ilike(wildcarded_query),
MovieOrigin.origin.ilike(wildcarded_query),
Director.full_name.ilike(wildcarded_query),
CastMember.full_name.ilike(wildcarded_query),
Genre.genre.ilike(wildcarded_query)
)
)
``` |
{
"source": "JoelWilloughby/asn1tools",
"score": 2
} |
#### File: asn1tools/codecs/der.py
```python
from . import DecodeTagError
from . import ber
from . import restricted_utc_time_to_datetime
from . import restricted_utc_time_from_datetime
from . import restricted_generalized_time_to_datetime
from . import restricted_generalized_time_from_datetime
from .compiler import clean_bit_string_value
from .ber import Class
from .ber import Encoding
from .ber import Tag
from .ber import encode_length_definite
from .ber import decode_length_definite
from .ber import encode_signed_integer
from .ber import decode_signed_integer
from .ber import encode_tag
from .ber import Boolean
from .ber import Real
from .ber import Null
from .ber import ObjectIdentifier
from .ber import Enumerated
from .ber import Sequence
from .ber import Set
from .ber import Choice
from .ber import Any
from .ber import AnyDefinedBy
from .ber import Recursive
from .ber import ObjectDescriptor
from .ber import Date
from .ber import TimeOfDay
from .ber import DateTime
from .ber import decode_length
from .ber import encode_real
from .ber import decode_real
class Type(object):
def __init__(self, name, type_name, number, flags=0):
self.name = name
self.type_name = type_name
if number is None:
self.tag = None
else:
self.tag = encode_tag(number, flags)
self.optional = False
self.default = None
def set_tag(self, number, flags):
if not Class.APPLICATION & flags:
flags |= Class.CONTEXT_SPECIFIC
self.tag = encode_tag(number, flags)
def set_size_range(self, minimum, maximum, has_extension_marker):
pass
def decode_tag(self, data, offset):
end_offset = offset + len(self.tag)
if data[offset:end_offset] != self.tag:
raise DecodeTagError(self.type_name,
self.tag,
data[offset:end_offset],
offset)
return end_offset
def set_default(self, value):
self.default = value
def get_default(self):
return self.default
def is_default(self, value):
return value == self.default
def has_default(self):
return self.default is not None
class StringType(Type):
TAG = None
ENCODING = None
def __init__(self, name):
super(StringType, self).__init__(name,
self.__class__.__name__,
self.TAG)
def encode(self, data, encoded):
data = data.encode(self.ENCODING)
encoded.extend(self.tag)
encoded.extend(encode_length_definite(len(data)))
encoded.extend(data)
def decode(self, data, offset):
offset = self.decode_tag(data, offset)
length, offset = decode_length_definite(data, offset)
end_offset = offset + length
return data[offset:end_offset].decode(self.ENCODING), end_offset
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
self.name)
class ArrayType(Type):
def __init__(self, name, tag_name, tag, element_type):
super(ArrayType, self).__init__(name,
tag_name,
tag,
Encoding.CONSTRUCTED)
self.element_type = element_type
def set_tag(self, number, flags):
super(ArrayType, self).set_tag(number,
flags | Encoding.CONSTRUCTED)
def encode(self, data, encoded):
encoded_elements = bytearray()
for entry in data:
self.element_type.encode(entry, encoded_elements)
encoded.extend(self.tag)
encoded.extend(encode_length_definite(len(encoded_elements)))
encoded.extend(encoded_elements)
def decode(self, data, offset):
offset = self.decode_tag(data, offset)
length, offset = decode_length_definite(data, offset)
decoded = []
start_offset = offset
while (offset - start_offset) < length:
decoded_element, offset = self.element_type.decode(data, offset)
decoded.append(decoded_element)
return decoded, offset
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__,
self.name,
self.element_type)
class Integer(Type):
def __init__(self, name):
super(Integer, self).__init__(name,
'INTEGER',
Tag.INTEGER)
def encode(self, data, encoded):
encoded.extend(self.tag)
value = encode_signed_integer(data)
encoded.extend(encode_length_definite(len(value)))
encoded.extend(value)
def decode(self, data, offset):
offset = self.decode_tag(data, offset)
length, offset = decode_length_definite(data, offset)
end_offset = offset + length
return decode_signed_integer(data[offset:end_offset]), end_offset
def __repr__(self):
return 'Integer({})'.format(self.name)
class BitString(Type):
def __init__(self, name, has_named_bits):
super(BitString, self).__init__(name,
'BIT STRING',
Tag.BIT_STRING)
self.has_named_bits = has_named_bits
def is_default(self, value):
if self.default is None:
return False
clean_value = clean_bit_string_value(value,
self.has_named_bits)
clean_default = clean_bit_string_value(self.default,
self.has_named_bits)
return clean_value == clean_default
def encode(self, data, encoded):
number_of_bytes, number_of_rest_bits = divmod(data[1], 8)
data = bytearray(data[0])
if number_of_rest_bits == 0:
data = data[:number_of_bytes]
number_of_unused_bits = 0
else:
last_byte = data[number_of_bytes]
last_byte &= ((0xff >> number_of_rest_bits) ^ 0xff)
data = data[:number_of_bytes]
data.append(last_byte)
number_of_unused_bits = (8 - number_of_rest_bits)
encoded.extend(self.tag)
encoded.extend(encode_length_definite(len(data) + 1))
encoded.append(number_of_unused_bits)
encoded.extend(data)
def decode(self, data, offset):
offset = self.decode_tag(data, offset)
length, offset = decode_length_definite(data, offset)
end_offset = offset + length
number_of_bits = 8 * (length - 1) - data[offset]
offset += 1
return (bytes(data[offset:end_offset]), number_of_bits), end_offset
def __repr__(self):
return 'BitString({})'.format(self.name)
class OctetString(Type):
def __init__(self, name):
super(OctetString, self).__init__(name,
'OCTET STRING',
Tag.OCTET_STRING)
def encode(self, data, encoded):
encoded.extend(self.tag)
encoded.extend(encode_length_definite(len(data)))
encoded.extend(data)
def decode(self, data, offset):
offset = self.decode_tag(data, offset)
length, offset = decode_length_definite(data, offset)
end_offset = offset + length
return bytes(data[offset:end_offset]), end_offset
def __repr__(self):
return 'OctetString({})'.format(self.name)
class SequenceOf(ArrayType):
def __init__(self, name, element_type):
super(SequenceOf, self).__init__(name,
'SEQUENCE OF',
Tag.SEQUENCE,
element_type)
class SetOf(ArrayType):
def __init__(self, name, element_type):
super(SetOf, self).__init__(name,
'SET OF',
Tag.SET,
element_type)
class UTF8String(StringType):
TAG = Tag.UTF8_STRING
ENCODING = 'utf-8'
class NumericString(StringType):
TAG = Tag.NUMERIC_STRING
ENCODING = 'ascii'
class PrintableString(StringType):
TAG = Tag.PRINTABLE_STRING
ENCODING = 'ascii'
class IA5String(StringType):
TAG = Tag.IA5_STRING
ENCODING = 'ascii'
class VisibleString(StringType):
TAG = Tag.VISIBLE_STRING
ENCODING = 'ascii'
class GeneralString(StringType):
TAG = Tag.GENERAL_STRING
ENCODING = 'latin-1'
class BMPString(StringType):
TAG = Tag.BMP_STRING
ENCODING = 'utf-16-be'
class UniversalString(StringType):
TAG = Tag.UNIVERSAL_STRING
ENCODING = 'utf-32-be'
class GraphicString(StringType):
TAG = Tag.GRAPHIC_STRING
ENCODING = 'latin-1'
class TeletexString(StringType):
TAG = Tag.T61_STRING
ENCODING = 'iso-8859-1'
class UTCTime(Type):
def __init__(self, name):
super(UTCTime, self).__init__(name,
'UTCTime',
Tag.UTC_TIME)
def encode(self, data, encoded):
data = restricted_utc_time_from_datetime(data).encode('ascii')
encoded.extend(self.tag)
encoded.append(len(data))
encoded.extend(data)
def decode(self, data, offset):
offset = self.decode_tag(data, offset)
length, offset = decode_length_definite(data, offset)
end_offset = offset + length
decoded = data[offset:end_offset].decode('ascii')
return restricted_utc_time_to_datetime(decoded), end_offset
def __repr__(self):
return 'UTCTime({})'.format(self.name)
class GeneralizedTime(Type):
def __init__(self, name):
super(GeneralizedTime, self).__init__(name,
'GeneralizedTime',
Tag.GENERALIZED_TIME)
def encode(self, data, encoded):
data = restricted_generalized_time_from_datetime(data).encode('ascii')
encoded.extend(self.tag)
encoded.append(len(data))
encoded.extend(data)
def decode(self, data, offset):
offset = self.decode_tag(data, offset)
length, offset = decode_length_definite(data, offset)
end_offset = offset + length
decoded = data[offset:end_offset].decode('ascii')
return restricted_generalized_time_to_datetime(decoded), end_offset
def __repr__(self):
return 'GeneralizedTime({})'.format(self.name)
class Compiler(ber.Compiler):
def compile_implicit_type(self, name, type_descriptor, module_name):
type_name = type_descriptor['type']
if type_name == 'SEQUENCE':
compiled = Sequence(
name,
*self.compile_members(type_descriptor['members'],
module_name))
elif type_name == 'SEQUENCE OF':
compiled = SequenceOf(name,
self.compile_type('',
type_descriptor['element'],
module_name))
elif type_name == 'SET':
compiled = Set(
name,
*self.compile_members(type_descriptor['members'],
module_name))
elif type_name == 'SET OF':
compiled = SetOf(name,
self.compile_type('',
type_descriptor['element'],
module_name))
elif type_name == 'CHOICE':
compiled = Choice(
name,
*self.compile_members(type_descriptor['members'],
module_name))
elif type_name == 'INTEGER':
compiled = Integer(name)
elif type_name == 'REAL':
compiled = Real(name)
elif type_name == 'ENUMERATED':
compiled = Enumerated(name,
type_descriptor['values'],
self._numeric_enums)
elif type_name == 'BOOLEAN':
compiled = Boolean(name)
elif type_name == 'OBJECT IDENTIFIER':
compiled = ObjectIdentifier(name)
elif type_name == 'OCTET STRING':
compiled = OctetString(name)
elif type_name == 'TeletexString':
compiled = TeletexString(name)
elif type_name == 'NumericString':
compiled = NumericString(name)
elif type_name == 'PrintableString':
compiled = PrintableString(name)
elif type_name == 'IA5String':
compiled = IA5String(name)
elif type_name == 'VisibleString':
compiled = VisibleString(name)
elif type_name == 'GeneralString':
compiled = GeneralString(name)
elif type_name == 'UTF8String':
compiled = UTF8String(name)
elif type_name == 'BMPString':
compiled = BMPString(name)
elif type_name == 'GraphicString':
compiled = GraphicString(name)
elif type_name == 'UTCTime':
compiled = UTCTime(name)
elif type_name == 'UniversalString':
compiled = UniversalString(name)
elif type_name == 'GeneralizedTime':
compiled = GeneralizedTime(name)
elif type_name == 'DATE':
compiled = Date(name)
elif type_name == 'TIME-OF-DAY':
compiled = TimeOfDay(name)
elif type_name == 'DATE-TIME':
compiled = DateTime(name)
elif type_name == 'BIT STRING':
has_named_bits = ('named-bits' in type_descriptor)
compiled = BitString(name, has_named_bits)
elif type_name == 'ANY':
compiled = Any(name)
elif type_name == 'ANY DEFINED BY':
choices = {}
for key, value in type_descriptor['choices'].items():
choices[key] = self.compile_type(key,
value,
module_name)
compiled = AnyDefinedBy(name,
type_descriptor['value'],
choices)
elif type_name == 'NULL':
compiled = Null(name)
elif type_name == 'EXTERNAL':
compiled = Sequence(
name,
*self.compile_members(self.external_type_descriptor()['members'],
module_name))
compiled.set_tag(Tag.EXTERNAL, 0)
elif type_name == 'ObjectDescriptor':
compiled = ObjectDescriptor(name)
else:
if type_name in self.types_backtrace:
compiled = Recursive(name,
type_name,
module_name)
self.recursive_types.append(compiled)
else:
compiled = self.compile_user_type(name,
type_name,
module_name)
return compiled
def compile_dict(specification, numeric_enums=False):
return Compiler(specification, numeric_enums).process()
```
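Every concrete type above follows the same encode shape: append the tag, then a definite length, then the content octets, and `decode` walks the same three fields back. A standalone sketch of that tag–length–value layout for a UTF8String is shown below; the helper names are invented for illustration (they are not asn1tools APIs) and only single-byte tags with short-form lengths are handled.
```python
# Standalone illustration of the tag / definite-length / value layout used by
# the DER types above. Helper names are invented for this sketch and are not
# asn1tools APIs; only short-form (< 128 byte) lengths are handled.
def der_encode_utf8_string(text):
    content = text.encode('utf-8')
    if len(content) >= 128:
        raise ValueError('sketch only handles short-form lengths')
    return bytes([0x0c, len(content)]) + content    # 0x0c = UTF8String tag

def der_decode_utf8_string(encoded, offset=0):
    assert encoded[offset] == 0x0c                  # check the expected tag
    length = encoded[offset + 1]                    # short-form definite length
    start = offset + 2
    end = start + length
    return encoded[start:end].decode('utf-8'), end

if __name__ == '__main__':
    blob = der_encode_utf8_string('hello')
    print(blob.hex())                               # 0c0568656c6c6f
    print(der_decode_utf8_string(blob))             # ('hello', 7)
```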
#### File: asn1tools/asn1tools/compiler.py
```python
import diskcache
from .parser import parse_files
from .parser import parse_string
from .codecs import compiler
from .codecs import ber
from .codecs import der
from .codecs import gser
from .codecs import jer
from .codecs import oer
from .codecs import per
from .codecs import uper
from .codecs import xer
from .codecs import type_checker
from .codecs import constraints_checker
from .errors import CompileError
from .errors import EncodeError
from .errors import DecodeError
class Specification(object):
"""This class is used to encode and decode ASN.1 types found in an
ASN.1 specification.
Instances of this class are created by the factory functions
:func:`~asn1tools.compile_files()`,
:func:`~asn1tools.compile_string()` and
:func:`~asn1tools.compile_dict()`.
"""
def __init__(self,
modules,
decode_length,
type_checkers,
constraints_checkers):
self._modules = modules
self._decode_length = decode_length
self._types = {}
duplicated = set()
for module_name in modules:
types = modules[module_name]
for type_name, type_ in types.items():
type_.type_checker = type_checkers[module_name][type_name]
type_.constraints_checker = constraints_checkers[module_name][type_name]
if type_name in duplicated:
continue
if type_name in self._types:
del self._types[type_name]
duplicated.add(type_name)
continue
self._types[type_name] = type_
@property
def types(self):
"""A dictionary of all unique types in the specification. Types found
in two or more modules are not part of this dictionary.
>>> question = foo.types['Question']
>>> question
Sequence(Question, [Integer(id), IA5String(question)])
>>> question.encode({'id': 1, 'question': 'Is 1+1=3?'})
b'0\\x0e\\x02\\x01\\x01\\x16\\x09Is 1+1=3?'
"""
return self._types
@property
def modules(self):
"""A dictionary of all modules in the specification. Unlike
:attr:`.types`, this attribute contains every type, even if
the type name was found in two or more modules.
>>> question = foo.modules['Foo']['Question']
>>> question
Sequence(Question, [Integer(id), IA5String(question)])
>>> question.encode({'id': 1, 'question': 'Is 1+1=3?'})
b'0\\x0e\\x02\\x01\\x01\\x16\\x09Is 1+1=3?'
"""
return self._modules
def encode(self,
name,
data,
check_types=True,
check_constraints=False,
**kwargs):
"""Encode given dictionary `data` as given type `name` and return the
encoded data as a bytes object.
If `check_types` is ``True`` all objects in `data` are checked
against the expected Python type for its ASN.1 type. Set
`check_types` to ``False`` to minimize the runtime overhead,
but instead get less informative error messages.
See `Types`_ for a mapping table from ASN.1 types to Python
types.
If `check_constraints` is ``True`` all objects in `data` are
checked against their ASN.1 type constraints. A
ConstraintsError exception is raised if the constraints are
not fulfilled. Set `check_constraints` to ``False`` to skip
the constraints check and minimize the runtime overhead, but
instead get less informative error messages and allow encoding
of values not fulfilling the constraints.
>>> foo.encode('Question', {'id': 1, 'question': 'Is 1+1=3?'})
b'0\\x0e\\x02\\x01\\x01\\x16\\x09Is 1+1=3?'
"""
try:
type_ = self._types[name]
except KeyError:
raise EncodeError(
"Type '{}' not found in types dictionary.".format(name))
if check_types:
type_.check_types(data)
if check_constraints:
type_.check_constraints(data)
return type_.encode(data, **kwargs)
def decode(self, name, data, check_constraints=False):
"""Decode given bytes object `data` as given type `name` and return
the decoded data as a dictionary.
If `check_constraints` is ``True`` all objects in `data` are
checked against their ASN.1 type constraints. A
ConstraintsError exception is raised if the constraints are
not fulfilled. Set `check_constraints` to ``False`` to skip
the constraints check and minimize the runtime overhead, but
instead allow decoding of values not fulfilling the
constraints.
>>> foo.decode('Question', b'0\\x0e\\x02\\x01\\x01\\x16\\x09Is 1+1=3?')
{'id': 1, 'question': 'Is 1+1=3?'}
"""
try:
type_ = self._types[name]
except KeyError:
raise DecodeError(
"Type '{}' not found in types dictionary.".format(name))
decoded = type_.decode(data)
if check_constraints:
type_.check_constraints(decoded)
return decoded
def decode_length(self, data):
"""Decode the length of given data `data`. Returns None if not enough
data was given to decode the length.
This method only works for BER and DER codecs with definite
length in the first data encoding. Other codecs and
        combinations lack length information in the data.
>>> foo.decode_length(b'\\x30\\x0e\\x02\\x01\\x01')
16
"""
return self._decode_length(data)
def _compile_any_defined_by_type(type_, choices):
type_['choices'] = {}
for key, value in choices.items():
specification = 'A DEFINITIONS ::= BEGIN B ::= {} END'.format(value)
type_['choices'][key] = parse_string(specification)['A']['types']['B']
def _compile_any_defined_by_choices(specification,
any_defined_by_choices):
for location, choices in any_defined_by_choices.items():
module_name = location[0]
type_names = location[1:-1]
member_name = location[-1]
types = specification[module_name]['types']
if len(type_names) == 0:
_compile_any_defined_by_type(types[member_name], choices)
else:
for type_name in type_names:
types = types[type_name]
for member in types['members']:
if member['name'] != member_name:
continue
_compile_any_defined_by_type(member, choices)
break
def _compile_files_cache(filenames,
codec,
any_defined_by_choices,
encoding,
cache_dir,
numeric_enums):
key = [codec.encode('ascii')]
if isinstance(filenames, str):
filenames = [filenames]
for filename in filenames:
with open(filename, 'rb') as fin:
key.append(fin.read())
key = b''.join(key)
cache = diskcache.Cache(cache_dir)
try:
return cache[key]
except KeyError:
compiled = compile_dict(parse_files(filenames, encoding),
codec,
any_defined_by_choices,
numeric_enums)
cache[key] = compiled
return compiled
def compile_dict(specification,
codec='ber',
any_defined_by_choices=None,
numeric_enums=False):
"""Compile given ASN.1 specification dictionary and return a
:class:`~asn1tools.compiler.Specification` object that can be used
to encode and decode data structures with given codec
`codec`. `codec` may be one of ``'ber'``, ``'der'``, ``'gser'``,
    ``'jer'``, ``'oer'``, ``'per'``, ``'uper'`` and ``'xer'``.
Give `numeric_enums` as ``True`` for numeric enumeration values
instead of strings.
>>> foo = asn1tools.compile_dict(asn1tools.parse_files('foo.asn'))
"""
codecs = {
'ber': ber,
'der': der,
'gser': gser,
'jer': jer,
'oer': oer,
'per': per,
'uper': uper,
'xer': xer
}
try:
codec = codecs[codec]
except KeyError:
raise CompileError("Unsupported codec '{}'.".format(codec))
if any_defined_by_choices:
_compile_any_defined_by_choices(specification,
any_defined_by_choices)
return Specification(codec.compile_dict(specification,
numeric_enums),
codec.decode_length,
type_checker.compile_dict(specification,
numeric_enums),
constraints_checker.compile_dict(specification,
numeric_enums))
def compile_string(string,
codec='ber',
any_defined_by_choices=None,
numeric_enums=False):
"""Compile given ASN.1 specification string and return a
:class:`~asn1tools.compiler.Specification` object that can be used
to encode and decode data structures with given codec
`codec`. `codec` may be one of ``'ber'``, ``'der'``, ``'gser'``,
    ``'jer'``, ``'oer'``, ``'per'``, ``'uper'`` and ``'xer'``.
Give `numeric_enums` as ``True`` for numeric enumeration values
instead of strings.
>>> with open('foo.asn') as fin:
... foo = asn1tools.compile_string(fin.read())
"""
return compile_dict(parse_string(string),
codec,
any_defined_by_choices,
numeric_enums)
def compile_files(filenames,
codec='ber',
any_defined_by_choices=None,
encoding='utf-8',
cache_dir=None,
numeric_enums=False):
"""Compile given ASN.1 specification file(s) and return a
:class:`~asn1tools.compiler.Specification` object that can be used
to encode and decode data structures with given codec
`codec`. `codec` may be one of ``'ber'``, ``'der'``, ``'gser'``,
    ``'jer'``, ``'oer'``, ``'per'``, ``'uper'`` and ``'xer'``.
`encoding` is the text encoding. This argument is passed to the
built-in function `open()`.
`cache_dir` specifies the compiled files cache location in the
file system. Give as ``None`` to disable the cache. By default the
cache is disabled. The cache key is the concatenated contents of
given files and the codec name. Using a cache will significantly
reduce the compile time when recompiling the same files. The cache
directory is automatically created if it does not exist. Remove
the cache directory `cache_dir` to clear the cache.
Give `numeric_enums` as ``True`` for numeric enumeration values
instead of strings.
>>> foo = asn1tools.compile_files('foo.asn')
Give `cache_dir` as a string to use a cache.
>>> foo = asn1tools.compile_files('foo.asn', cache_dir='my_cache')
"""
if cache_dir is None:
return compile_dict(parse_files(filenames, encoding),
codec,
any_defined_by_choices,
numeric_enums)
else:
return _compile_files_cache(filenames,
codec,
any_defined_by_choices,
encoding,
cache_dir,
numeric_enums)
def pre_process_dict(specification):
"""Pre-process given specification dictionary, expanding COMPONENTS OF
and adding extension markers if EXTENSIBILITY IMPLIED is active.
"""
return compiler.pre_process(specification)
``` |
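The `Question` examples quoted in the docstrings above can be reproduced end to end through `compile_string`. The small ASN.1 module below is an assumed definition chosen so that the encoding matches those docstring bytes; asn1tools must be installed for the sketch to run.
```python
# Usage sketch for the compiler module above. The Foo/Question definition is an
# assumed minimal spec matching the docstring examples; asn1tools must be
# installed for this to run.
import asn1tools

SPEC = '''
Foo DEFINITIONS ::= BEGIN
    Question ::= SEQUENCE {
        id INTEGER,
        question IA5String
    }
END
'''

foo = asn1tools.compile_string(SPEC, 'der')
encoded = foo.encode('Question', {'id': 1, 'question': 'Is 1+1=3?'})
print(encoded.hex())                      # 300e0201011609497320312b313d333f
print(foo.decode('Question', encoded))    # {'id': 1, 'question': 'Is 1+1=3?'}
```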
{
"source": "joelwking/ansible-meraki",
"score": 2
} |
#### File: joelwking/ansible-meraki/Meraki_Connector.py
```python
import json
import time
import requests
try:
    import httplib                      # Python 2
except ImportError:
    import http.client as httplib       # Python 3
# ========================================================
# Meraki_Connector
# ========================================================
class Connector(object):
" Class variables, shared by all instances of the class "
BANNER = "MERAKI"
APP_ERROR = 0
APP_SUCCESS = 1
successful_POST_status = (201,)
def __init__(self, API_key=None, dashboard="dashboard.meraki.com"):
"""
Instance variables, belongs only to the current instance of a class.
"""
self.HEADER = {"Content-Type": "application/json"}
self.status_codes = [] # List of all status codes
self.progress = []
self.app_run_status = Connector.APP_SUCCESS
self.result = { 'ansible_facts': {'meraki': [] }}
# Configuration variables
self.configuration = dict()
self.configuration["Meraki-API-Key"] = API_key
self.configuration["dashboard"] = dashboard
# Parameters
self.param = dict()
self.param["search_string"] = "*" # Return all devices
self.param["timespan"] = 2592000 # The Whole Enchilada, one month of data
try:
requests.packages.urllib3.disable_warnings()
except AttributeError:
# Older versions of Requests do not support 'disable_warnings'
pass
def debug_print(self, message):
"NOT IMPLEMENTED: Method, a function that is defined in a class definition."
return None
def set_status_save_progress(self, status, message):
"Set status and append to the progress message, for debugging"
self.app_run_status = status
self.progress.append(message)
return
def get_configuration(self, requested_key):
"Return requested key or None if the key does not exist."
try:
return self.configuration[requested_key]
except:
return None
def get_data_size(self):
"Return the number of elements in our list of clients"
return len(self.result['ansible_facts']['meraki'])
def add_data(self, response):
"Each client dictionary is a list element"
self.result['ansible_facts']['meraki'].append(response)
return None
def set_status(self, status):
self.app_run_status = status
return None
def get_status(self):
"get the current status, return either APP_SUCCESS or APP_ERROR"
return self.app_run_status
def get_last_status_code(self):
"return the most recent status code"
return self.status_codes[-1]
def get_network_id(self, list_of_networks, network_name):
"""
A query to get_networks returns a list of configured networks.
This routine returns the network 'id' for a given network 'name', or None
"""
for network in list_of_networks:
if network_name == network['name']:
return network['id']
return None
def get_org_id(self, list_of_orgs, org_name):
"""
        A query to get_org_ids returns a list of the organizations managed by this administrator
This routine returns the org 'id' for a given org 'name', or None
"""
for org in list_of_orgs:
if org_name == org['name']:
return org['id']
return None
def set_parameters(self, **kwargs):
" If the parameters is an empty dictionary, use the default values."
for key, value in kwargs.items():
self.param[key] = value
self.debug_print("%s SET_PARAMETERS parameters:\n%s" % (Connector.BANNER, self.param))
return
def locate_device(self):
"""
Locating client devices means walking a tree based on the API Key. The key is associated with one or more organizations,
an organization can have one or more networks, each network can have multiple devices, and each device can have one or
more client machines. Depending on the timespan specified, you may see differing results. Larger timespans may show the same
        client connected to multiple devices. Small timespans may not return any results.
"""
org_id_list = self.get_org_ids()
for organization in org_id_list:
networks_list = self.get_networks(organization["id"])
for network in networks_list:
device_list = self.get_devices(network["id"])
for device in device_list:
client_list = self.get_clients(device["serial"], self.param["timespan"])
for client in client_list:
response = self.build_output_record(self.param["search_string"], organization, network, device, client)
if response:
self.add_data(response)
if self.get_data_size() > 0:
self.set_status_save_progress(Connector.APP_SUCCESS, "Returned: %s clients" % self.get_data_size())
else:
self.set_status_save_progress(Connector.APP_ERROR, "Returned: %s clients" % self.get_data_size())
self.debug_print("%s Data size: %s" % (Connector.BANNER, self.get_data_size()))
return self.get_status()
def build_output_record(self, search_string, organization, network, device, client):
"""
Match the search string against the client MAC and description, if there is a match return a dictionary to add to
the Action Result data field. A search string of "*" means to return everything.
"""
self.debug_print("%s BUILD_OUTPUT_RECORD for: %s %s %s" % (Connector.BANNER, device["serial"], client['description'], client['mac']))
if client['description'] is None: # Description could be NoneType
client['description'] = ""
if search_string == "*" or search_string in client['description'] or search_string in client['mac']:
return {'client': {'ip': client['ip'], 'mac': client['mac'], 'description': client['description'], 'dhcpHostname': client['dhcpHostname']},
'device': device['name'],
'network': network['name'],
'organization': organization['name']}
return None
def get_org_ids(self):
"""
Return a list of organization IDs for this account
URI = "https://dashboard.meraki.com/api/v0/organizations"
return [{"id":123456,"name":"WWT"}]
"""
return self.query_api("/api/v0/organizations")
def get_networks(self, organization_id):
"""
Return a list of network IDs for this organization
URI = "https://dashboard.meraki.com/api/v0/organizations/123456/networks"
return [{u'id': u'L_62937804798460', u'name': u'SWISSWOOD', u'organizationId': u'123456', u'tags': u'',
u'timeZone': u'America/New_York', u'type': u'combined'}]
"""
return self.query_api("/api/v0/organizations/" + str(organization_id) + "/networks")
def get_devices(self, network_id):
"""
Return a list of devices in this network
URI = "https://dashboard.meraki.com/api/v0/networks/L_6293780028460/devices"
return [{u'address': u'swisswood dr, Denton, NC 16713', u'lat': 34.9543899, u'lng': -77.721312,
u'mac': u'88:15:44:08:ad:08', u'model': u'MX64', u'name': u'SWISSWOOD-MX64', u'serial': u'Q2KN-R9P3-3U6X',
u'tags': u' recently-added ', u'wan1Ip': u'192.168.0.3', u'wan2Ip': None}]
"""
return self.query_api("/api/v0/networks/" + network_id + "/devices")
def get_clients(self, serial, timespan):
"""
Return a list of clients associated with this device serial number.
URI = "https://dashboard.meraki.com/api/v0/devices/Q2HP-NAY7-A2WH/clients?timespan=86400"
return [{u'description': u'alpha_b-THINK-7', u'dhcpHostname': u'alpha_b-THINK-7', u'id': u'k7c0271',
u'mac': u'60:6c:77:01:22:42',
u'mdnsName': None, u'switchport': u'3', u'usage': {u'recv': 14168.0, u'sent': 124917.00000000001}}]
"""
if timespan > 2592000:
timespan = 2592000
timespan = str(timespan)
return self.query_api("/api/v0/devices/" + serial + "/clients?timespan=" + timespan)
def get_VLANS(self, network_id):
"""
Return a list of VLANS for this network_id
'https://dashboard.meraki.com/api/v0/networks/[networkId]/vlans'
"""
return self.query_api("/api/v0/networks/" + network_id + "/vlans")
def build_URI(self, URL):
"Format the URL for the request and return"
header = self.HEADER
header["X-Cisco-Meraki-API-Key"] = self.get_configuration("Meraki-API-Key")
return "https://" + self.get_configuration("dashboard") + URL
def build_header(self):
"Add the API key to the header and return"
header = self.HEADER
header["X-Cisco-Meraki-API-Key"] = self.get_configuration("Meraki-API-Key")
return header
def query_api(self, URL):
"""
Method to query and return results, return an empty list if there are connection error(s).
"""
try:
r = requests.get(self.build_URI(URL), headers=self.build_header(), verify=False)
except requests.ConnectionError as e:
self.set_status_save_progress(Connector.APP_ERROR, str(e))
return []
self.status_codes.append(r.status_code)
if r.status_code in (httplib.OK,):
pass
else:
self.debug_print("%s QUERY_API url: %s status code: %s" % (Connector.BANNER, URL, r.status_code))
return []
try:
return r.json()
except ValueError: # If you get a 404 error, throws a ValueError exception
return []
def POST(self, URL, body):
"""
Method to POST (Add) to the configuration. Return empty dictionary if there are connection errors.
The body is a dictionary, which is converted to json.
Sample return values are:
{u'errors': [u'Validation failed: Vlan has already been taken']}
{u'applianceIp': u'192.168.64.1', u'id': 64, u'name': u'VLAN64', u'networkId': u'L_6228460', u'subnet': u'192.168.64.0/24'}
"""
try:
r = requests.post(self.build_URI(URL), headers=self.build_header(), data=json.dumps(body), verify=False)
except requests.ConnectionError as e:
self.set_status_save_progress(Connector.APP_ERROR, str(e))
return dict()
self.status_codes.append(r.status_code)
try:
return r.json()
except ValueError:
return dict()
```
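Outside of Ansible, the `Connector` class above can be driven directly: construct it with an API key, optionally narrow the search with `set_parameters`, call `locate_device()`, and read the collected clients out of `result`. The key below is a placeholder, and a reachable Meraki dashboard account is required for real output.
```python
# Minimal usage sketch for the Connector class above; the API key is a
# placeholder and a live Meraki dashboard account is required for real output.
import Meraki_Connector

meraki = Meraki_Connector.Connector(API_key='0123456789abcdef')
meraki.set_parameters(search_string='*', timespan=86400)    # last 24 hours

if meraki.locate_device() == Meraki_Connector.Connector.APP_SUCCESS:
    for record in meraki.result['ansible_facts']['meraki']:
        client = record['client']
        print(record['organization'], record['network'], record['device'],
              client['mac'], client['description'])
else:
    print('lookup failed:', meraki.progress)
```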
#### File: joelwking/ansible-meraki/meraki_facts.py
```python
DOCUMENTATION = '''
---
module: meraki_facts.py
author: <NAME>, World Wide Technology
version_added: "1.0"
short_description: Locate devices in a Meraki Cloud Managed Network and return as facts
description:
- Gather facts about clients for the organization, network, and devices in a Meraki network.
requirements:
    - Meraki_Connector.py
options:
dashboard:
description:
- hostname of the dashboard, default is dashboard.meraki.com
required: False
apikey:
description:
- API key, for authentication and association with an organization.
required: True
timespan:
description:
- The timespan for which clients will be fetched. Must be at most one month and in seconds.
required: False
search_string:
description:
- Search for this string in client description and MAC, return all clients if not specified
required: False
'''
EXAMPLES = '''
Sample inventory file and execution from shell
[meraki_dashboard]
dashboard.meraki.com ansible_connection=local ansible_ssh_user=administrator
ansible -m meraki_facts meraki_dashboard -a 'apikey=<KEY>'
Sample playbook
- name: gather facts about the cloud
meraki_facts:
dashboard: "{{dashboard}}"
apikey: "{{meraki_params.apikey}}"
timespan: 1200
search_string: "WIZ"
'''
# ---------------------------------------------------------------------------
# MAIN
# ---------------------------------------------------------------------------
def main():
"Locate devices in a Meraki Cloud Managed Network and return as facts"
module = AnsibleModule(argument_spec = dict(
apikey = dict(required=True),
dashboard = dict(required=False),
search_string = dict(required=False),
timespan = dict(required=False)
),
check_invalid_arguments=False,
add_file_common_args=True)
    # Import the Class
    import sys
    try:
import Meraki_Connector
except ImportError:
sys.path.append("/usr/share/ansible")
try:
import Meraki_Connector
HAS_LIB=True
except:
HAS_LIB=False
module.fail_json(msg="Import error of Meraki_Connector")
    # Handle arguments (value is None if an optional argument is not specified)
apikey = module.params["apikey"]
dashboard = module.params["dashboard"]
search_string = module.params["search_string"]
timespan = module.params["timespan"]
if dashboard:
meraki = Meraki_Connector.Connector(API_key=apikey, dashboard=dashboard)
else:
meraki = Meraki_Connector.Connector(API_key=apikey)
if timespan:
meraki.set_parameters(timespan=timespan)
if search_string:
meraki.set_parameters(search_string=search_string)
# Gather facts from the cloud
if meraki.locate_device():
module.exit_json(**meraki.result)
else:
module.fail_json(msg="%s %s" % (meraki.app_run_status, meraki.progress))
from ansible.module_utils.basic import *
main()
#
``` |
{
"source": "joelwright-dev/the-weird-world",
"score": 3
} |
#### File: joelwright-dev/the-weird-world/button.py
```python
import pygame
from text import Text
class Button(pygame.sprite.Sprite):
def __init__(self, text, text_color, button_color, surface, screen, size, pos, click):
super().__init__()
self.surface = surface
self.screen = screen
self.size = size
self.pos = pos
self.click = click
self.button_color = button_color
self.text = Text(text, text_color, self.surface, (pos[0]+1, pos[1]+1), self.size)
self.image = pygame.Surface((self.text.textobj.get_rect().width + 3, self.text.textobj.get_rect().height + 3))
self.rect = self.image.get_rect(center = pos)
def draw(self):
self.surface.blit(self.image, self.rect)
self.border = pygame.image.load('graphics/gui/buttons/button_border.png')
self.border_rect_left = self.border.get_rect(center = (self.pos[0]-(self.image.get_rect().width/2)-2, self.pos[1]))
self.border_rect_right = self.border.get_rect(center = ((self.pos[0])+(self.image.get_rect().width/2)+2, self.pos[1]))
self.image.fill(self.button_color)
self.rect = self.image.get_rect(center = self.pos)
self.text.draw()
self.surface.blit(self.border, self.border_rect_left)
self.surface.blit(pygame.transform.flip(self.border,True, True), self.border_rect_right)
def update(self):
self.draw()
mpos = list(pygame.mouse.get_pos())
ratio_x = (self.screen.get_rect().width / self.surface.get_rect().width)
ratio_y = (self.screen.get_rect().height / self.surface.get_rect().height)
scaled_pos = (mpos[0] / ratio_x, mpos[1] / ratio_y)
if self.rect.collidepoint(scaled_pos):
if self.click:
return True
```
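A hedged sketch of wiring the `Button` class above into a pygame loop follows. The window size, colors, and text are invented for illustration, and the border image and font that `Button`/`Text` load from `graphics/...` must exist on disk for it to run.
```python
# Illustrative wiring of the Button class above into a pygame loop. Asset paths
# (border image, font) come from the class itself and must exist for this to run.
import pygame
from button import Button

pygame.init()
screen = pygame.display.set_mode((640, 480))
surface = pygame.Surface((320, 240))          # low-res surface scaled up each frame

button = Button('PLAY', (255, 255, 255), (40, 40, 40),
                surface, screen, size=8, pos=(160, 120), click=False)

running = True
while running:
    button.click = False
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            button.click = True
    surface.fill((0, 0, 0))
    if button.update():                       # returns True when clicked
        print('button pressed')
    screen.blit(pygame.transform.scale(surface, screen.get_size()), (0, 0))
    pygame.display.update()
pygame.quit()
```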
#### File: joelwright-dev/the-weird-world/text.py
```python
import pygame
class Text(pygame.sprite.Sprite):
def __init__(self, text, color, surface, pos, size):
super().__init__()
font = pygame.font.Font('graphics/fonts/WayfarersToyBoxRegular-gxxER.ttf', size)
self.textobj = font.render(text, 1, color)
self.textrect = self.textobj.get_rect(center = pos)
self.surface = surface
def draw(self):
self.surface.blit(self.textobj, self.textrect)
```
#### File: joelwright-dev/the-weird-world/tiles.py
```python
import pygame
class Tile(pygame.sprite.Sprite):
def __init__(self, pos, size, type):
super().__init__()
self.image = pygame.Surface((size,size))
self.type = type
if type == 'C':
#CENTER
self.image = pygame.image.load('graphics/level/center.png')
elif type == 'X':
#PLATFORM MIDDLE
self.image = pygame.image.load('graphics/level/platformcenter.png')
elif type == 'M':
#RIGHT PLATFORM
self.image = pygame.image.load('graphics/level/platformright.png')
elif type == 'K':
#LEFT PLATFORM
self.image = pygame.image.load('graphics/level/platformleft.png')
elif type == 'T':
#TOP
self.image = pygame.image.load('graphics/level/edge.png')
elif type == 'H':
#TOP RIGHT
self.image = pygame.transform.rotate(pygame.image.load('graphics/level/corner.png'), -90)
elif type == 'I':
#TOP LEFT
self.image = pygame.image.load('graphics/level/corner.png')
elif type == 'R':
#RIGHT
self.image = pygame.transform.rotate(pygame.image.load('graphics/level/edge.png'), -90)
elif type == 'L':
#LEFT
self.image = pygame.transform.rotate(pygame.image.load('graphics/level/edge.png'), 90)
elif type == 'B':
#BOTTOM
self.image = pygame.transform.rotate(pygame.image.load('graphics/level/edge.png'), 180)
elif type == 'G':
#BOTTOM RIGHT
self.image = pygame.transform.rotate(pygame.image.load('graphics/level/corner.png'), 180)
elif type == 'J':
#BOTTOM LEFT
self.image = pygame.transform.rotate(pygame.image.load('graphics/level/corner.png'), 90)
elif type == 'N':
#BORDER
self.image = pygame.transform.rotate(pygame.image.load('graphics/level/border.png'), 180)
elif type == 'D':
#DRIP
self.image = pygame.image.load('graphics/level/drip.png')
elif type == 'F':
#FOLIAGE
self.image = pygame.image.load('graphics/level/foliage.png')
elif type == 'Q':
#GRASS
self.image = pygame.image.load('graphics/level/grass.png')
elif type == 'S':
#ROCK
self.image = pygame.image.load('graphics/level/rock.png')
self.rect = self.image.get_rect(topleft = pos)
def update(self, x_shift):
self.rect.x += x_shift
``` |
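Since `Tile` above picks its sprite from a one-letter code, a level can be described as rows of characters and expanded into a sprite group. The layout rows and tile size in the sketch below are assumptions for illustration, and the `graphics/level/*.png` assets referenced by `Tile` must exist.
```python
# Illustrative level construction for the Tile class above. The layout rows and
# tile size are assumptions for the sketch; the PNG assets referenced by Tile
# must exist on disk, and ' ' (space) is treated as an empty cell.
import pygame
from tiles import Tile

TILE_SIZE = 16
LAYOUT = [
    'ITTTTH',
    'LCCCCR',
    'JBBBBG',
]

def build_level(layout, tile_size):
    tiles = pygame.sprite.Group()
    for row_index, row in enumerate(layout):
        for col_index, cell in enumerate(row):
            if cell == ' ':
                continue
            pos = (col_index * tile_size, row_index * tile_size)
            tiles.add(Tile(pos, tile_size, cell))
    return tiles

if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((96, 48))
    level = build_level(LAYOUT, TILE_SIZE)
    level.update(0)            # Tile.update(x_shift) scrolls the level
    level.draw(screen)
    pygame.display.update()
```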
{
"source": "joelyancey/neuroglancer",
"score": 3
} |
#### File: python/examples/agglomeration_split_tool_csv_to_sqlite.py
```python
import pandas
import sqlite3
import neuroglancer.equivalence_map
import argparse
import numpy as np
def load_edges2(path, include_agglo_id=False):
edges = []
dtype = {'segment_a': np.uint64, 'segment_b': np.uint64, 'score': np.float64, 'x': np.int64, 'y': np.int64, 'z': np.int64}
if include_agglo_id:
dtype['agglo_id'] = np.uint64
df = pandas.read_csv(path, sep=',', dtype=dtype)
return df
def write_db(edges_csv_path, output_path, include_agglo_id=False):
print('Loading edges')
edges = load_edges2(edges_csv_path, include_agglo_id=include_agglo_id)
all_eqs = neuroglancer.equivalence_map.EquivalenceMap()
print('Creating equivalence map for agglomeration')
for a, b in edges[['segment_a', 'segment_b']].values:
all_eqs.union(a, b)
conn = sqlite3.connect(output_path)
c = conn.cursor()
c.execute('CREATE TABLE supervoxels (supervoxel_id INTEGER, agglo_id INTEGER)')
c.execute('CREATE INDEX supervoxels_by_supervoxel_id_index ON supervoxels (supervoxel_id)')
c.execute('CREATE INDEX supervoxels_by_agglo_id_index ON supervoxels (agglo_id)')
c.execute('CREATE TABLE edges (agglo_id INTEGER, segment_a INTEGER, segment_b INTEGER, score REAL, x INTEGER, y INTEGER, z INTEGER)')
c.execute('CREATE INDEX edges_by_agglo_id_index ON edges (agglo_id)')
print('Writing supervoxels table')
c.executemany('INSERT INTO supervoxels VALUES (?,?)',
((int(x), int(all_eqs[x])) for x in all_eqs.keys()))
print('Writing edges table')
c.executemany(
'INSERT INTO edges VALUES (?, ?, ?, ?, ?, ?, ?)',
((int(all_eqs[segment_a]), int(segment_a), int(segment_b), float(score), int(x), int(y), int(z))
for (segment_a, segment_b), score,
(x, y, z) in zip(edges[['segment_a', 'segment_b']].values, edges['score']
.values, edges[['x', 'y', 'z']].values)))
print('Committing')
conn.commit()
conn.close()
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('csv', help='Path to CSV file specifying edges.')
ap.add_argument('db', help='Output path to sqlite3 db.')
args = ap.parse_args()
write_db(args.csv, args.db)
```
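Once `write_db` above has produced the sqlite file, the `supervoxels` and `edges` tables can be queried directly. A short sketch, with the database path and supervoxel id as placeholders:
```python
# Query sketch for the database written by write_db() above; 'agglo.db' and the
# example supervoxel id are placeholders.
import sqlite3

conn = sqlite3.connect('agglo.db')
c = conn.cursor()

# Map a supervoxel to its agglomerated component.
c.execute('SELECT agglo_id FROM supervoxels WHERE supervoxel_id = ?', (12345,))
row = c.fetchone()
if row is not None:
    agglo_id = row[0]
    # All merge edges inside that component, strongest first.
    c.execute('SELECT segment_a, segment_b, score, x, y, z '
              'FROM edges WHERE agglo_id = ? ORDER BY score DESC', (agglo_id,))
    for segment_a, segment_b, score, x, y, z in c.fetchall():
        print(segment_a, segment_b, score, (x, y, z))
conn.close()
```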
#### File: python/examples/example_coordinate_arrays.py
```python
from __future__ import print_function
import argparse
import numpy as np
import neuroglancer
import neuroglancer.cli
def add_example_layers(state):
a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255
b = np.cast[np.uint32](np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10))
b = np.pad(b, 1, 'constant')
dimensions = neuroglancer.CoordinateSpace(
names=['x', 'y', 'z', 'c'],
units=['nm', 'nm', 'nm', ''],
scales=[10, 10, 10, 1],
coordinate_arrays=[
None,
None,
None,
neuroglancer.CoordinateArray(labels=['red', 'green', 'blue']),
])
state.dimensions = dimensions
state.layers.append(
name='a',
layer=neuroglancer.LocalVolume(
data=a,
dimensions=neuroglancer.CoordinateSpace(
names=['c', 'x', 'y', 'z'],
units=['', 'nm', 'nm', 'nm'],
scales=[1, 10, 10, 10],
),
voxel_offset=(0, 20, 30, 15),
))
return a, b
if __name__ == '__main__':
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
a, b = add_example_layers(s)
print(viewer)
```
#### File: python/examples/example_toggle_visibility.py
```python
from __future__ import print_function
import argparse
import neuroglancer
import neuroglancer.cli
if __name__ == '__main__':
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
viewer = neuroglancer.Viewer()
def toggle_visibility(s):
with viewer.txn() as s:
if s.layers['a'].visible == True:
s.layers['a'].visible = False
print('Setting visibility to false')
else:
s.layers['a'].visible = True
print('Setting visibility to true')
viewer.actions.add('toggle-visibility', toggle_visibility)
with viewer.config_state.txn() as s:
s.input_event_bindings.viewer['keys'] = 'toggle-visibility'
with viewer.txn() as s:
s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y"], units="nm", scales=[1, 1])
s.position = [150, 150]
s.layers.append(
name="a",
layer=neuroglancer.LocalAnnotationLayer(
dimensions=s.dimensions,
annotations=[
neuroglancer.PointAnnotation(
id='1',
point=[150, 150],
),
],
shader='''
void main() {
setColor(prop_color());
setPointMarkerSize(prop_size());
}
''',
),
)
s.layout = 'xy'
s.selected_layer.layer = 'a'
print(viewer)
```
#### File: python/examples/flood_filling_simulation.py
```python
import argparse
import random
import time
import threading
import neuroglancer
import neuroglancer.cli
import cloudvolume
import zarr
import numpy as np
import scipy.ndimage
class InteractiveInference(object):
def __init__(self):
viewer = self.viewer = neuroglancer.Viewer()
self.gt_vol = cloudvolume.CloudVolume(
'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
mip=0,
bounded=True,
progress=False,
provenance={})
viewer.actions.add('start-fill', self._start_fill_action)
viewer.actions.add('stop-fill', self._stop_fill_action)
self.dimensions = neuroglancer.CoordinateSpace(
names=['x', 'y', 'z'],
units='nm',
scales=[8, 8, 8],
)
with viewer.config_state.txn() as s:
s.input_event_bindings.data_view['shift+mousedown0'] = 'start-fill'
s.input_event_bindings.data_view['keyt'] = 'stop-fill'
with viewer.txn() as s:
s.layers['image'] = neuroglancer.ImageLayer(
source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
)
s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
)
s.layers['ground_truth'].visible = False
self.flood_fill_event = None
def _do_flood_fill(self, initial_pos, inf_results, inf_volume, event):
initial_pos = (int(initial_pos[0]), int(initial_pos[1]), int(initial_pos[2]))
gt_vol_zarr = zarr.zeros(
self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint64)
gt_blocks_seen = set()
block_size = np.array((64, 64, 64), np.int64)
def fetch_gt_block(block):
spos = block * block_size
epos = spos + block_size
slice_expr = np.s_[int(spos[0]):int(epos[0]),
int(spos[1]):int(epos[1]),
int(spos[2]):int(epos[2])]
gt_data = self.gt_vol[slice_expr][..., 0]
gt_vol_zarr[slice_expr] = gt_data
def get_patch(spos, epos):
spos = np.array(spos)
epos = np.array(epos)
sblock = spos // block_size
eblock = (epos - 1) // block_size
for blockoff in np.ndindex(tuple(eblock - sblock + 1)):
block = np.array(blockoff) + sblock
block_tuple = tuple(block)
if block_tuple in gt_blocks_seen: continue
gt_blocks_seen.add(block_tuple)
fetch_gt_block(block)
slice_expr = np.s_[int(spos[0]):int(epos[0]),
int(spos[1]):int(epos[1]),
int(spos[2]):int(epos[2])]
result = gt_vol_zarr[slice_expr]
return result
segment_id = self.gt_vol[initial_pos][0]
patch_size = np.array((33, ) * 3, np.int64)
lower_bound = patch_size // 2
upper_bound = np.array(self.gt_vol.bounds.to_list()[3:]) - patch_size + patch_size // 2
d = 8
seen = set()
q = []
last_invalidate = [time.time()]
invalidate_interval = 3
def enqueue(pos):
if np.any(pos < lower_bound) or np.any(pos >= upper_bound): return
if pos in seen: return
seen.add(pos)
q.append(pos)
def update_view():
if event.is_set():
return
cur_time = time.time()
if cur_time < last_invalidate[0] + invalidate_interval:
return
last_invalidate[0] = cur_time
inf_volume.invalidate()
with self.viewer.txn() as s:
s.layers['points'].annotations = [
neuroglancer.PointAnnotation(id=repr(pos), point=pos) for pos in list(seen)
]
def process_pos(pos):
spos = pos - patch_size // 2
epos = spos + patch_size
slice_expr = np.s_[int(spos[0]):int(epos[0]),
int(spos[1]):int(epos[1]),
int(spos[2]):int(epos[2])]
gt_data = get_patch(spos, epos)
mask = gt_data == segment_id
for offset in ((0, 0, d), (0, 0, -d), (0, d, 0), (0, -d, 0), (d, 0, 0), (-d, 0, 0)):
if not mask[tuple(patch_size // 2 + offset)[::-1]]: continue
new_pos = np.array(pos) + np.array(offset)
enqueue(tuple(new_pos))
dist_transform = scipy.ndimage.morphology.distance_transform_edt(~mask)
inf_results[slice_expr] = 1 + np.cast[np.uint8](
np.minimum(dist_transform, 5) / 5.0 * 254)
self.viewer.defer_callback(update_view)
enqueue(initial_pos)
while len(q) > 0 and not event.is_set():
i = random.randint(0, len(q) - 1)
pos = q[i]
q[i] = q[-1]
del q[-1]
process_pos(pos)
self.viewer.defer_callback(update_view)
def _stop_flood_fill(self):
if self.flood_fill_event is not None:
self.flood_fill_event.set()
self.flood_fill_event = None
def _start_flood_fill(self, pos):
self._stop_flood_fill()
inf_results = zarr.zeros(
self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8)
inf_volume = neuroglancer.LocalVolume(
data=inf_results, dimensions=self.dimensions)
with self.viewer.txn() as s:
s.layers['points'] = neuroglancer.LocalAnnotationLayer(self.dimensions)
s.layers['inference'] = neuroglancer.ImageLayer(
source=inf_volume,
shader='''
void main() {
float v = toNormalized(getDataValue(0));
vec4 rgba = vec4(0,0,0,0);
if (v != 0.0) {
rgba = vec4(colormapJet(v), 1.0);
}
emitRGBA(rgba);
}
''',
)
self.flood_fill_event = threading.Event()
t = threading.Thread(
target=self._do_flood_fill,
kwargs=dict(
initial_pos=pos,
inf_results=inf_results,
inf_volume=inf_volume,
event=self.flood_fill_event,
))
t.daemon = True
t.start()
def _start_fill_action(self, action_state):
pos = action_state.mouse_voxel_coordinates
if pos is None:
return
self._start_flood_fill(pos)
def _stop_fill_action(self, action_state):
self._stop_flood_fill()
if __name__ == '__main__':
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
inf = InteractiveInference()
print(inf.viewer)
while True:
time.sleep(1000)
```
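The traversal inside `_do_flood_fill` above keeps a frontier list and pops entries in random order by swapping the chosen index with the last element. The stripped-down sketch below isolates just that pattern on a small in-memory 2D mask instead of the CloudVolume-backed data.
```python
# Stand-alone sketch of the random-order frontier used by _do_flood_fill above,
# applied to a small in-memory 2D mask instead of the CloudVolume-backed data.
import random
import numpy as np

def flood_fill_random_order(mask, start):
    seen = set()
    q = []

    def enqueue(pos):
        y, x = pos
        if not (0 <= y < mask.shape[0] and 0 <= x < mask.shape[1]):
            return
        if not mask[y, x] or pos in seen:
            return
        seen.add(pos)
        q.append(pos)

    enqueue(start)
    while q:
        i = random.randint(0, len(q) - 1)   # pop a random frontier element
        pos = q[i]
        q[i] = q[-1]
        del q[-1]
        y, x = pos
        for dy, dx in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            enqueue((y + dy, x + dx))
    return seen

if __name__ == '__main__':
    mask = np.zeros((5, 5), dtype=bool)
    mask[1:4, 1:4] = True
    print(sorted(flood_fill_random_order(mask, (2, 2))))   # the 3x3 block
```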
#### File: python/neuroglancer/screenshot.py
```python
from __future__ import absolute_import
import os
class ScreenshotSaver(object):
def __init__(self, viewer, directory):
self.viewer = viewer
self.directory = directory
if not os.path.exists(directory):
os.makedirs(directory)
self.index = 0
def get_path(self, index):
return os.path.join(self.directory, '%07d.png' % index)
def get_next_path(self, index=None):
if index is None:
index = self.index
return index, self.get_path(index)
def capture(self, index=None):
s = self.viewer.screenshot()
increment_index = index is None
index, path = self.get_next_path(index)
with open(path, 'wb') as f:
f.write(s.screenshot.image)
if increment_index:
self.index += 1
return index, path
```
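`ScreenshotSaver` only needs a viewer and an output directory; each `capture()` writes the next numbered PNG. A short usage sketch follows; the directory name is arbitrary and a browser must be connected to the viewer URL or `capture()` will block.
```python
# Usage sketch for ScreenshotSaver above; the directory name is arbitrary and a
# running browser session connected to the viewer is required for screenshots.
import neuroglancer
from neuroglancer.screenshot import ScreenshotSaver

viewer = neuroglancer.Viewer()
saver = ScreenshotSaver(viewer, 'screenshots')
print(viewer)                      # open this URL in a browser first

index, path = saver.capture()      # blocks until the browser returns an image
print('wrote frame %d to %s' % (index, path))
```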
#### File: neuroglancer/tool/filter_bodies.py
```python
from __future__ import division
import json
import os
import copy
import collections
import argparse
import csv
import neuroglancer
import neuroglancer.cli
import numpy as np
class State(object):
def __init__(self, path):
self.path = path
self.body_labels = collections.OrderedDict()
def load(self):
if os.path.exists(self.path):
with open(self.path, 'r') as f:
self.body_labels = collections.OrderedDict(json.load(f))
def save(self):
tmp_path = self.path + '.tmp'
with open(tmp_path, 'w') as f:
f.write(json.dumps(self.body_labels.items()))
os.rename(tmp_path, self.path)
Body = collections.namedtuple('Body', ['segment_id', 'num_voxels', 'bbox_start', 'bbox_size'])
class Tool(object):
def __init__(self, state_path, bodies, labels, segmentation_url, image_url, num_to_prefetch):
self.state = State(state_path)
self.num_to_prefetch = num_to_prefetch
self.viewer = neuroglancer.Viewer()
self.bodies = bodies
self.state.load()
self.total_voxels = sum(x.num_voxels for x in bodies)
self.cumulative_voxels = np.cumsum([x.num_voxels for x in bodies])
with self.viewer.txn() as s:
s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
s.layers['segmentation'] = neuroglancer.SegmentationLayer(source=segmentation_url)
s.show_slices = False
s.concurrent_downloads = 256
s.gpu_memory_limit = 2 * 1024 * 1024 * 1024
s.layout = '3d'
key_bindings = [
['bracketleft', 'prev-index'],
['bracketright', 'next-index'],
['home', 'first-index'],
['end', 'last-index'],
['control+keys', 'save'],
]
label_keys = ['keyd', 'keyf', 'keyg', 'keyh']
for label, label_key in zip(labels, label_keys):
key_bindings.append([label_key, 'label-%s' % label])
def label_func(s, label=label):
self.set_label(s, label)
self.viewer.actions.add('label-%s' % label, label_func)
self.viewer.actions.add('prev-index', self._prev_index)
self.viewer.actions.add('next-index', self._next_index)
self.viewer.actions.add('first-index', self._first_index)
self.viewer.actions.add('last-index', self._last_index)
self.viewer.actions.add('save', self.save)
with self.viewer.config_state.txn() as s:
for key, command in key_bindings:
s.input_event_bindings.viewer[key] = command
s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
for key, command in key_bindings))
self.index = -1
self.set_index(self._find_one_after_last_labeled_index())
def _find_one_after_last_labeled_index(self):
body_index = 0
while self.bodies[body_index].segment_id in self.state.body_labels:
body_index += 1
return body_index
def set_index(self, index):
if index == self.index:
return
body = self.bodies[index]
self.index = index
def modify_state_for_body(s, body):
s.layers['segmentation'].segments = frozenset([body.segment_id])
s.voxel_coordinates = body.bbox_start + body.bbox_size // 2
with self.viewer.txn() as s:
modify_state_for_body(s, body)
prefetch_states = []
for i in range(self.num_to_prefetch):
prefetch_index = self.index + i + 1
if prefetch_index >= len(self.bodies):
break
prefetch_state = copy.deepcopy(self.viewer.state)
prefetch_state.layout = '3d'
modify_state_for_body(prefetch_state, self.bodies[prefetch_index])
prefetch_states.append(prefetch_state)
with self.viewer.config_state.txn() as s:
s.prefetch = [
neuroglancer.PrefetchState(state=prefetch_state, priority=-i)
for i, prefetch_state in enumerate(prefetch_states)
]
label = self.state.body_labels.get(body.segment_id, '')
with self.viewer.config_state.txn() as s:
s.status_messages['status'] = (
'[Segment %d/%d : %d/%d voxels labeled = %.3f fraction] label=%s' %
(index, len(self.bodies), self.cumulative_voxels[index], self.total_voxels,
self.cumulative_voxels[index] / self.total_voxels, label))
def save(self, s):
self.state.save()
def set_label(self, s, label):
self.state.body_labels[self.bodies[self.index].segment_id] = label
self.set_index(self.index + 1)
def _first_index(self, s):
self.set_index(0)
def _last_index(self, s):
self.set_index(max(0, self._find_one_after_last_labeled_index() - 1))
def _next_index(self, s):
self.set_index(self.index + 1)
def _prev_index(self, s):
self.set_index(max(0, self.index - 1))
if __name__ == '__main__':
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
ap.add_argument('--image-url', required=True, help='Neuroglancer data source URL for image')
ap.add_argument('--segmentation-url',
required=True,
help='Neuroglancer data source URL for segmentation')
ap.add_argument('--state', required=True, help='Path to proofreading state file')
ap.add_argument('--bodies', required=True, help='Path to list of bodies to proofread')
ap.add_argument('--labels', nargs='+', help='Labels to use')
ap.add_argument('--prefetch', type=int, default=10, help='Number of bodies to prefetch')
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
bodies = []
with open(args.bodies, 'r') as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
bodies.append(
Body(
segment_id=int(row['id']),
num_voxels=int(row['num_voxels']),
bbox_start=np.array([
int(row['bbox.start.x']),
int(row['bbox.start.y']),
int(row['bbox.start.z'])
],
dtype=np.int64),
bbox_size=np.array(
[int(row['bbox.size.x']),
int(row['bbox.size.y']),
int(row['bbox.size.z'])],
dtype=np.int64),
))
tool = Tool(
state_path=args.state,
image_url=args.image_url,
segmentation_url=args.segmentation_url,
labels=args.labels,
bodies=bodies,
num_to_prefetch=args.prefetch,
)
print(tool.viewer)
```
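The `--bodies` file parsed at the bottom of the script above is a CSV whose columns are `id`, `num_voxels`, and the `bbox.start.*`/`bbox.size.*` fields. The sketch below writes a file in that shape; the two rows are made-up values for illustration.
```python
# Sketch of a --bodies CSV in the column layout read by the csv.DictReader loop
# above; the two rows are made-up values for illustration.
import csv

fieldnames = ['id', 'num_voxels',
              'bbox.start.x', 'bbox.start.y', 'bbox.start.z',
              'bbox.size.x', 'bbox.size.y', 'bbox.size.z']

rows = [
    {'id': 101, 'num_voxels': 250000,
     'bbox.start.x': 10, 'bbox.start.y': 20, 'bbox.start.z': 30,
     'bbox.size.x': 64, 'bbox.size.y': 64, 'bbox.size.z': 64},
    {'id': 102, 'num_voxels': 90000,
     'bbox.start.x': 200, 'bbox.start.y': 40, 'bbox.start.z': 8,
     'bbox.size.x': 32, 'bbox.size.y': 48, 'bbox.size.z': 16},
]

with open('bodies.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(rows)
```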
#### File: python/tests/viewer_state_roundtrip_test.py
```python
import numpy as np
import neuroglancer
import threading
import pytest
def test_mesh_silhouette(webdriver):
with webdriver.viewer.txn() as s:
s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
units="nm",
scales=[1, 1, 1])
s.layers.append(
name='a',
layer=neuroglancer.SegmentationLayer(source=neuroglancer.LocalVolume(
data=np.zeros((10, 10, 10), dtype=np.uint8), dimensions=s.dimensions),
mesh_silhouette_rendering=2),
)
state = webdriver.sync()
assert state.layers['a'].mesh_silhouette_rendering == 2
def test_layer_subsources(webdriver):
with webdriver.viewer.txn() as s:
s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
units="nm",
scales=[1, 1, 1])
s.layers.append(
name='a',
layer=neuroglancer.SegmentationLayer(
source=neuroglancer.LayerDataSource(url=neuroglancer.LocalVolume(
data=np.zeros((10, 10, 10), dtype=np.uint8), dimensions=s.dimensions),
enable_default_subsources=False,
subsources={
'default': True,
'bounds': False,
'meshes': False
})),
)
state = webdriver.sync()
assert state.layers['a'].source[0].subsources['default'].enabled == True
assert 'bounds' not in state.layers['a'].source[0].subsources
assert 'meshes' not in state.layers['a'].source[0].subsources
assert state.layers['a'].source[0].enable_default_subsources == False
with webdriver.viewer.txn() as s:
s.layers[0].source[0].enable_default_subsources = True
s.layers[0].source[0].subsources['bounds'] = False
s.layers[0].source[0].subsources['meshes'] = False
state = webdriver.sync()
assert state.layers[0].source[0].enable_default_subsources == True
assert sorted(state.layers[0].source[0].subsources.keys()) == ['bounds', 'meshes']
assert state.layers[0].source[0].subsources['bounds'].enabled == False
assert state.layers[0].source[0].subsources['meshes'].enabled == False
``` |
{
"source": "joelYing/Graduation-Design",
"score": 3
} |
#### File: Graduation-Design/5i5j/wiwj.py
```python
import re
from random import choice
import requests
class Wiwj(object):
def __init__(self):
"""
75163套 按每页30个 共有2506页
"""
self.start_url = 'https://sh.5i5j.com/ershoufang/'
# self.proxies = {"http": "http://localhost:1080", "https": "http://localhost:1080", }
def jpg_tool(self, text):
house_jpg = ''
if 'src' in text:
house_jpg = re.findall(r'src="(.*?)".*?', text, re.S)[0]
return house_jpg
def div_tool(self, text):
text = re.sub('<.*?>| i |<!--|-->', ' ', text)
return text
@staticmethod
def getadsl(res):
""" 随机取ip """
proxies = {"http": "http://" + choice(res['data']), }
# print(proxies)
return proxies
def gethouselist(self):
s = requests.session()
res = s.get('http://', headers={}).json()
r = s.get(self.start_url, proxies=self.getadsl(res))
print(r.text)
# 二手房url,封面图片(如果有src在内则有图片),标题,第一行,第二行,第三行,总价,单价,标签
basic_info_list = re.findall(r'<div class="listImg".*?><a href="(.*?)" target="_blank">.*?<img class='
r'"lazy" (.*?)title="(.*?)".*?>.*?<!-- <p>.*?</p> -->.*?<i class="i_01">'
r'</i>(.*?)</p>.*?<i class="i_02"></i>(.*?)</p>.*?<i class="i_03"></i>(.*?)</p>'
r'.*?<p class="redC">(.*?)</p>.*?<p>.*?(\d+).*?</p>.*?<div class="listTag">(.*?)<'
r'/div>', r.text, re.S)
if basic_info_list:
for basic_info in basic_info_list:
# print(basic_info)
house_url = 'https://sh.5i5j.com' + basic_info[0]
house_jpg = self.jpg_tool(basic_info[1])
house_title = basic_info[2]
first_line = basic_info[3].split(" · ")
# house_type = first_line[0]
# house_m2 = first_line[1]
# house_direction = first_line[2]
second_line = basic_info[4].split(" · ")
for i in range(0, len(second_line)):
second_line[i] = self.div_tool(second_line[i])
third_line = basic_info[5].split(" · ")
house_price = self.div_tool(basic_info[6])
house_m2_price = basic_info[7]
house_tag = self.div_tool(basic_info[8])
# print(second_line)
print(house_url, house_jpg, house_title, first_line, second_line, third_line, house_price, house_m2_price, house_tag)
if __name__ == '__main__':
wiwj = Wiwj()
wiwj.gethouselist()
``` |
{
"source": "Joemag/rl-course",
"score": 3
} |
#### File: rl-course/ex06-plan/ex06-plan.py
```python
import gym
import copy
import random
import numpy as np
import matplotlib.pyplot as plt
import cProfile, pstats, io
from pstats import SortKey
class Node:
def __init__(self, parent=None, action=None):
self.parent = parent # parent of this node
self.action = action # action leading from parent to this node
self.children = []
self.sum_value = 0. # sum of values observed for this node, use sum_value/visits for the mean
self.visits = 0
def rollout(env, maxsteps=100):
""" Random policy for rollouts """
G = 0
for i in range(maxsteps):
action = env.action_space.sample()
_, reward, terminal, _ = env.step(action)
G += reward
if terminal:
return G
return G
longest_path_plot = []
longest_path = 0
def mcts(env, root, maxiter=500):
""" TODO: Use this function as a starting point for implementing Monte Carlo Tree Search
"""
# this is an example of how to add nodes to the root for all possible actions:
#root.children = [Node(root, a) for a in range(env.action_space.n)]
global longest_path_plot
global longest_path
for i in range(maxiter):
# state = copy.deepcopy(env)
state = gym.make("Taxi-v3")
state.reset()
state.env.s = env.env.s
state.env.lastaction = env.env.lastaction
G = 0.
terminal = False
path_length = 0
# traverse the tree using an epsilon greedy tree policy
node = root
while len(node.children) > 0:
if np.random.random() < 0.1:
node = random.choice(node.children)
else:
values = [c.sum_value/(c.visits+0.0000001) for c in node.children] # calculate values for child actions
node = node.children[np.argmax(values)] # select the best child
path_length +=1
_, reward, terminal, _ = state.step(node.action)
G += reward
if path_length > longest_path:
longest_path = path_length
longest_path_plot.append(longest_path)
# Expansion of tree
if not terminal:
# add nodes to the root for all possible actions:
node.children = [Node(node, a) for a in range(env.action_space.n)]
# This performs a rollout (Simulation):
if not terminal:
G += rollout(state)
# update all visited nodes in the tree
while node != None:
node.visits += 1
node.sum_value += G
node = node.parent
def main():
global longest_path
global longest_path_plot
env = gym.make("Taxi-v3")
env.seed(0) # use seed to make results better comparable
# run the algorithm 10 times:
rewards = []
for i in range(10):
longest_path_plot = []
mean_return = []
longest_path = 0
env.reset()
terminal = False
root = Node() # Initialize empty tree
sum_reward = 0.
while not terminal:
env.render()
mcts(env, root) # expand tree from root node using mcts
mean_return.append(root.sum_value/root.visits)
values = [c.sum_value/c.visits for c in root.children] # calculate values for child actions
bestchild = root.children[np.argmax(values)] # select the best child
_, reward, terminal, _ = env.step(bestchild.action) # perform action for child
root = bestchild # use the best child as next root
root.parent = None
longest_path -= 1
sum_reward += reward
rewards.append(sum_reward)
print("finished run " + str(i+1) + " with reward: " + str(sum_reward))
# plot the mean return over the number of episodes.
plt.subplot(2, 1, 1)
plt.plot(range(len(mean_return)), mean_return)
plt.xlabel("episode")
plt.ylabel("mean return")
# plot the length of the longest path over the number of iterations.
plt.subplot(2, 1, 2)
plt.plot(range(len(longest_path_plot)), longest_path_plot)
plt.xlabel("iteration")
plt.ylabel("longest path")
plt.show()
print("mean reward: ", np.mean(rewards))
if __name__ == "__main__":
main()
```
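The `mcts` implementation above sidesteps `copy.deepcopy(env)` (left commented out) by building a fresh `Taxi-v3` environment each iteration and copying over the two fields that define its state. A minimal sketch of that cloning step as a standalone helper; the helper name is illustrative and assumes the classic gym API where the wrapped env exposes `s` and `lastaction`:
```python
import gym


def clone_taxi_state(env):
    """Return a fresh Taxi-v3 environment whose internal state matches `env`.

    Sketch of the cloning done inline in mcts(); assumes env.env exposes
    the encoded state `s` and `lastaction`, as in classic gym Taxi-v3.
    """
    sim = gym.make("Taxi-v3")
    sim.reset()
    sim.env.s = env.env.s                    # copy the encoded taxi/passenger/destination state
    sim.env.lastaction = env.env.lastaction  # keep rendering and bookkeeping consistent
    return sim
```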
#### File: rl-course/ex08-nstep/ex08-nstep.py
```python
import gym
import numpy as np
import matplotlib.pyplot as plt
def nstep_sarsa(env, n=1, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
""" TODO: implement the n-step sarsa algorithm """
Q = np.zeros((env.observation_space.n, env.action_space.n))
def choose_epsilon_greedy_action(state):
if np.random.random() < epsilon:
return np.random.randint(env.action_space.n)
else:
#return np.argmax(Q[state,:])
return np.random.choice(np.flatnonzero(Q[state,:] == np.max(Q[state,:])))
episode_returns = []
for episode in range(num_ep):
S = [env.reset()]
A = [choose_epsilon_greedy_action(S[0])]
R = [0]
T = np.inf
t = 0
while True:
if t < T:
s, r, done, _ = env.step(A[t])
S.append(s)
R.append(r)
if done:
T = t + 1
else:
A.append(choose_epsilon_greedy_action(s))
theta = t - n + 1
if theta >= 0:
s_theta = S[theta]
a_theta = A[theta]
G = 0
for i in range(theta+1, int(min(theta + n, T)+1)):
G += gamma ** (i - theta - 1) * R[i]
if theta + n < T:
G = G + (gamma**n) * Q[S[theta+n], A[theta+n]]
Q[s_theta,a_theta] = Q[s_theta,a_theta] + alpha*(G - Q[s_theta,a_theta])
if theta == T - 1:
break
t += 1
episode_returns.append(sum(R))
return np.mean(episode_returns)
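# Explanatory note (not part of the original exercise template): the loop above
# builds the truncated n-step return
#   G = R_{tau+1} + gamma*R_{tau+2} + ... + gamma^(n-1)*R_{tau+n} + gamma^n * Q(S_{tau+n}, A_{tau+n}),
# dropping the bootstrap term whenever tau + n >= T (the episode ended first),
# and then nudges Q(S_tau, A_tau) towards G with step size alpha.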
# # play
# for i in range(1):
# done = False
# s = env.reset()
# while not done:
# env.render()
# a = np.argmax(Q[s,:])
# s, r, done, _ = env.step(a)
env=gym.make('FrozenLake-v0', map_name="8x8")
# TODO: run multiple times, evaluate the performance for different n and alpha
for n in [1,2,4,8,16]:#,32,64,128,256,512]:
alphas = np.linspace(0,1,10)
performances = []
for alpha in alphas:
performances.append(np.mean([nstep_sarsa(env,n,alpha,num_ep=100) for i in range(100)]))
plt.plot(alphas, performances, label=f"n={n}")
plt.legend()
plt.show()
``` |
{
"source": "joemarch010/NILBS",
"score": 2
} |
#### File: NiLBS/demo/scene.py
```python
import pyrender
import trimesh
import numpy as np
import os
from NiLBS.body.human_body import HumanBody
from NiLBS.occupancy.occupancy_function_mesh import OccupancyFunctionMesh
from NiLBS.occupancy.occupancy_function_rest_cached import OccupancyFunctionRestCached
from NiLBS.occupancy.voxel.util import extract_voxel_grid
from NiLBS.sampling.pose_sampler_amass import PoseSamplerAMASS
from NiLBS.skinning.skinning_mesh_lbs import LBSDeformer
from NiLBS.weighting.weighting_function import WeightingFunction
from NiLBS.weighting.weighting_function_pointwise import WeightingFunctionPointwise
from NiLBS.weighting.weighting_function_nearest_neighbour import WeightingFunctionNearestNeighbour
default_bm_path = os.path.join(os.path.dirname(__file__), '../data/female_body_model_smplh.npz')
default_pose_path = os.path.join(os.path.dirname(__file__), '../data/amass_mpi_sample_poses.npz')
body_model = HumanBody(body_dict_path=default_bm_path, active_bones=range(0, 22))
pose_sampler = PoseSamplerAMASS(body_model, np.load(default_pose_path))
poses = pose_sampler.sample_frames(10, step=50)
pose = poses[2]
def make_default_model_pose_scene():
"""
`
:return:
"""
result = pyrender.Scene()
weighting_function = WeightingFunctionPointwise(body_model.vertex_template, body_model.weights)
mesh_deformer = LBSDeformer(weighting_function)
deformed_vertices = mesh_deformer.apply_lbs(body_model.vertex_template, pose)
trimesh_mesh = trimesh.Trimesh(vertices=deformed_vertices, faces=body_model.faces,
vertex_colors=np.tile((1.0, 1.0, 0.4), (6890, 1)))
mesh = pyrender.Mesh.from_trimesh(trimesh_mesh)
result.add(mesh)
camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0)
camera_pose = np.eye(4)
camera_pose[:3, 3] = np.array([0, 0, 2.5])
result.add(camera, pose=camera_pose)
return result
def make_default_joint_view_scene():
"""
`
:return:
"""
result = pyrender.Scene()
for i in range(0, pose.bone_matrices.shape[0]):
joint = body_model.joints[i]
joint_homo = np.zeros((4, 1))
joint_homo[0] = joint[0]
joint_homo[1] = joint[1]
joint_homo[2] = joint[2]
joint_trans = np.matmul(pose.bone_matrices[i], joint_homo)
joint_pos = joint_trans[0:3, 0]
trimesh_mesh = trimesh.creation.box(np.array([0.1, 0.1, 0.1]),
trimesh.transformations.translation_matrix(joint_pos),
vertex_colors=np.tile((1.0, 0, 0), (8, 1)))
box_mesh = pyrender.Mesh.from_trimesh(trimesh_mesh)
result.add(box_mesh)
camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0)
camera_pose = np.eye(4)
camera_pose[:3, 3] = np.array([0, 0, 2.5])
result.add(camera, pose=camera_pose)
return result
def make_default_nilbs_scene():
result = pyrender.Scene()
posed_joints = np.zeros((pose.bone_matrices.shape[0], 3))
for i in range(0, pose.bone_matrices.shape[0]):
joint = body_model.joints[i]
joint_homo = np.zeros((4, 1))
joint_homo[0] = joint[0]
joint_homo[1] = joint[1]
joint_homo[2] = joint[2]
joint_trans = np.matmul(pose.bone_matrices[i], joint_homo)
joint_pos = joint_trans[0:3, 0]
posed_joints[i] = joint_pos
weighting_function = WeightingFunctionNearestNeighbour(posed_joints)
rest_trimesh_mesh = trimesh.Trimesh(vertices=body_model.vertex_template, faces=body_model.faces,
vertex_colors=np.tile((1.0, 1.0, 0.4), (6890, 1)))
rest_occupancy_function = OccupancyFunctionMesh(rest_trimesh_mesh)
posed_occupancy_function = OccupancyFunctionRestCached(rest_occupancy_function, pose, weighting_function)
voxel_dict = extract_voxel_grid(posed_occupancy_function, rest_trimesh_mesh.bounds, np.array([32, 32, 32]))
voxel_grid = voxel_dict['voxel_grid']
voxel_start = voxel_dict['voxel_start']
voxel_dimensions = voxel_dict['voxel_dimensions']
for i in range(0, voxel_grid.shape[0]):
for j in range(0, voxel_grid.shape[1]):
for k in range(0, voxel_grid.shape[2]):
if voxel_grid[i][j][k] > 0.5:
voxel_position = voxel_start + voxel_dimensions * np.array([i, j, k])
trimesh_mesh = trimesh.creation.box(voxel_dimensions,
trimesh.transformations.translation_matrix(voxel_position),
vertex_colors=np.tile((1.0, 1.0, 0.4), (8, 1)))
box_mesh = pyrender.Mesh.from_trimesh(trimesh_mesh)
result.add(box_mesh)
camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0)
camera_pose = np.eye(4)
camera_pose[:3, 3] = np.array([0, 0, 2.5])
result.add(camera, pose=camera_pose)
return result
```
#### File: NiLBS/mesh/mesh_occupancy.py
```python
import numpy as np
from trimesh.voxel.ops import points_to_marching_cubes
from trimesh.voxel.ops import points_to_indices
class MeshOccupancy:
"""
Mesh which is backed by an occupancy function.
"""
def __init__(self, occupancy_function, iso_level, bounds, resolution = [64, 64, 64]):
self.occupancy_function = occupancy_function
self.iso_level = iso_level
self.bounds = bounds
self.lower = bounds[0]
self.upper = bounds[1]
self.resolution = resolution
self.mesh = None
self.calculate_voxels()
def calculate_voxel_matrix(self, max):
"""
:param max:
:return:
"""
return
def calculate_voxels(self):
x_range = np.linspace(self.lower[0], self.upper[0], self.resolution[0])
y_range = np.linspace(self.lower[1], self.upper[1], self.resolution[1])
z_range = np.linspace(self.lower[2], self.upper[2], self.resolution[2])
xx, yy, zz = np.meshgrid(x_range, y_range, z_range)
xx = xx.flatten()
yy = yy.flatten()
zz = zz.flatten()
points = np.stack([xx, yy, zz], axis=-1)  # (N, 3) grid points; reshaping the (3, N) array would scramble coordinates
occupancy_mask = self.occupancy_function.evaluate_set(points)
inside_points = points[occupancy_mask > self.iso_level]
indices = points_to_indices(inside_points, pitch=1.0, origin=np.array([0.0, 0.0, 0.0]))
self.mesh = points_to_marching_cubes(inside_points * 32, pitch=1.0)
```
#### File: NiLBS/occupancy/occupancy_function_rest_cached.py
```python
import numpy as np
from NiLBS.skinning.skinning_mesh_lbs import LBSDeformer
from NiLBS.occupancy.occupancy_function import OccupancyFunction
class OccupancyFunctionRestCached(OccupancyFunction):
"""
Occupancy function backed by an OccupancyFunction at rest, a Pose, and a WeightingFunction, as described in the NiLBS
technical report.
"""
def __init__(self, rest_occupancy_function, pose, weighting_function):
self.rest_occupancy_function = rest_occupancy_function
self.pose = pose
self.weighting_function = weighting_function
self.lbs_deformer = LBSDeformer(weighting_function)
def evaluate(self, x):
weights = self.weighting_function.evaluate(x, self.pose)
x_reproj = self.lbs_deformer.invert_lbs(np.array([x]), self.pose)
return (1 - weights[weights.shape[0] - 1]) * self.rest_occupancy_function.evaluate(x_reproj)
def evaluate_set(self, X):
"""
Evaluate the function at a set of points
:param X: {(x, y, z)} (Numpy array-like)
:return: {o | o in [0, 1]}
"""
weight_set = self.weighting_function.evaluate_set(X, self.pose)
X_reproj = self.lbs_deformer.invert_lbs(X, self.pose)
E = self.rest_occupancy_function.evaluate_set(X_reproj)
result = (1 - weight_set[:, weight_set.shape[1] - 1]) * E
return result
```
#### File: NiLBS/sampling/point_sampler_surface.py
```python
import numpy as np
class PointSamplerSurface:
"""
Class which generates points sampled from locations on the surfaces of a mesh.
"""
def __init__(self, mesh, distribution='uniform', noise='none', sigma=0):
"""
:param mesh: Mesh, must have instance variables 'vertices' and 'faces'
:param distribution: String, must be one of {'uniform', 'poisson'}
:param noise: String, must be one of {'none', 'isotropic'}
:param sigma: Float, used when calculating noise
"""
self.mesh = mesh
self.distribution = distribution
self.noise = noise
self.sigma = sigma
def generate_isotropic_noise_samples(self, n):
result = np.random.normal(0, self.sigma, (n, 3))
return result
def generate_noise_samples(self, n):
if self.noise == 'isotropic':
return self.generate_isotropic_noise_samples(n)
else:
return np.zeros((n, 3))
def sample_points_uniform(self, n):
result = np.zeros((n, 3))
for i in range(0, n):
tri_index = np.random.randint(0, self.mesh.triangles.shape[0])
# Uniform barycentric coordinates via the square-root trick; independent,
# unnormalized coefficients would not place the sample on the triangle.
r1 = np.random.uniform(0.0, 1.0)
r2 = np.random.uniform(0.0, 1.0)
c1 = 1.0 - np.sqrt(r1)
c2 = np.sqrt(r1) * (1.0 - r2)
c3 = np.sqrt(r1) * r2
v1 = self.mesh.triangles[tri_index][0]
v2 = self.mesh.triangles[tri_index][1]
v3 = self.mesh.triangles[tri_index][2]
v = c1 * v1 + c2 * v2 + c3 * v3
result[i] = v
return result
def sample_points(self, n):
samples = None
if self.distribution == 'uniform':
samples = self.sample_points_uniform(n)
else:
samples = np.zeros((n, 3))
noise = self.generate_noise_samples(n)
return samples + noise
```
#### File: NiLBS/weighting/weighting_function_mlp.py
```python
from NiLBS.weighting.weighting_function import WeightingFunction
class WeightingFunctionMLP(WeightingFunction):
"""
Weighting function backed by an MLP
"""
def __init__(self, mlp):
self.mlp = mlp
def generate_query(self, x, pose):
return None
def generate_query_set(self, X, pose):
return None
def evaluate(self, x, pose):
return None
def evaluate_set(self, X, pose):
return None
```
#### File: NiLBS/weighting/weighting_function_mlp_rest_naive.py
```python
from NiLBS.weighting.weighting_function import WeightingFunction
import numpy as np
import tensorflow.keras as keras
class WeightingFunctionMLPRestNaive(WeightingFunction):
"""
Weighting function backed by a Keras regression network.
Queries are performed using the naive pose encoding.
"""
def __init__(self, model=None, model_path=None):
if model_path is not None:
self.model = keras.models.load_model(model_path)
else:
self.model = model
def generate_query(self, x, pose):
return x
def generate_query_set(self, X, pose):
return X
def evaluate(self, x, pose):
query = self.generate_query(x, pose)
return self.model.predict(np.array([query]))[0]
def evaluate_set(self, X, pose):
query_set = self.generate_query_set(X, pose)
return self.model.predict(query_set)
```
#### File: NiLBS/weighting/weighting_function_pointwise.py
```python
import numpy as np
from NiLBS.weighting.weighting_function import WeightingFunction
class WeightingFunctionPointwise(WeightingFunction):
"""
Weighting function which is defined point-wise at the vertices as the artist's defined vertex weights
and (1, 0, ..., 0) everywhere else.
"""
def __init__(self, vertices, weights):
"""
:param vertices: Numpy array-like, Vx3
:param weights: Numpy array-like, VxB
"""
self.point_map = dict()
self.n_weights = weights.shape[1]
self.n_bones = weights.shape[1]
for i in range(0, vertices.shape[0]):
self.point_map[vertices[i].data.tobytes()] = weights[i]
def generate_query(self, x, pose):
return x
def generate_query_set(self, X, pose):
return X
def evaluate(self, x, pose):
if x.data.tobytes() in self.point_map:
return self.point_map[x.data.tobytes()]
result = np.zeros((self.n_weights))
result[0] = 1
return result
def evaluate_set(self, X, pose):
result = np.zeros((X.shape[0], self.n_bones))
for i in range(0, X.shape[0]):
result[i] = self.evaluate(X[i], pose)
return result
``` |
{
"source": "joemarchese/PolyNanna",
"score": 3
} |
#### File: joemarchese/PolyNanna/test.py
```python
import unittest
import polynanna
import seed_db
from participants import participants, history
from pony.orm import *
class TestPolynanna(unittest.TestCase):
def setUp(self):
self.polyanna = polynanna.main()
def tearDown(self):
del self.polyanna
def test_drawing_validity(self):
for p in self.polyanna._participants:
with self.subTest(p=p):
self.assertIsNotNone(p.giving_to,
msg='giving_to not assigned for {}'.format(p.name))
self.assertNotIn(p.giving_to, p.restricted_set,
msg='Invalid {} giving_to'.format(p.name))
def test_restricted_sets(self):
"""Test that restricted sets are equal to the intersections of restricted_set and history sets."""
for p in self.polyanna._participants:
r_set = set(participants.get(p.name))|set([y[1] for y in history.get(p.name)])
with self.subTest(p=p):
self.assertEqual(p.restricted_set, r_set)
def test_participant_length(self):
"""Assert that participants equals the length of the input participants"""
self.assertEqual(len(self.polyanna._participants), len(participants.keys()))
def test_avg_failcount(self):
"""Find the failure rate of the selection algorithm."""
total_fails = 0
for drawing in range(1000):
total_fails += polynanna.main().failcount
print('Average Failcount: {}'.format(total_fails / 1000))
@unittest.skip('Skip Restricted Set Printing')
def test_print_all_restricted_set(self):
"""Print restricted sets for each participant"""
for p in self.polyanna._participants:
print(p.name, 'restricted_set:', p.restricted_set)
@unittest.skip('Skip Results Printing')
def test_print_results(self):
"""Print drawing results to the console."""
for participant in self.polyanna._participants:
print('{:<9} --> {}'.format(participant.name, participant.giving_to))
class TestDatabase(unittest.TestCase):
def setUp(self):
"""Note this runs a drawing automatically."""
self.polyanna = polynanna.main()
seed_db.main()
db = Database()
db.bind(provider='sqlite', filename='participants.db')
db.generate_mapping(create_tables=True)
def tearDown(self):
del self.polyanna
@db_session
def test_print_database(self):
"""Verify the database has only as many entries as the polyanna. Print the drawing results."""
self.assertEqual(len(self.polyanna._participants), len(seed_db.Participant.select()),
msg='Number of Participants is not equal to the number of Database Entries')
print('participants.db Results')
print('{:<9} ||| {} \n'.format('Name', 'Giving To'))
for p in seed_db.Participant.select():
print('{:<9} --> {} \n'.format(p.name, p.giving_to), end='')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joemarchese/transactions",
"score": 3
} |
#### File: joemarchese/transactions/app.py
```python
from flask import Flask, render_template, url_for, request, redirect
from models import db, Transaction
from forms import AddTransactionForm, DeleteTransactionForm
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///transactions.db'
db.init_app(app)
@app.route('/')
def index():
transactions = Transaction.query.all()
return render_template('index.html', transactions=transactions)
@app.route('/add', methods=['GET', 'POST'])
def add():
form = AddTransactionForm(request.form)
if request.method == 'POST' and form.validate():
with app.app_context():
transaction = Transaction(date=form.date.data,
kind=form.kind.data,
category=form.category.data,
sub_category=form.sub_category.data,
description=form.description.data,
amount=form.amount.data)
db.session.add(transaction)
db.session.commit()
return redirect(url_for('index'))
return render_template('add.html', form=form)
@app.route('/delete', methods=['GET', 'POST'])
def delete():
form = DeleteTransactionForm(request.form)
if request.method == 'POST' and form.validate():
with app.app_context():
transaction = Transaction.query.get_or_404(form.transaction_id.data)
db.session.delete(transaction)
db.session.commit()
return redirect(url_for('index'))
return render_template('delete.html', form=form)
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "joemarchionna/kissom",
"score": 3
} |
#### File: kissom/tests/testUtilSqlUpdate.py
```python
from os import name
import json
import unittest
from kissom.utils.sql import insertSql, updateSql, deleteSql, selectSql, getConditions
class TestUtilSqlUpdate(unittest.TestCase):
def test_a(self):
tn = "test.table"
opkeys = ["id"]
okeys = ["name", "age", "hometown"]
dKeys = ["full_name", "age", "home"]
ct = {"fieldName": "pk", "fieldValue": 1001}
r = {"id": 1001, "name": "Johnny", "age": 24, "hometown": "Monroe"}
sql, tpl = updateSql(
tableName=tn, objKeys=okeys, objPrimaryKeys=opkeys, dbKeys=dKeys, data=r, conditionTree=ct
)
self.assertEqual(
sql,
"UPDATE test.table SET full_name = %s, age = %s, home = %s WHERE pk = %s RETURNING full_name, age, home",
)
self.assertEqual(tpl, ("Johnny", 24, "Monroe", 1001))
def test_b(self):
tn = "test.table"
opkeys = ["id"]
okeys = ["name", "age", "hometown"]
dKeys = ["full_name", "age", "home"]
ct = {"fieldName": "pk", "fieldValue": 1001}
r = {"id": 1001, "hometown": "Monroe"}
sql, tpl = updateSql(
tableName=tn, objKeys=okeys, objPrimaryKeys=opkeys, dbKeys=dKeys, data=r, conditionTree=ct
)
self.assertEqual(sql, "UPDATE test.table SET home = %s WHERE pk = %s RETURNING full_name, age, home")
self.assertEqual(tpl, ("Monroe", 1001))
def test_c(self):
tn = "test.table"
opkeys = ["id"]
okeys = ["name", "age", "hometown"]
dKeys = ["full_name", "age", "home"]
ct = {
"operator": "AND",
"conditions": [{"fieldName": "org", "fieldValue": 199200}, {"fieldName": "pk", "fieldValue": 1001}],
}
r = {"coid": 199200, "id": 1001, "hometown": "Monroe"}
sql, tpl = updateSql(
tableName=tn, objKeys=okeys, objPrimaryKeys=opkeys, dbKeys=dKeys, data=r, conditionTree=ct
)
self.assertEqual(
sql, "UPDATE test.table SET home = %s WHERE (org = %s AND pk = %s) RETURNING full_name, age, home"
)
self.assertEqual(tpl, ("Monroe", 199200, 1001))
def test_d(self):
tn = "test.table"
opkeys = ["id"]
okeys = ["id", "parentId", "name", "age", "hometown", "status"]
dKeys = ["id", "parent", "full_name", "age", "home", "status"]
r = {"id": 2112, "status": "missing"}
ct = {
"operator": "OR",
"conditions": [
{"fieldName": "id", "fieldValue": r["id"]},
{"fieldName": "parentId", "fieldValue": r["id"]},
],
}
sql, tpl = updateSql(
tableName=tn, objKeys=okeys, objPrimaryKeys=opkeys, dbKeys=dKeys, data=r, conditionTree=ct
)
self.assertEqual(
sql,
"UPDATE test.table SET status = %s WHERE (id = %s OR parentId = %s) RETURNING id, parent, full_name, age, home, status",
)
self.assertEqual(tpl, ("missing", 2112, 2112))
```
#### File: kissom/utils/names.py
```python
def normalizeStoreNameToObj(name: str, removeChars: list = ["_", "-"], toLower: bool = False):
_nn = ""
capNext = False
for c in name:
if c in removeChars:
capNext = True
else:
if capNext:
_nn += c.upper()
capNext = False
else:
_nn += c
return _nn.lower() if toLower else _nn
def normalizeObjNameToStore(name: str, ignoreChars: str = "_-", delimiter: str = "_", toLower: bool = True):
_nn = ""
for c in name:
if c not in ignoreChars:
if c.isupper():
_nn += delimiter
_nn += c
return _nn.lower() if toLower else _nn
def combineFQTN(schemaName: str, tableName: str):
return "{}.{}".format(schemaName, tableName)
def splitFQTN(fullyQualifiedTableName: str, defaultSchema: str = "public"):
if not fullyQualifiedTableName:
return None, None
if "." not in fullyQualifiedTableName:
return defaultSchema, fullyQualifiedTableName
_elements = fullyQualifiedTableName.split(".")
return _elements[0], _elements[1]
def getFqn(fullyQualifiedName: str, defaultSchema: str = "public"):
schema, table = splitFQTN(fullyQualifiedTableName=fullyQualifiedName, defaultSchema=defaultSchema)
return combineFQTN(schemaName=schema, tableName=table)
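# Examples (illustrative, based on the defaults above):
# normalizeStoreNameToObj("full_name")  -> "fullName"
# normalizeObjNameToStore("fullName")   -> "full_name"
# getFqn("users")                       -> "public.users"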
``` |
{
"source": "joemarchionna/kissom_pg",
"score": 2
} |
#### File: kissom_pg/pgCmds/pgTableInfo.py
```python
from kissom.appExceptions import TableNameDoesNotExistException
from kissom_pg.pgCmds.pgPrimaryKeyInfo import getPrimaryKeyNamesAndTypes
_selectInfoSchemaSql = """SELECT ordinal_position, column_name, data_type, column_default, is_nullable, is_updatable FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = '{}' AND TABLE_NAME = '{}'"""
def getTableInfo(conn, schemaName: str, tableName: str, includePrimaryKeyInfo: bool = False):
"""returns a list of dicts, one for each column in the table specified"""
"""returns a list of dicts, one for each column in the table specified, with primary key information merged"""
tableColumns = _getTableInfo(conn, schemaName, tableName)
if not includePrimaryKeyInfo:
return tableColumns
primaryKeys = getPrimaryKeyNamesAndTypes(conn, schemaName, tableName)
for column in tableColumns:
column["isPrimaryKey"] = column["name"] in primaryKeys
return tableColumns
def _getTableInfo(conn, schemaName: str, tableName: str):
xaction = conn.cursor()
xaction.execute(_selectInfoSchemaSql.format(schemaName, tableName))
response = xaction.fetchall()
tableData = []
for r in response:
tableData.append(
{
"index": r[0],
"name": r[1],
"type": r[2].lower(),
"default": r[3],
"isNullable": (r[4] == "YES"),
"isUpdatable": (r[5] == "YES"),
}
)
xaction.close()
if not tableData:
raise TableNameDoesNotExistException(tablename=tableName)
tableData.sort(key=lambda x: x.get("index"))
return tableData
``` |
{
"source": "joemarct/fbmq",
"score": 2
} |
#### File: fbmq/fbmq/fbmq.py
```python
import sys
import re
import requests
from .payload import *
from .template import *
from .events import *
# See https://developers.facebook.com/docs/graph-api/changelog
SUPPORTED_API_VERS=[
"v2.11",
"v2.10",
"v2.9",
"v2.8",
"v2.7",
"v2.6",
]
# See https://developers.facebook.com/docs/messenger-platform/messenger-profile/supported-locales
SUPPORTED_LOCALES=[
"default",
"en_US",
"ca_ES",
"cs_CZ",
"cx_PH",
"cy_GB",
"da_DK",
"de_DE",
"eu_ES",
"en_UD",
"es_LA",
"es_ES",
"gn_PY",
"fi_FI",
"fr_FR",
"gl_ES",
"hu_HU",
"it_IT",
"ja_JP",
"ko_KR",
"nb_NO",
"nn_NO",
"nl_NL",
"fy_NL",
"pl_PL",
"pt_BR",
"pt_PT",
"ro_RO",
"ru_RU",
"sk_SK",
"sl_SI",
"sv_SE",
"th_TH",
"tr_TR",
"ku_TR",
"zh_CN",
"zh_HK",
"zh_TW",
"af_ZA",
"sq_AL",
"hy_AM",
"az_AZ",
"be_BY",
"bn_IN",
"bs_BA",
"bg_BG",
"hr_HR",
"nl_BE",
"en_GB",
"et_EE",
"fo_FO",
"fr_CA",
"ka_GE",
"el_GR",
"gu_IN",
"hi_IN",
"is_IS",
"id_ID",
"ga_IE",
"jv_ID",
"kn_IN",
"kk_KZ",
"lv_LV",
"lt_LT",
"mk_MK",
"mg_MG",
"ms_MY",
"mt_MT",
"mr_IN",
"mn_MN",
"ne_NP",
"pa_IN",
"sr_RS",
"so_SO",
"sw_KE",
"tl_PH",
"ta_IN",
"te_IN",
"ml_IN",
"uk_UA",
"uz_UZ",
"vi_VN",
"km_KH",
"tg_TJ",
"ar_AR",
"he_IL",
"ur_PK",
"fa_IR",
"ps_AF",
"my_MM",
"qz_MM",
"or_IN",
"si_LK",
"rw_RW",
"cb_IQ",
"ha_NG",
"ja_KS",
"br_FR",
"tz_MA",
"co_FR",
"as_IN",
"ff_NG",
"sc_IT",
"sz_PL",
]
class LocalizedObj():
def __init__(self, locale, obj):
if locale not in SUPPORTED_LOCALES:
raise ValueError("Unsupported locale: {}".format(locale))
if not obj:
raise ValueError("Object is mandatory")
self.locale = locale
self.obj = obj
# I agree with him : http://stackoverflow.com/a/36937/3843242
class NotificationType:
REGULAR = 'REGULAR'
SILENT_PUSH = 'SILENT_PUSH'
NO_PUSH = 'NO_PUSH'
class SenderAction:
TYPING_ON = 'typing_on'
TYPING_OFF = 'typing_off'
MARK_SEEN = 'mark_seen'
def event_parser(messaging=None):
if messaging is None:
messaging = dict()
if 'message' in messaging:
is_echo = messaging.get('message', {}).get('is_echo')
if is_echo:
event_type = EchoEvent
else:
event_type = MessageEvent
elif 'delivery' in messaging:
event_type = DeliveriesEvent
elif 'read' in messaging:
event_type = ReadEvent
elif 'account_linking' in messaging:
event_type = AccountLinkingEvent
elif 'checkout_update' in messaging:
event_type = CheckOutUpdateEvent
elif 'game_play' in messaging:
event_type = GamePlayEvent
elif 'pass_thread_control' in messaging:
event_type = PassThreadEvent
elif 'take_thread_control' in messaging:
event_type = TakeThreadEvent
elif 'request_thread_control' in messaging:
event_type = RequestThreadEvent
elif 'app_roles' in messaging:
event_type = AppRoleEvent
elif 'optin' in messaging:
event_type = OptinEvent
elif 'payment' in messaging:
event_type = PaymentEvent
elif 'policy-enforcement' in messaging:
# key name must be changed for properly use to class instance.
messaging['policy_enforcement'] = messaging['policy-enforcement']
del messaging['policy-enforcement']
event_type = PolicyEnforcementEvent
elif 'postback' in messaging:
event_type = PostBackEvent
elif 'referral' in messaging:
event_type = ReferralEvent
elif 'standby' in messaging:
event_type = StandByEvent
else:
print("Webhook received unknown messaging")
return
event = event_type.new_from_json_dict(messaging)
return event
class Page(object):
def __init__(self, page_access_token, **options):
self.page_access_token = page_access_token
self._after_send = options.pop('after_send', None)
self._api_ver = options.pop('api_ver', 'v2.6')
if self._api_ver not in SUPPORTED_API_VERS:
raise ValueError('Unsupported API Version : ' + self._api_ver)
self._page_id = None
self._page_name = None
WEBHOOK_ENDPOINTS = ['optin', 'message', 'echo', 'delivery', 'postback', 'read', 'account_linking', 'referral', 'standby']
# these are set by decorators or the 'set_webhook_handler' method
_webhook_handlers = {}
_quick_reply_callbacks = {}
_button_callbacks = {}
_quick_reply_callbacks_key_regex = {}
_button_callbacks_key_regex = {}
_after_send = None
def _api_uri(self, sub):
return "https://graph.facebook.com/" + self._api_ver + "/" + sub
def _call_handler(self, name, func, *args, **kwargs):
if func is not None:
func(*args, **kwargs)
elif name in self._webhook_handlers:
self._webhook_handlers[name](*args, **kwargs)
else:
print("there's no %s handler" % name)
def handle_webhook(self, payload, optin=None, message=None, echo=None, delivery=None,
postback=None, read=None, account_linking=None, referral=None,
game_play=None, pass_thread_control=None, take_thread_control=None,
request_thread_control=None, app_roles=None, policy_enforcement=None,
checkout_update=None, payment=None, standby=None):
data = json.loads(payload)
# Make sure this is a page subscription
if data.get("object") != "page":
print("Webhook failed, only support page subscription")
return False
# Iterate over each entry
# There may be multiple if batched
def get_events(data):
for entry in data.get("entry"):
messagings = entry.get("messaging")
# handle standby events
if 'standby' in entry:
event = event_parser(entry)
yield event
elif messagings:
for messaging in messagings:
event = event_parser(messaging)
yield event
else:
print("Webhook received unsupported Entry:", entry)
continue
for event in get_events(data):
if isinstance(event, OptinEvent):
self._call_handler('optin', optin, event)
elif isinstance(event, EchoEvent):
self._call_handler('echo', echo, event)
elif isinstance(event, MessageEvent):
self._call_handler('message', message, event)
if event.is_quick_reply:
matched_callbacks = self.get_quick_reply_callbacks(event)
for callback in matched_callbacks:
callback(event.quick_reply_payload, event)
elif isinstance(event, DeliveriesEvent):
self._call_handler('delivery', delivery, event)
elif isinstance(event, PostBackEvent):
matched_callbacks = self.get_postback_callbacks(event)
self._call_handler('postback', postback, event)
for callback in matched_callbacks:
callback(event.payload, event)
elif isinstance(event, ReadEvent):
self._call_handler('read', read, event)
elif isinstance(event, AccountLinkingEvent):
self._call_handler('account_linking', account_linking, event)
elif isinstance(event, ReferralEvent):
self._call_handler('referral', referral, event)
elif isinstance(event, GamePlayEvent):
self._call_handler('game_play', game_play, event)
elif isinstance(event, PassThreadEvent):
self._call_handler('pass_thread_control', pass_thread_control, event)
elif isinstance(event, TakeThreadEvent):
self._call_handler('take_thread_control', take_thread_control, event)
elif isinstance(event, RequestThreadEvent):
self._call_handler('request_thread_control', request_thread_control, event)
elif isinstance(event, AppRoleEvent):
self._call_handler('app_roles', app_roles, event)
elif isinstance(event, PolicyEnforcementEvent):
self._call_handler('policy_enforcement', policy_enforcement, event)
elif isinstance(event ,CheckOutUpdateEvent):
self._call_handler('checkout_update', checkout_update, event)
elif isinstance(event, PaymentEvent):
self._call_handler('payment', payment, event)
elif isinstance(event, StandByEvent):
self._call_handler('standby', standby, event)
else:
print("Webhook received unknown messaging Event:", event)
@property
def page_id(self):
if self._page_id is None:
self._fetch_page_info()
return self._page_id
@property
def page_name(self):
if self._page_name is None:
self._fetch_page_info()
return self._page_name
def _fetch_page_info(self):
r = requests.get(self._api_uri("me"),
params={"access_token": self.page_access_token},
headers={'Content-type': 'application/json'})
if r.status_code != requests.codes.ok:
print(r.text)
return
data = json.loads(r.text)
if 'id' not in data or 'name' not in data:
raise ValueError('Could not fetch data : GET /' + self._api_ver +
'/me')
self._page_id = data['id']
self._page_name = data['name']
def get_user_profile(self, fb_user_id):
r = requests.get(self._api_uri(fb_user_id),
params={"access_token": self.page_access_token},
headers={'Content-type': 'application/json'})
if r.status_code != requests.codes.ok:
print(r.text)
return
return json.loads(r.text)
def get_messenger_code(self, ref=None, image_size=1000):
d = {}
d['type']='standard'
d['image_size'] = image_size
if ref:
d['data'] = {'ref': ref}
r = requests.post(self._api_uri("me/messenger_codes"),
params={"access_token": self.page_access_token},
json=d,
headers={'Content-type': 'application/json'})
if r.status_code != requests.codes.ok:
print(r.text)
return None
data = json.loads(r.text)
if 'uri' not in data:
raise ValueError('Could not fetch messener code : GET /' +
self._api_ver + '/me')
return data['uri']
def _send(self, payload, callback=None):
r = requests.post(self._api_uri("me/messages"),
params={"access_token": self.page_access_token},
data=payload.to_json(),
headers={'Content-type': 'application/json'})
if r.status_code != requests.codes.ok:
print(r.text)
if callback is not None:
callback(payload, r)
if self._after_send is not None:
self._after_send(payload, r)
return r
def send(self, recipient_id, message, quick_replies=None, metadata=None,
notification_type=None, callback=None, tag=None):
if sys.version_info >= (3, 0):
text = message if isinstance(message, str) else None
else:
text = message if isinstance(message, str) else message.encode('utf-8') if isinstance(message, unicode) else None
attachment = message if not text else None
payload = Payload(recipient=Recipient(id=recipient_id),
message=Message(text=text,
attachment=attachment,
quick_replies=quick_replies,
metadata=metadata),
notification_type=notification_type,
tag=tag)
return self._send(payload, callback=callback)
def send_json(self, json_payload, callback=None):
return self._send(Payload(**json.loads(json_payload)), callback)
def send_dict(self, dict_payload, callback=None):
return self._send(Payload(**dict_payload), callback)
def typing_on(self, recipient_id):
payload = Payload(recipient=Recipient(id=recipient_id),
sender_action=SenderAction.TYPING_ON)
self._send(payload)
def typing_off(self, recipient_id):
payload = Payload(recipient=Recipient(id=recipient_id),
sender_action=SenderAction.TYPING_OFF)
self._send(payload)
def mark_seen(self, recipient_id):
payload = Payload(recipient=Recipient(id=recipient_id),
sender_action=SenderAction.MARK_SEEN)
self._send(payload)
"""
messenger profile (see https://developers.facebook.com/docs/messenger-platform/reference/messenger-profile-api)
"""
def _set_profile_property(self, pname, pval):
r = requests.post(self._api_uri("me/messenger_profile"),
params={"access_token": self.page_access_token},
data=json.dumps({
pname: pval
}),
headers={'Content-type': 'application/json'})
if r.status_code != requests.codes.ok:
print(r.text)
def _del_profile_property(self, pname):
r = requests.delete(self._api_uri("me/messenger_profile"),
params={"access_token": self.page_access_token},
data=json.dumps({
'fields': [pname,]
}),
headers={'Content-type': 'application/json'})
if r.status_code != requests.codes.ok:
print(r.text)
def greeting(self, text):
self.localized_greeting([LocalizedObj(locale="default", obj=text)])
def localized_greeting(self, locale_list):
if not locale_list:
raise ValueError("List of locales is mandatory")
pval = []
for l in locale_list:
if not isinstance(l, LocalizedObj):
raise ValueError("greeting type error")
if not isinstance(l.obj, str):
raise ValueError("greeting text error")
pval.append({
"locale": l.locale,
"text": l.obj
})
self._set_profile_property(pname="greeting", pval=pval)
def hide_greeting(self):
self._del_profile_property(pname="greeting")
def show_starting_button(self, payload):
if not payload or not isinstance(payload, str):
raise ValueError("show_starting_button payload error")
self._set_profile_property(pname="get_started",
pval={"payload": payload})
def hide_starting_button(self):
self._del_profile_property(pname="get_started")
def show_persistent_menu(self, buttons):
self.show_localized_persistent_menu([LocalizedObj(locale="default",
obj=buttons)])
def show_localized_persistent_menu(self, locale_list):
if not locale_list:
raise ValueError("List of locales is mandatory")
pval = []
for l in locale_list:
if not isinstance(l, LocalizedObj):
raise ValueError("persistent_menu error")
if not isinstance(l.obj, list):
raise ValueError("menu call_to_actions error")
buttons = Buttons.convert_shortcut_buttons(l.obj)
buttons_dict = []
for button in buttons:
if isinstance(button, ButtonWeb):
buttons_dict.append({
"type": "web_url",
"title": button.title,
"url": button.url
})
elif isinstance(button, ButtonPostBack):
buttons_dict.append({
"type": "postback",
"title": button.title,
"payload": button.payload
})
else:
raise ValueError('show_persistent_menu button type must be "url" or "postback"')
pval.append({
"locale": l.locale,
"call_to_actions": buttons_dict
})
self._set_profile_property(pname="persistent_menu", pval=pval)
def hide_persistent_menu(self):
self._del_profile_property(pname="persistent_menu")
"""
handlers and decorations
"""
def set_webhook_handler(self, scope, callback):
"""
Allows adding a webhook_handler as an alternative to the decorators
"""
scope = scope.lower()
if scope == 'after_send':
self._after_send = callback
return
if scope not in Page.WEBHOOK_ENDPOINTS:
raise ValueError("The 'scope' argument must be one of {}.".format(Page.WEBHOOK_ENDPOINTS))
self._webhook_handlers[scope] = callback
def handle_optin(self, func):
self._webhook_handlers['optin'] = func
def handle_message(self, func):
self._webhook_handlers['message'] = func
def handle_echo(self, func):
self._webhook_handlers['echo'] = func
def handle_delivery(self, func):
self._webhook_handlers['delivery'] = func
def handle_postback(self, func):
self._webhook_handlers['postback'] = func
def handle_read(self, func):
self._webhook_handlers['read'] = func
def handle_account_linking(self, func):
self._webhook_handlers['account_linking'] = func
def handle_referral(self, func):
self._webhook_handlers['referral'] = func
def handle_game_play(self, func):
self._webhook_handlers['game_play'] = func
def handle_pass_thread_control(self, func):
self._webhook_handlers['pass_thread_control'] = func
def handle_take_thread_control(self, func):
self._webhook_handlers['take_thread_control'] = func
def handle_request_thread_control(self, func):
self._webhook_handlers['request_thread_control'] = func
def handle_app_roles(self, func):
self._webhook_handlers['app_roles'] = func
def handle_policy_enforcement(self, func):
self._webhook_handlers['policy_enforcement'] = func
def handle_checkout_update(self, func):
self._webhook_handlers['checkout_update'] = func
def handle_payment(self, func):
self._webhook_handlers['payment'] = func
def handle_standby(self, func):
self._webhook_handlers['standby'] = func
def after_send(self, func):
self._after_send = func
_callback_default_types = ['QUICK_REPLY', 'POSTBACK']
def callback(self, payloads=None, types=None):
if types is None:
types = self._callback_default_types
if not isinstance(types, list):
raise ValueError('callback types must be list')
for type in types:
if type not in self._callback_default_types:
raise ValueError('callback types must be "QUICK_REPLY" or "POSTBACK"')
def wrapper(func):
if payloads is None:
return func
for payload in payloads:
if 'QUICK_REPLY' in types:
self._quick_reply_callbacks[payload] = func
if 'POSTBACK' in types:
self._button_callbacks[payload] = func
return func
return wrapper
def get_quick_reply_callbacks(self, event):
callbacks = []
for key in self._quick_reply_callbacks.keys():
if key not in self._quick_reply_callbacks_key_regex:
self._quick_reply_callbacks_key_regex[key] = re.compile(key + '$')
if self._quick_reply_callbacks_key_regex[key].match(event.quick_reply_payload):
callbacks.append(self._quick_reply_callbacks[key])
return callbacks
def get_postback_callbacks(self, event):
callbacks = []
for key in self._button_callbacks.keys():
if key not in self._button_callbacks_key_regex:
self._button_callbacks_key_regex[key] = re.compile(key + '$')
if self._button_callbacks_key_regex[key].match(event.payload):
callbacks.append(self._button_callbacks[key])
return callbacks
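# Example usage (a sketch; the placeholder token and the `sender_id` attribute on
# MessageEvent are assumptions based on how the handlers above are invoked):
#
# page = Page('<PAGE_ACCESS_TOKEN>')
#
# @page.handle_message
# def on_message(event):
#     page.send(event.sender_id, "Thanks for your message!")
#
# # and in your webhook endpoint, pass the raw request body through:
# # page.handle_webhook(raw_request_body)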
``` |
{
"source": "joemarct/python-mnemonic",
"score": 2
} |
#### File: python-mnemonic/mnemonic/shamir.py
```python
import sys
import binascii
from .secretsharing import secret_int_to_points, points_to_secret_int
from .mnemonic import Mnemonic
class Shamir(object):
def __init__(self, language):
self.mnemo = Mnemonic(language)
# see https://primes.utm.edu/lists/2small/ for biggest primes that fit into X bits
self.primes = {
15: (2**120 - 119),
19: (2**152 - 17),
23: (2**184 - 33),
27: (2**216 - 377),
31: (2**248 - 237)
}
def split(self, data, m, n):
if not len(data) in self.primes.keys():
raise Exception('Unknown data length')
if m < 2 or m > 15:
raise Exception('Invalid M provided')
if n < 2 or n > 15:
raise Exception('Invalid N provided')
prime = self.primes[len(data)]
s = secret_int_to_points(int(binascii.hexlify(data), 16), m, n, prime)
s = ['%x%x%s' % (m, x[0], ('%x' % x[1]).zfill(len(data) * 2)) for x in s]
return [self.mnemo.to_mnemonic(binascii.unhexlify(x)) for x in s]
def combine(self, shares):
words = set([len(x.split(' ')) for x in shares])
if len(words) != 1:
raise Exception('Inconsistent number of words')
datalen = list(words)[0] * 4 // 3 - 1
shares = [binascii.hexlify(self.mnemo.to_entropy(x)) for x in shares]
if sys.version > '3':
if set([int(chr(x[0]), 16) for x in shares]) != set([len(shares)]):
raise Exception('Number of shares does not match the threshold')
points = [(int(chr(x[1]), 16), int(x[2:], 16)) for x in shares]
else:
if set([int(x[0], 16) for x in shares]) != set([len(shares)]):
raise Exception('Number of shares does not match the threshold')
points = [(int(x[1], 16), int(x[2:], 16)) for x in shares]
prime = self.primes[datalen]
r = points_to_secret_int(points, prime)
r = hex(r)[2:]
if r.endswith('L'):
r = r[:-1]
r = r.zfill(datalen * 2)
return binascii.unhexlify(r)
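# Example usage (a sketch; assumes the 'english' wordlist is available to Mnemonic
# and that .secretsharing provides the imported helpers):
# shamir = Shamir('english')
# shares = shamir.split(b'\x00' * 23, 3, 5)   # 23-byte secret, 3-of-5 scheme
# recovered = shamir.combine(shares[:3])      # combine() expects exactly m shares
# assert recovered == b'\x00' * 23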
``` |
{
"source": "joemarshall/DeepLabV3Plus-Pytorch",
"score": 2
} |
#### File: joemarshall/DeepLabV3Plus-Pytorch/hubconf.py
```python
dependencies = ['torch','torchvision']
import network
import torch
from network import deeplabv3_resnet50 as _deeplabv3_resnet50
from network import deeplabv3plus_resnet50 as _deeplabv3plus_resnet50
from network import deeplabv3_resnet101 as _deeplabv3_resnet101
from network import deeplabv3plus_resnet101 as _deeplabv3plus_resnet101
from network import deeplabv3_mobilenet as _deeplabv3_mobilenet
from network import deeplabv3plus_mobilenet as _deeplabv3plus_mobilenet
def deeplabv3plus_resnet50(pretrained=False,progress=True,**kwargs):
model=_deeplabv3plus_resnet50(**kwargs)
if pretrained:
checkpoint = 'https://github.com/joemarshall/DeepLabV3Plus-Pytorch/releases/download/pretrained_1.0/best_deeplabv3plus_resnet50_voc_os16.pth'
model.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=progress)["model_state"])
return model
def deeplabv3_resnet50(pretrained=False,progress=True,**kwargs):
model=_deeplabv3_resnet50(**kwargs)
if pretrained:
checkpoint = 'https://github.com/joemarshall/DeepLabV3Plus-Pytorch/releases/download/pretrained_1.0/best_deeplabv3_resnet50_voc_os16.pth'
model.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=progress)["model_state"])
return model
def deeplabv3_resnet101(pretrained=False,progress=True,**kwargs):
model=_deeplabv3_resnet101(**kwargs)
if pretrained:
checkpoint = 'https://github.com/joemarshall/DeepLabV3Plus-Pytorch/releases/download/pretrained_1.0/best_deeplabv3_resnet101_voc_os16.pth'
model.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=progress)["model_state"])
return model
def deeplabv3plus_resnet101(pretrained=False,progress=True,**kwargs):
model=_deeplabv3plus_resnet101(**kwargs)
if pretrained:
checkpoint = 'https://github.com/joemarshall/DeepLabV3Plus-Pytorch/releases/download/pretrained_1.0/best_deeplabv3plus_resnet101_voc_os16.pth'
model.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=progress)["model_state"])
return model
def deeplabv3_mobilenet(pretrained=False,progress=True,**kwargs):
model=_deeplabv3_mobilenet(**kwargs)
if pretrained:
checkpoint = 'https://github.com/joemarshall/DeepLabV3Plus-Pytorch/releases/download/pretrained_1.0/best_deeplabv3_mobilenet_voc_os16.pth'
model.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=progress)["model_state"])
return model
def deeplabv3plus_mobilenet(pretrained=False,progress=True,**kwargs):
model=_deeplabv3plus_mobilenet(**kwargs)
if pretrained:
checkpoint = 'https://github.com/joemarshall/DeepLabV3Plus-Pytorch/releases/download/pretrained_1.0/best_deeplabv3plus_mobilenet_voc_os16.pth'
model.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=progress)["model_state"])
return model
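# Example usage via torch.hub (a sketch; assumes the default branch of
# joemarshall/DeepLabV3Plus-Pytorch exposes this hubconf and that the release
# weights linked above are reachable):
# model = torch.hub.load('joemarshall/DeepLabV3Plus-Pytorch', 'deeplabv3plus_mobilenet', pretrained=True)
# model.eval()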
``` |
{
"source": "joemarshall/GIMP-ML",
"score": 2
} |
#### File: GIMP-ML/models/MaskGAN.py
```python
import sys
import numpy as np
import torch
import torch.hub
from PIL import Image
from torchvision import transforms
from _model_base import ModelBase, handle_alpha
colors = np.array([
[0, 0, 0], [204, 0, 0], [76, 153, 0],
[204, 204, 0], [51, 51, 255], [204, 0, 204], [0, 255, 255],
[51, 255, 255], [102, 51, 0], [255, 0, 0], [102, 204, 0],
[255, 255, 0], [0, 0, 153], [0, 0, 204], [255, 51, 153],
[0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]
], dtype=np.uint8)
def mask_colors_to_indices(mask):
x = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)
for idx, color in enumerate(colors):
x[np.all(mask == color, axis=2), :] = idx
return x
def scale_to_width_transform(width, method):
def f(img):
ow, oh = img.size
if ow == width:
return img
w = width
h = int(width * oh / ow)
return img.resize((w, h), method)
return f
def get_img_transform(target_width):
return transforms.Compose([
Image.fromarray,
scale_to_width_transform(target_width, Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
lambda x: x.unsqueeze(0)
])
def get_mask_transform(target_width):
return transforms.Compose([
mask_colors_to_indices,
Image.fromarray,
scale_to_width_transform(target_width, Image.NEAREST),
transforms.ToTensor(),
transforms.Normalize((0, 0, 0), (1 / 255., 1 / 255., 1 / 255.)),
lambda x: x.unsqueeze(0)
])
class MaskGAN(ModelBase):
def __init__(self):
super().__init__()
self.hub_repo = 'valgur/CelebAMask-HQ'
def load_model(self):
device = torch.device(self.device)
if device.type == "cpu":
raise RuntimeError("MaskGAN does not support CPU inference")
gpu_ids = [device.index or 0]
model = torch.hub.load(self.hub_repo, 'Pix2PixHD',
pretrained=True, map_location=self.device, gpu_ids=gpu_ids)
model.to(self.device)
return model
@handle_alpha
@torch.no_grad()
def predict(self, img, mask, mask_m):
h, w, d = img.shape
assert d == 3, "Input image must be RGB"
assert img.shape == mask.shape
assert img.shape == mask_m.shape
opt = self.model.opt
transform_mask = get_mask_transform(opt.loadSize)
transform_image = get_img_transform(opt.loadSize)
mask = transform_mask(mask).to(self.device)
mask_m = transform_mask(mask_m).to(self.device)
img = transform_image(img).to(self.device)
generated = self.model.inference(mask_m, mask, img)
result = generated.squeeze().permute(1, 2, 0)
result = (result + 1) * 127.5
result = result.clamp(0, 255).byte().cpu().numpy()
result = Image.fromarray(result)
result = result.resize([w, h])
return np.array(result)
model = MaskGAN()
if __name__ == '__main__':
rpc_url = sys.argv[1]
model.process_rpc(rpc_url)
```
#### File: GIMP-ML/plugins/monodepth.py
```python
import sys
from os.path import dirname, realpath
sys.path.append(realpath(dirname(__file__)))
from gimpfu import main
from _plugin_base import GimpPluginBase
class MonoDepth(GimpPluginBase):
def run(self):
self.model_file = 'Monodepth2.py'
result = self.predict(self.drawable)
self.create_layer(result)
plugin = MonoDepth()
plugin.register(
proc_name="MonoDepth",
blurb="MonoDepth",
help="Generate monocular disparity map based on deep learning.",
author="<NAME>",
copyright="",
date="2020",
label="MonoDepth...",
imagetypes="RGB*"
)
main()
```
#### File: GIMP-ML/plugins/_plugin_base.py
```python
from __future__ import print_function, absolute_import, division
import os
import subprocess
import sys
import threading
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
from abc import ABCMeta, abstractmethod
from xmlrpclib import Binary
import gimpfu as gfu
from gimpfu import gimp, pdb
from _config import python3_executable
base_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
models_dir = os.path.join(base_dir, 'models')
class GimpPluginBase(object):
__metaclass__ = ABCMeta
def __init__(self):
self.model_file = None
self.gimp_img = None
self.drawable = None
self.name = None
@abstractmethod
def run(self, *args, **kwargs):
# example
self.model_file = 'xyz.py'
result = self.predict(self.drawable)
self.create_layer(result)
def create_layer(self, result, name=None):
name = name or self.drawable.name + ' ' + self.name
imgarray_to_layer(result, self.gimp_img, name)
def create_image(self, result, name=None):
name = name or self.drawable.name + ' ' + self.name
imgarray_to_image(result, name)
def register(self, proc_name, blurb, help, author, copyright, date, label,
imagetypes, params=None, results=None, menu="<Image>/Layer/GIML-ML",
domain=None, on_query=None, on_run=None):
self.name = proc_name
gfu.register(
proc_name,
blurb,
help,
author,
copyright,
date,
label,
imagetypes,
params=[(gfu.PF_IMAGE, "image", "Input image", None),
(gfu.PF_DRAWABLE, "drawable", "Input drawable", None)]
+ (params or []),
results=results or [],
function=self.run_outer,
menu=menu,
domain=domain, on_query=on_query, on_run=on_run
)
def run_outer(self, gimp_img, drawable, *extra_args):
self.gimp_img = gimp_img
self.drawable = drawable
print("Running {}...".format(self.name))
gimp.progress_init("Running {}...".format(self.name))
self.run(*extra_args)
def predict(self, *args, **kwargs):
assert self.model_file is not None
model_proxy = ModelProxy(self.model_file)
return model_proxy(*args, **kwargs)
class ModelProxy(object):
"""
When called, runs
python3 models/<model_file>
and waits for the subprocess to call get_args() and then return_result() over XML-RPC.
Additionally, any progress info can be sent via update_progress().
"""
def __init__(self, model_file):
self.python_executable = python3_executable
self.model_path = os.path.join(models_dir, model_file)
self.server = None
self.args = None
self.kwargs = None
self.result = None
@staticmethod
def _encode(x):
if isinstance(x, gimp.Layer):
x = layer_to_imgarray(x)
if isinstance(x, ImgArray):
x = x.encode()
return x
@staticmethod
def _decode(x):
if isinstance(x, list) and len(x) == 2 and hasattr(x[0], 'data'):
x = ImgArray.decode(x)
return x
def _rpc_get_args(self):
assert isinstance(self.args, (list, tuple))
assert isinstance(self.kwargs, dict)
args = [self._encode(arg) for arg in self.args]
kwargs = {k: self._encode(v) for k, v in self.kwargs.items()}
return args, kwargs
def _rpc_return_result(self, result):
assert isinstance(result, (list, tuple))
self.result = tuple(self._decode(x) for x in result)
threading.Thread(target=lambda: self.server.shutdown()).start()
def _subproc_thread(self, rpc_port):
env = self._add_conda_env_to_path()
try:
self.proc = subprocess.Popen([
self.python_executable,
self.model_path,
'http://127.0.0.1:{}/'.format(rpc_port)
], env=env)
self.proc.wait()
finally:
self.server.shutdown()
self.server.server_close()
def _add_conda_env_to_path(self):
env = os.environ.copy()
conda_root = os.path.dirname(self.python_executable)
env['PATH'] = os.pathsep.join([
conda_root,
os.path.join(conda_root, 'Library', 'mingw-w64', 'bin'),
os.path.join(conda_root, 'Library', 'usr', 'bin'),
os.path.join(conda_root, 'Library', 'bin'),
os.path.join(conda_root, 'Scripts'),
os.path.join(conda_root, 'bin'),
env['PATH']
])
return env
def __call__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
# For cleaner exception info
class RequestHandler(SimpleXMLRPCRequestHandler):
def _dispatch(self, method, params):
try:
return self.server.funcs[method](*params)
except:
self.server.exception = sys.exc_info()
raise
self.server = SimpleXMLRPCServer(('127.0.0.1', 0), allow_none=True, logRequests=False,
requestHandler=RequestHandler)
rpc_port = self.server.server_address[1]
self.server.register_function(self._rpc_get_args, 'get_args')
self.server.register_function(self._rpc_return_result, 'return_result')
self.server.register_function(update_progress)
t = threading.Thread(target=self._subproc_thread, args=(rpc_port,))
t.start()
self.server.exception = None
self.server.serve_forever()
if self.result is None:
if self.server.exception:
type, value, traceback = self.server.exception
raise type, value, traceback
raise RuntimeError("Model did not return a result!")
if len(self.result) == 1:
return self.result[0]
return self.result
class ImgArray(object):
"""Minimal Numpy ndarray-like object for serialization in RPC."""
def __init__(self, buffer, shape):
self.buffer = buffer
self.shape = shape
def encode(self):
return Binary(self.buffer), self.shape
@staticmethod
def decode(x):
return ImgArray(x[0].data, x[1])
image_type_map = {
1: gfu.GRAY_IMAGE,
2: gfu.GRAYA_IMAGE,
3: gfu.RGB_IMAGE,
4: gfu.RGBA_IMAGE,
}
image_base_type_map = {
1: gfu.GRAY,
2: gfu.GRAY,
3: gfu.RGB,
4: gfu.RGB,
}
def layer_to_imgarray(layer):
region = layer.get_pixel_rgn(0, 0, layer.width, layer.height)
pixChars = region[:, :] # Take whole layer
return ImgArray(pixChars, (layer.height, layer.width, region.bpp))
def imgarray_to_layer(array, gimp_img, name):
h, w, d = array.shape
layer = gimp.Layer(gimp_img, name, w, h, image_type_map[d])
region = layer.get_pixel_rgn(0, 0, w, h)
region[:, :] = array.buffer
gimp_img.insert_layer(layer, position=0)
return layer
def imgarray_to_image(array, name):
h, w, d = array.shape
img = gimp.Image(w, h, image_base_type_map[d])
imgarray_to_layer(array, img, name)
gimp.Display(img)
gimp.displays_flush()
def update_progress(percent, message):
if percent is not None:
pdb.gimp_progress_update(percent)
else:
pdb.gimp_progress_pulse()
pdb.gimp_progress_set_text(message)
``` |
{
"source": "joemarshall/GrovePi",
"score": 2
} |
#### File: Python/grove_rgb_lcd/grove_rgb_lcd.py
```python
import time,sys
if sys.platform == 'uwp':
import winrt_smbus as smbus
bus = smbus.SMBus(1)
else:
import smbus
import RPi.GPIO as GPIO
rev = GPIO.RPI_REVISION
if rev == 2 or rev == 3:
bus = smbus.SMBus(1)
else:
bus = smbus.SMBus(0)
# this device has two I2C addresses
DISPLAY_RGB_ADDR = 0x62
DISPLAY_TEXT_ADDR = 0x3e
# set backlight to (R,G,B) (values from 0..255 for each)
def setRGB(r,g,b):
bus.write_byte_data(DISPLAY_RGB_ADDR,0,0)
bus.write_byte_data(DISPLAY_RGB_ADDR,1,0)
bus.write_byte_data(DISPLAY_RGB_ADDR,0x08,0xaa)
bus.write_byte_data(DISPLAY_RGB_ADDR,4,r)
bus.write_byte_data(DISPLAY_RGB_ADDR,3,g)
bus.write_byte_data(DISPLAY_RGB_ADDR,2,b)
# send command to display (no need for external use)
def textCommand(cmd):
bus.write_byte_data(DISPLAY_TEXT_ADDR,0x80,cmd)
# set display text; use \n to move to the second line (long lines also auto-wrap at 16 chars)
def setText(text):
textCommand(0x01) # clear display
time.sleep(.05)
textCommand(0x08 | 0x04) # display on, no cursor
textCommand(0x28) # 2 lines
time.sleep(.05)
count = 0
row = 0
for c in text:
if c == '\n' or count == 16:
count = 0
row += 1
if row == 2:
break
textCommand(0xc0)
if c == '\n':
continue
count += 1
bus.write_byte_data(DISPLAY_TEXT_ADDR,0x40,ord(c))
# Update the display without clearing it first
def setText_norefresh(text):
textCommand(0x02) # return home
time.sleep(.05)
textCommand(0x08 | 0x04) # display on, no cursor
textCommand(0x28) # 2 lines
time.sleep(.05)
count = 0
row = 0
for c in text:
if c == '\n' or count == 16:
count = 0
row += 1
if row == 2:
break
textCommand(0xc0)
if c == '\n':
continue
count += 1
bus.write_byte_data(DISPLAY_TEXT_ADDR,0x40,ord(c))
# example code
if __name__=="__main__":
setText("Hello world\nThis is an LCD test")
setRGB(0,128,64)
for c in range(0,255):
setRGB(c,255-c,0)
time.sleep(0.01)
setRGB(0,255,0)
setText("Bye bye, this should wrap onto next line")
``` |
{
"source": "joemash/django-products-order",
"score": 2
} |
#### File: django-products-order/orders/models.py
```python
import string, random, datetime,locale
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.mail import send_mail, EmailMultiAlternatives
from django.template.loader import get_template, render_to_string
from django.db import models
from django.db.models.signals import pre_save
from django.utils.crypto import get_random_string
from django.template import Context
from accounts.models import Customer
from products.models import Product,CustomerPrice
from taxes.models import Tax
class PaymentMethod(models.Model):
"""
Represents payment methods for order
"""
# Payment methods
ONLINE = 'ON'
PURCHASE_ORDER = 'PO'
ALL = (
(ONLINE, 'Online'),
(PURCHASE_ORDER, 'Purchase Order'))
ALL_METHODS = dict(ALL)
code = models.CharField(primary_key=True, max_length=2, choices=ALL)
name = models.CharField(max_length=100, unique=True)
is_active = models.BooleanField(default=True)
updated_by = models.CharField(max_length=100)
updated_on = models.DateTimeField(auto_now=True)
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.CharField(max_length=100)
class Meta:
db_table = 'sales_payment_method'
verbose_name_plural = 'Payment Methods'
def __str__(self):
return '%s: %s' % (self.code, self.name)
class OrderManager(models.Manager):
locale.setlocale(locale.LC_ALL, '')
def place(self,payment_method,payment_status,
order_number,total,lineitems,order_status,customer,delivery_date):
#billing_address = Address.objects.get(customer=customer)
payment_method = PaymentMethod.objects.get(code=payment_method)
receipt_code = get_random_string(20) # allows secure access to order receipt
order = self.create(customer=customer,
total = total,
payment_method=payment_method,
payment_status=payment_status,
order_status = order_status,
order_number=order_number,
delivery_date=delivery_date,#datetime.datetime.strptime(delivery_date,'%m/%d/%y').strftime('%Y-%m-%d'),
receipt_code=receipt_code,
updated_by=customer,
created_by=customer
)
for mydict in lineitems:
product_id = CustomerPrice.objects.get(id=mydict['product_id'])
OrderItem.objects.create(order = order,
product = product_id.product,
price = mydict['price'],
quantity = mydict['quantity'],
sub_total = mydict['line_total'],
#sub_total = locale.atof(mydict['line_total']),
#tax_rate=tax_rate,
#tax_method=tax_method,
updated_by=customer,
created_by=customer)
return order
def send_order_confirmation(self,
order_number,total,lineitems,customer):
"""Sends email to user confirming order and contents"""
items = []
for mydict in lineitems:
product_id = CustomerPrice.objects.get(id=mydict['product_id'])
items.append({'product':product_id.product,'price':mydict['price'],
'quantity':mydict['quantity'],'sub_total':mydict['line_total']})
context = {
'user': customer.last_name,
'order_items':items,
'order_number':order_number,
'total':total,
#'order_status':order_status,
'MEDIA_URL': settings.MEDIA_URL,
'SITE_URL': settings.SITE_URL
}
subject = 'Thanks for your purchase at Gaea! Order ' + str(order_number)
from_email = 'Sales gaeafoods <<EMAIL>>'
# If at order screen, user provided an email address
to = customer
text_content = render_to_string('orders/email/order_confirmation.txt', context)
html_content = render_to_string('orders/email/order_confirmation.html', context)
msg = EmailMultiAlternatives(subject, text_content)
msg.attach_alternative(html_content, "text/html")
msg.to = [to]
msg.from_email = from_email
# Send a copy to me after finished as well
msg_copy = EmailMultiAlternatives("#Order " + str(order_number) + '--' + str(customer.company), text_content)
msg_copy.attach_alternative(html_content, "text/html")
msg_copy.to = ['<EMAIL>']
msg_copy.from_email = from_email
msg.send()
msg_copy.send()
def get_all_orders(self):
myorders = Order.objects.all()
orders = []
for item in myorders:
orders.append({'created_on':str(item.created_on),'customer':item.customer.company,
'order_number':item.order_number,'allocate':item.allocate_order,'payment_status':item.payment_status,'total':str(item.total)})
return orders
class Order(models.Model):
"""
Represents customer's order
"""
ORDER_PENDING = 'Pending'
ORDER_PROCESSING = 'Processing'
ORDER_COMPLETE = 'Complete'
ORDER_CANCELLED = 'Cancelled'
ORDER_STATUS = (
(ORDER_PENDING,'Pending'),
(ORDER_PROCESSING,'Processing'),
(ORDER_COMPLETE ,'Complete'),
(ORDER_CANCELLED,'Cancelled'),
)
# Payment statuses
PAYMENT_PENDING = 'Pending'
PAYMENT_AUTHORIZED = 'Authorized'
PAYMENT_PAID = 'Paid'
PAYMENT_STATUS = (
(PAYMENT_PENDING, 'Pending'),
(PAYMENT_AUTHORIZED, 'Authorized'),
(PAYMENT_PAID, 'Paid'),
)
customer = models.ForeignKey(Customer,null=True,blank=True)
order_number = models.CharField(max_length=50,null=True,blank=True,help_text='Purchase Order number')
shipping_cost = models.DecimalField(max_digits=9, decimal_places=2,null=True,blank=True)
taxes = models.DecimalField(max_digits=9, decimal_places=2,null=True,blank=True)
total = models.DecimalField(max_digits=9, decimal_places=2)
payment_method = models.ForeignKey(PaymentMethod,default='Pending')
order_status = models.CharField(max_length=20,choices=ORDER_STATUS)
payment_status = models.CharField(max_length=20, choices=PAYMENT_STATUS)
#billing_address = models.ForeignKey(Address, null=True,blank=True,related_name='billing_orders')
allocate_order = models.BooleanField(blank=True,default=False)
delivery_date = models.DateField(null=True,blank=True)
receipt_code = models.CharField(max_length=100,null=True,blank=True, help_text="Random code generate for each order for secure access.")
updated_by = models.CharField(max_length=100)
updated_on = models.DateTimeField(auto_now=True)
created_by = models.CharField(max_length=100)
created_on = models.DateField(auto_now_add=True)
objects = OrderManager()
@property
def company(self):
return self.customer.company
def get_orderitems(self):
        return self.items.select_related('product').all()
def __str__(self):
return self.order_number
class OrderItem(models.Model):
"""
Represents a purchase product
"""
order = models.ForeignKey(Order, related_name='items')
product = models.ForeignKey(Product)
price = models.DecimalField(max_digits=9, decimal_places=2, help_text='Unit price of the product')
quantity = models.IntegerField()
taxes = models.DecimalField(max_digits=9, decimal_places=2,null=True,blank=True,default=1)
sub_total = models.DecimalField(max_digits=9, decimal_places=2)
tax_rate = models.FloatField(default=0.0)
tax_method = models.CharField(max_length=2, choices=Tax.TAX_METHODS, null=True, blank=True)
updated_on = models.DateTimeField(auto_now=True)
updated_by = models.CharField(max_length=100,null=True,blank=True)
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.CharField(max_length=100,null=True,blank=True)
class Meta:
verbose_name_plural = 'Order Items'
def get_absolute_url(self):
return reverse('orders:single_order',kwargs={'order_id':self.id})
```
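A minimal usage sketch for `OrderManager.place()` above. It assumes a matching `PaymentMethod` row (code `'PO'`) and the referenced `CustomerPrice` row already exist, and that `customer` is a saved `Customer` instance; the ids and amounts are purely illustrative:

```python
from decimal import Decimal
import datetime

# Line-item dicts mirror the keys place() reads: product_id, price, quantity, line_total.
lineitems = [
    {'product_id': 1, 'price': Decimal('10.00'),
     'quantity': 2, 'line_total': Decimal('20.00')},
]
order = Order.objects.place(
    payment_method=PaymentMethod.PURCHASE_ORDER,   # 'PO'
    payment_status=Order.PAYMENT_PENDING,
    order_number='PO-0001',
    total=Decimal('20.00'),
    lineitems=lineitems,
    order_status=Order.ORDER_PENDING,
    customer=customer,                             # an existing accounts.Customer instance
    delivery_date=datetime.date.today(),
)
```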
#### File: django-products-order/payments/views.py
```python
import json
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import ListView
from orders.models import Order
from accounts.models import Customer
from utils import LoginRequiredMixin
class PaymentAllocationView(LoginRequiredMixin):
template_name = 'payment_allocation.html'
def get(self,request,*args,**kwargs):
customers = Customer.objects.all()
myorders = Order.objects.get_all_orders()
if request.is_ajax():
json_data = json.dumps(myorders) #dump list as JSON
return HttpResponse(json_data,content_type='application/json')
return super(PaymentAllocationView,self).get(request,customers=customers)
def post(self,request,*args,**kwargs):
if request.is_ajax():
order_number = request.POST.get('order_no')
payment_chkbox = request.POST.get('payment_chk')
if payment_chkbox == 'true':
payment_status = True
Order.objects.filter(order_number= order_number).update(payment_status='Paid',total=0.0)
else:
payment_status = False
Order.objects.filter(order_number= order_number).update(payment_status='Pending')
Order.objects.filter(order_number= order_number).update(allocate_order=payment_status)
return super(PaymentAllocationView,self).get(request)
```
#### File: django-products-order/taxes/models.py
```python
from django.db import models
class Tax(models.Model):
"""
Represents a Tax Category
"""
TAX_PERCENTAGE = 'PE'
TAX_FIXED = 'FI'
TAX_METHODS = ((TAX_PERCENTAGE, 'Percentage'),
(TAX_FIXED, 'Fixed'))
name = models.CharField(max_length=100, unique=True)
method = models.CharField(
max_length=2, choices=TAX_METHODS,
help_text='Tax deduction method: fixed tax per product or percentage (in fraction) of price per product')
rate = models.FloatField(default=0.0)
updated_by = models.CharField(max_length=100)
updated_on = models.DateTimeField(auto_now=True)
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.CharField(max_length=100)
class Meta:
db_table = 'financial_tax'
verbose_name_plural = 'Taxes'
def __str__(self):
return '%s [%s]: %s' % (self.name, self.method, self.rate)
def calculate(self, price, quantity):
"""
Calculate tax on price & quantity based on tax method
"""
return self._calculate(price, quantity, self.method, self.rate, self.name)
@classmethod
def get_taxes(cls):
"""
Return all taxes defined in system
"""
return list(cls.objects.all())
@classmethod
def _calculate(cls, price, quantity, method, rate, name=None):
"""
Calculate tax on price & quantity based on tax method
"""
if method == cls.TAX_FIXED:
return float(rate) * float(quantity)
elif method == cls.TAX_PERCENTAGE:
return float(rate) * float(quantity) * float(price)
if name:
raise Exception('Unknown tax method "%s" defined for tax rate: "%s"' % (method, name))
raise Exception('Unknown tax method "%s"' % method)
``` |
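A short illustration of how the two tax methods above differ for the same price and quantity (the numbers are arbitrary):

```python
# Fixed: rate is charged per unit; Percentage: rate is a fraction of price per unit.
fixed = Tax._calculate(price=100.0, quantity=3, method=Tax.TAX_FIXED, rate=2.5)
percentage = Tax._calculate(price=100.0, quantity=3, method=Tax.TAX_PERCENTAGE, rate=0.16)
assert fixed == 2.5 * 3                  # 7.5
assert percentage == 0.16 * 3 * 100.0    # 48.0
```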
{
"source": "joemash/django-schools",
"score": 2
} |
#### File: django_school/classroom/admin.py
```python
from django.contrib import admin
# Register your models here.
from .models import (User, Subject, Quiz, Question,
Answer, Student, TakenQuiz, AuditEntry)
@admin.register(AuditEntry)
class AuditEntryAdmin(admin.ModelAdmin):
readonly_fields = ['log_time',]
list_display = ['action', 'username', 'log_time','show_ip_url',]
list_filter = ['action',]
def show_ip_url(self, obj):
ip = obj.ip
if ip in [None,'127.0.0.1']: return ip
from django.utils.html import format_html
url = f'https://ipinfo.io/{obj.ip}/json' # https://stackoverflow.com/a/55432323/2351696
return format_html("<a href='{url}'>{ip}</a>", url=url, ip=ip)
# admin.site.register(AuditEntry)
admin.site.register(User)
admin.site.register(Subject)
admin.site.register(Quiz)
admin.site.register(Question)
admin.site.register(Answer)
admin.site.register(Student)
admin.site.register(TakenQuiz)
``` |
{
"source": "joematune/activity",
"score": 2
} |
#### File: activity/formlibrary/serializers.py
```python
from rest_framework import serializers
from formlibrary.models import Individual, Training, Distribution, Household
from workflow.serializers import SiteProfileSerializer, ProgramSerializer
from feed.serializers import ActivityUserSerializer
class TrainingSerializer(serializers.ModelSerializer):
class Meta:
model = Training
fields = ['id', 'name']
class DistributionSerializer(serializers.ModelSerializer):
class Meta:
model = Distribution
fields = ['id', 'name']
class IndividualSerializer(serializers.ModelSerializer):
training = TrainingSerializer(many=True, read_only=True)
distribution = DistributionSerializer(many=True, read_only=True)
site = SiteProfileSerializer(read_only=True)
    program = ProgramSerializer(read_only=True)
    age = serializers.SerializerMethodField()
class Meta:
model = Individual
fields = ['id', 'first_name', 'last_name', 'id_number', 'primary_phone',
'date_of_birth', 'sex', 'age',
'training', 'distribution', 'site', 'program', 'create_date']
    def get_age(self, obj):
        # obj is the Individual instance being serialized
        return obj.age()
class HouseholdSerializer(serializers.ModelSerializer):
class Meta:
model = Household
exclude = ['postal_code']
class HouseholdListDataSerializer(serializers.ModelSerializer):
program = ProgramSerializer()
created_by = ActivityUserSerializer()
class Meta:
model = Household
fields = ['id', 'name', 'program', 'created_by', 'create_date']
```
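A small usage sketch for the serializers above, assuming at least one `Individual` and `Household` row exist; the output values shown in the comment are illustrative:

```python
# Hypothetical usage: turning model instances into plain dicts for an API response.
individual = Individual.objects.select_related('site').first()
data = IndividualSerializer(individual).data
# e.g. {'id': 1, 'first_name': 'Nate', 'last_name': 'Test', ..., 'age': 21, 'training': [], ...}
households = HouseholdListDataSerializer(Household.objects.all(), many=True).data
```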
#### File: formlibrary/tests/test_individual.py
```python
from django.test import TestCase
from formlibrary.models import Individual, Household
from workflow.models import Program
from django.urls import reverse
import datetime
from rest_framework.test import APIClient
from django.contrib.auth.models import User
class IndividualTestCase(TestCase):
fixtures = [
'fixtures/tests/users.json',
'fixtures/tests/activity-users.json',
'fixtures/tests/programs.json',
'fixtures/tests/organization.json',
]
def setUp(self):
self.user = User.objects.first()
self.household = Household.objects.create(name="MyHouse", primary_phone='40-29104782')
self.program = Program.objects.first()
self.individual = Individual.objects.create(
first_name="Nate",
last_name="Test",
date_of_birth=datetime.date(2000, 10, 10),
sex="M",
signature=False,
description="life",
household_id=self.household,
program_id=self.program.id
)
self.client = APIClient()
def test_individual_create(self):
"""Check for the Individual object"""
get_individual = Individual.objects.get(first_name="Nate")
self.assertEqual(Individual.objects.filter(
id=get_individual.id).count(), 1)
self.assertEqual(get_individual.sex, 'M')
self.assertIsInstance(get_individual.household_id, Household)
def test_individual_does_not_exists(self):
get_individual = Individual()
self.assertEqual(Individual.objects.filter(
id=get_individual.id).count(), 0)
def test_edit_individual(self):
individual = Individual.objects.first()
individual.sex = "F"
individual.save()
updated_individual = Individual.objects.get(pk=individual.pk)
self.assertEqual(updated_individual.sex, "F")
def test_delete_individual(self):
individual = Individual.objects.filter(first_name="Nate")
individual.delete()
self.assertEqual(individual.count(), 0)
def test_create_individual_request(self):
individual = {
'first_name': 'test',
'last_name': 'test_last',
'date_of_birth': '2000-10-10',
'sex': 'M',
'signature': False,
'description': 'life',
'program': '1'
}
url = reverse("individual", kwargs={'pk': 0})
self.client.force_login(self.user, backend=None)
resp = self.client.post(url, data=individual)
self.assertEqual(resp.status_code, 201)
def test_edit_individual_request(self):
url = reverse("individual_update", args=[self.individual.id])
self.client.force_login(self.user, backend=None)
data = {
'last_name': 'test_last',
'sex': 'F',
}
resp = self.client.post(url, data=data)
self.assertEqual(resp.status_code, 200)
def test_delete_individual_request(self):
url = reverse("individual", kwargs={'pk': self.individual.pk})
self.client.force_login(self.user, backend=None)
resp = self.client.delete(url)
self.assertEqual(resp.status_code, 204)
``` |
{
"source": "joembis/weather_texter",
"score": 3
} |
#### File: weather_texter/sample/message.py
```python
from twilio.rest import Client
from prettytable import PrettyTable
account_sid = '' # account sid from twilio
auth_token = '' # account auth token from twilio
to_number = 'whatsapp:+44xxxxxxxxxx' # phone number
client = Client(account_sid, auth_token)
def make_message(parsed_weather):
"""takes a dict of weather data and processes into a message to send to whatsapp"""
table = PrettyTable(['T'.center(6), '°C'.center(6), 'description', 'c%'.center(3), 'ws'.center(4)])
for p in ['00', '03', '06', '09', '12', '15', '18', '21']:
table.add_row(parsed_weather[p])
# print(table)
return table
def send_message(message):
"""uses twilio to send a whatsapp message to a phone number"""
print(message)
message = client.messages.create(
body=message,
from_='whatsapp:+14155238886',
to=to_number
)
``` |
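A sketch of the input `make_message` appears to expect: a dict keyed by three-hourly times (`'00'` through `'21'`), each mapped to a five-element row matching the table columns above. The values here are made up:

```python
# Illustrative parsed_weather structure matching the table columns in make_message().
parsed_weather = {
    h: [h + ':00', 12, 'light rain', 80, 5.2]   # time, temp, description, cloud %, wind speed
    for h in ['00', '03', '06', '09', '12', '15', '18', '21']
}
table = make_message(parsed_weather)
send_message(str(table))   # posts via Twilio; requires the credentials at the top to be filled in
```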
{
"source": "joe-mcgill/f8ForeignStuff",
"score": 2
} |
#### File: joe-mcgill/f8ForeignStuff/save_for_affinities_mlof.py
```python
import pandas as pd
import pymysql
import random
import string
import subprocess
import time
from multiprocessing import Pool
import multiprocessing
from tqdm import tqdm
import sys
#mysql database connection
rand_str = lambda n: ''.join([random.choice(string.ascii_lowercase) for i in range(n)])
alleles=[i.strip() for i in open('all_alleles.txt').readlines()]
print(alleles)
def get_promiscuity(peptide,alleles=alleles):
db2=pymysql.connect(host='127.0.0.1',user='jmcgill',passwd='<PASSWORD>!',db='netmhciipan31')
sql_comm="select promiscuity from deimmunizecas9_donors where (peptide = \'%s\') and (threshold = %d);"%(peptide,threshold)
cur=db2.cursor()
cur.execute(sql_comm)
results=cur.fetchall()
if len(results)==0:
s = rand_str(10)
outfile=open('./tmp/'+s+'.fasta','w')
outfile.write('>temp\n')
outfile.write(peptide)
outfile.close()
bash_command = 'for allele in ' + ' '.join(list(alleles.index)) + '; do netmhciipan -f ./tmp/'+ s+'.fasta -a $allele -length '+str(len(peptide))+'; done'
process = subprocess.Popen(bash_command, shell=True, stdout=subprocess.PIPE)
process_results=str(process.communicate()).split('\\n')
#print(process_results[:5])
# terminal output of netmhc run as a list
line_list = list([1 if float(i.split()[9])<threshold else 0 for i in process_results if len(i.split()) in [11,12] and i.split()[0]=='1'])
#line_list = [i+['NA'] if len(i)==11 else i for i in line_list ]
#print(line_list)
prom=sum([float(line_list[i])*((alleles[i])) for i in range(len(line_list))])
sql_comm="insert into deimmunizecas9b (peptide,changes,blosum_scores,promiscuity,threshold) values (\'%s\',\'%s\',\'%s\',%f,%f);"%(peptide,changes,blosum_scores,prom,threshold)
cur=db2.cursor()
cur.execute(sql_comm)
db2.commit()
try:
subprocess.call('rm ./tmp/%s.fasta'%(s))
except:
pass
return prom
else:
return list(results)[0][0]
db2.close()
'''
for pool in range(1):
for number_of_changes in [10]:
for run in range(25):
print('%s Pool: %s Run: %d Number: %d Last Run: %.2fm'%(time.asctime(),pool,run,number_of_changes,time_to_print/60))
start_total=time.time()
simulated_annealing(number_of_changes,20,starting_sequence,pool,number_of_iterations=10000,temperature_decrease_alpha=.001)
end_total=time.time()
time_to_print=end_total-start_total
'''
``` |
{
"source": "JoeMcMahon87/MarkovTweets",
"score": 3
} |
#### File: JoeMcMahon87/MarkovTweets/twitter_bot.py
```python
from creds import *
import tweepy
import markovify
import os
import argparse
# Execute in script directory
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
def generate_tweet(test_mode=False):
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Read content to feed Markov model
with open("tweet-content.txt", 'r') as f:
text = f.read()
text_model = markovify.NewlineText(text)
# Generate the tweet text (use Twitter regular form of 140 characters)
tweet = text_model.make_short_sentence(140)
if test_mode:
print(tweet)
else:
api.update_status(tweet)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate a random tweet using Markov chain generation and post it.')
parser.add_argument('--test', action='store_true', help='Test the functionality by printing the tweet')
args = parser.parse_args()
generate_tweet(args.test)
``` |
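`markovify.NewlineText` treats each line of the corpus as an independent sentence, so `tweet-content.txt` is presumably one past tweet per line. A minimal offline check that exercises the model without any Twitter credentials:

```python
import markovify

# Tiny in-memory corpus, one "tweet" per line, mirroring the expected file format.
corpus = "\n".join([
    "shipping a tiny bot today",
    "markov chains are surprisingly fun to tweet with",
    "today the bot learned a brand new trick",
])
model = markovify.NewlineText(corpus)
print(model.make_short_sentence(140))   # may be None on a corpus this small
```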
{
"source": "joemcnease/Snake",
"score": 4
} |
#### File: joemcnease/Snake/SnakeBasic.py
```python
import sys
import random
import pygame
class Snake:
""" Initialize Snake class with starting dimensions, position,
and velocity of the snake. """
snake_body = []
def __init__(self, x, y, width, height, velocity, color=(0,255,0), score=0):
"""
(x, y): denotes always positive postion from top left (0,0).
(width, height): describes height and width of snake.
velocity: defines how much (x, y) values change with each move.
"""
self.x = x
self.y = y
self.width = width
self.height = height
self.velocity = velocity
self.color = color
# self.hitbox = (self.x, self.y, self.width, self.height)
self.score = score
        self.high_score = 0
def draw_snake(self):
""" Redraws rectangles that are the "Snake". """
pygame.draw.rect(screen, self.color, (self.x, self.y, self.width, self.height))
# self.hitbox = (self.x, self.y, self.width, self.height)
# pygame.draw.rect(screen, (0, 0, 255), self.hitbox, 2)
# Basic movement function
def move_left(self):
if self.x > 0:
self.x -= self.velocity
def move_right(self):
if self.x < win.width - player1.width:
self.x += self.velocity
def move_up(self):
if self.y > 0:
self.y -= self.velocity
def move_down(self):
if self.y < win.height - player1.height:
self.y += self.velocity
# Functions to make game competitive
def death(self):
""" If the snake meets the boundary of the screen, he/she will die. """
death_sound.play()
Snake.snake_body = []
Food.food = []
self.score = 0
player1.x, player1.y = 400, 400
def add_score(self):
score_sound.play()
self.score += 1
if self.score > self.high_score:
self.high_score = self.score
class Food:
""" Food class. """
food = []
def __init__(self, x, y, radius, color=(255,0,0)):
""" Gives food basic pygame rect attributes. """
self.x = x
self.y = y
self.radius = radius
self.color = color
def draw_food(self):
""" Redraws circles (Food) to screen. """
pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius)
class Window:
""" Sets basic window parameters. """
def __init__(self, width, height):
self.width = width
self.height = height
# Should probably put this in Window class
def redraw_game_window():
"""
Main graphics function.
1.) Screen is filled black
2.) Player1's "head" is drawn
3.) Player1's "body" is drawn
4.) Food is drawn
"""
screen.fill((0,0,0))
player1.draw_snake()
screen.blit(font.render("High Score: " + str(player1.high_score), True, (255, 255, 255)),
(win.width/10, win.height/100))
screen.blit(font.render("Score: " + str(player1.score), True, (255, 255, 255)),
(win.width/1.5, win.height/100))
pygame.draw.line(screen, (255,255,255), (0, 50), (win.width, 50), 3)
for s in Snake.snake_body:
s.draw_snake()
for f in Food.food:
f.draw_food()
pygame.display.update()
# Initialize pygame and mixer (sounds)
pygame.mixer.pre_init(44100, -16, 2, 512)
pygame.init()
# Vital objects and variables to instantiate and initialize
win = Window(800, 800)
screen = pygame.display.set_mode((win.width, win.height))
pygame.display.set_caption("Snake")
font = pygame.font.Font(None, 72)
# Create instance of sound
death_sound = pygame.mixer.Sound('WallHit.ogg')
score_sound = pygame.mixer.Sound('ScoreUpSound.ogg')
pygame.mixer.music.load('HumbleMatch.ogg')
pygame.mixer.music.play(-1)
player1 = Snake(400, 400, 20, 20, 10)
# Delay time for Main Loop
game_speed = 30
run = True
# Extra Screens for score, etc...
# Not sure how to do this. Maybe increase window size and keep player boundary the
# same. Then print score to unused area of window.
# This can be added to Snake class, but should have a default value.
# The default value (currently 'left') can also be randomly chosen,
# you just have to make a list and use random.choice(list)
direction = ['left']
# Main Loop
# This is where most of the game logic lies
while run:
pygame.time.delay(game_speed)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
    # Checks if the player hits the edge of the window; if so, the player1.death() function is called.
if player1.x == win.width - player1.width or player1.x == 0 or player1.y == win.height - player1.height or player1.y == 0:
player1.death()
# Checks if head contacts tail and if that is True, the player1.death() function is called
#for snake in Snake.snake_body:
# if snake.x > player1.x and snake.x < player1.x + player1.width:
# if snake.y > player1.y - player1.height and snake.y < player1.y:
# player1.death()
# If there is no food, add a piece.
    # If there is, check whether the snake head touches it; if so, remove that food, add to the score, and append a new segment to snake_body.
if Food.food == []:
Food.food.append(Food(random.randrange(0, win.width - 5), random.randrange(0, win.height -5), 5))
else:
for f in Food.food:
if f.x + f.radius > player1.x - (player1.width/2) and f.x - f.radius < player1.x + player1.width:
if f.y + f.radius > player1.y - (player1.height/3) and f.y - f.radius < player1.y + player1.height:
Food.food.remove(f)
player1.add_score()
# Add segment to snake
tail = Snake(player1.x, player1.y, player1.width, player1.height, player1.velocity)
Snake.snake_body.append(tail)
# Move segments in reverse order
for index in range(len(Snake.snake_body) - 1, 0, -1):
x = Snake.snake_body[index - 1].x
y = Snake.snake_body[index - 1].y
Snake.snake_body[index].x = x
Snake.snake_body[index].y = y
# If length of snake > 0, then move only snake_body item to previous head position
if len(Snake.snake_body) > 0:
x = player1.x
y = player1.y
Snake.snake_body[0].x = x
Snake.snake_body[0].y = y
# Calls Snake.move() methods if direction list contains 'direction'.
if direction[-1] == 'left':
player1.move_left()
elif direction[-1] == 'right':
player1.move_right()
elif direction[-1] == 'up':
player1.move_up()
elif direction[-1] == 'down':
player1.move_down()
# This is how you check for keypresses in Pygame.
# pygame.key.get_pressed() is a dictionary with boolean values.
# If key == K_LEFT, K_RIGHT, K_UP, K_DOWN then it executes code.
if pygame.key.get_pressed()[pygame.K_LEFT]:
direction.append('left')
del direction[:-1]
elif pygame.key.get_pressed()[pygame.K_RIGHT]:
direction.append('right')
del direction[:-1]
elif pygame.key.get_pressed()[pygame.K_UP]:
direction.append('up')
del direction[:-1]
elif pygame.key.get_pressed()[pygame.K_DOWN]:
direction.append('down')
del direction[:-1]
redraw_game_window()
pygame.quit()
``` |
{
"source": "joemeister/httpagentparser",
"score": 3
} |
#### File: joemeister/httpagentparser/tests.py
```python
import unittest
import time
import httpagentparser
detect = httpagentparser.detect
simple_detect = httpagentparser.simple_detect
data = (
# tuple of tuples
# tuple (agent-string, expected result of simple_detect, expected result of detect)
("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3861.0 Safari/537.36 Edg/77.0.230.2",
('Windows 10', 'ChromiumEdge 172.16.17.32'),
{'bot': False, 'os': {'version': '10', 'name': 'Windows'}, 'browser': {'version': '172.16.17.32', 'name': 'ChromiumEdge'}},),
("Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-GB; rv:1.9.0.10) Gecko/2009042315 Firefox/3.0.10",
('MacOS Macintosh X 10.5', 'Firefox 3.0.10'),
{'bot': False, 'flavor': {'version': 'X 10.5', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '3.0.10', 'name': 'Firefox'}},),
("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_6) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.3 Safari/534.24,gzip(gfe)",
('MacOS Macintosh X 10.6.6', 'Chrome 11.0.696.3'),
{'bot': False, 'flavor': {'version': 'X 10.6.6', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '11.0.696.3', 'name': 'Chrome'}},),
("Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2) Gecko/20100308 Ubuntu/10.04 (lucid) Firefox/3.6 GTB7.1",
('Ubuntu Linux 10.04', 'Firefox 3.6'),
{'bot': False, 'dist': {'version': '10.04', 'name': 'Ubuntu'}, 'os': {'name': 'Linux'}, 'browser': {'version': '3.6', 'name': 'Firefox'}},),
("Mozilla/5.0 (Linux; U; Android 2.2.1; fr-ch; A43 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
('Android Linux 2.2.1', 'AndroidBrowser'),
{'bot': False, 'dist': {'version': '2.2.1', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'name': 'AndroidBrowser'}},),
("Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3",
('iPhone iOS', 'Safari 3.0'),
{'bot': False, 'os': {'name': 'iOS'}, 'dist': {'name': 'iPhone'}, 'browser': {'version': '3.0', 'name': 'Safari'}},),
("Mozilla/5.0 (X11; CrOS i686 0.0.0) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.27 Safari/534.24,gzip(gfe)",
('ChromeOS 0.0.0', 'Chrome 11.0.696.27'),
{'bot': False, 'os': {'name': 'ChromeOS', 'version': '0.0.0'}, 'browser': {'name': 'Chrome', 'version': '11.0.696.27'}},),
("Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.1) Opera 7.02 [en]",
('Windows XP', 'Opera 7.02'),
{'bot': False, 'os': {'name': 'Windows', 'version': 'XP'}, 'browser': {'name': 'Opera', 'version': '7.02'}},),
("Opera/9.64(Windows NT 5.1; U; en) Presto/2.1.1",
('Windows XP', 'Opera 9.64'),
{'bot': False, 'os': {'name': 'Windows', 'version': 'XP'}, 'browser': {'name': 'Opera', 'version': '9.64'}},),
("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
('Windows 7', 'Microsoft Internet Explorer 10.0'),
{'bot': False, 'os': {'version': '7', 'name': 'Windows'}, 'browser': {'version': '10.0', 'name': 'Microsoft Internet Explorer'}},),
("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; yie8)",
('Windows 7', 'Microsoft Internet Explorer 9.0'),
{'bot': False, 'os': {'version': '7', 'name': 'Windows'}, 'browser': {'version': '9.0', 'name': 'Microsoft Internet Explorer'}},),
("Mozilla/5.0 (MSIE 7.0; Macintosh; U; SunOS; X11; gu; SV1; InfoPath.2; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648",
('Macintosh', 'Microsoft Internet Explorer 7.0'),
{'bot': False, 'os': {'name': 'Macintosh'}, 'browser': {'version': '7.0', 'name': 'Microsoft Internet Explorer'}}),
("Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; GTB6.5; QQDownload 534; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; SLCC2; .NET CLR 2.0.50727; Media Center PC 6.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729)",
('Windows XP', 'Microsoft Internet Explorer 8.0'),
{'bot': False, 'os': {'version': 'XP', 'name': 'Windows'}, 'browser': {'version': '8.0', 'name': 'Microsoft Internet Explorer'}}),
('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; SLCC1; .NET CLR 2.0.50727; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618; .NET4.0C)',
('Windows XP', 'Microsoft Internet Explorer 8.0'),
{'bot': False, 'os': {'version': 'XP', 'name': 'Windows'}, 'browser': {'version': '8.0', 'name': 'Microsoft Internet Explorer'}},),
("Opera/9.80 (X11; Linux i686; U; en) Presto/2.9.168 Version/11.50",
("Linux", "Opera 11.50"),
{'bot': False, "os": {"name": "Linux"}, "browser": {"name": "Opera", "version": "11.50"}},),
("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5) Gecko/20060127 Netscape/8.1",
("Windows XP", "Netscape 8.1"),
{'bot': False, 'os': {'name': 'Windows', 'version': 'XP'}, 'browser': {'name': 'Netscape', 'version': '8.1'}},),
("Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.2; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.40.1 Safari/534.6 TouchPad/1.0",
("WebOS Linux 3.0.2", "WOSBrowser"),
{'bot': False, 'dist': {'name': 'WebOS', 'version': '3.0.2'}, 'os' : {'name' : 'Linux'}, 'browser': {'name': 'WOSBrowser'}},),
("Mozilla/5.0 (iPad; CPU OS 5_0_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A405 Safari/7534.48.3",
('IPad iOS 5.0.1', 'Safari 5.1'),
{'bot': False, 'os': {'name': 'iOS'}, 'dist': {'version': '5.0.1', 'name': 'IPad'}, 'browser': {'version': '5.1', 'name': 'Safari'}},),
("AppleCoreMedia/1.0.0.10B329 (iPad; U; CPU OS 6_1_3 like Mac OS X; en_us)",
('IPad iOS 6.1.3', 'Unknown Browser'),
{'bot': False, 'dist': {'name': 'IPad', 'version': '6.1.3'}, 'os': {'name': 'iOS'}},),
("Mozilla/5.0 (iPad; CPU OS 7_1 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D167 Safari/9537.53",
('IPad iOS 7.1', 'Safari 7.0'),
{'bot': False, 'browser': {'name': 'Safari', 'version': '7.0'}, 'dist': {'name': 'IPad', 'version': '7.1'}, 'os': {'name': 'iOS'}}),
("Mozilla/5.0 (Linux; U; Android 3.2.1; en-gb; Transformer TF101 Build/HTK75) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
('Android Linux 3.2.1', 'AndroidBrowser'),
{'bot': False, 'dist': {'version': '3.2.1', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'name': 'AndroidBrowser'}},),
("Mozilla/5.0 (BlackBerry; U; BlackBerry 9700; en-US) AppleWebKit/534.8+ (KHTML, like Gecko) Version/6.0.0.448 Mobile Safari/534.8+",
('Blackberry', 'Safari 6.0.0.448'),
{'bot': False, 'os': {'name': 'Blackberry'}, 'browser': {'version': '6.0.0.448', 'name': 'Safari'}},),
("Mozilla/5.0 (PlayBook; U; RIM Tablet OS 1.0.0; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.7 Safari/534.11+",
('BlackberryPlaybook', 'Safari 7.1.0.7'),
{'bot': False, 'dist': {'name': 'BlackberryPlaybook'}, 'browser': {'version': '7.1.0.7', 'name': 'Safari'}},),
("Opera/9.80 (Android 2.3.5; Linux; Opera Mobi/build-1203300859; U; en) Presto/2.10.254 Version/12.00",
('Android Linux 2.3.5', 'Opera Mobile 12.00'),
{'bot': False, 'dist': {'version': '2.3.5', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '12.00', 'name': 'Opera Mobile'}},),
("Mozilla/5.0 (Linux; U; Android 2.3.5; en-in; HTC_DesireS_S510e Build/GRJ90) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
('Android Linux 2.3.5', 'AndroidBrowser'),
{'bot': False, 'dist': {'version': '2.3.5', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'name': 'AndroidBrowser'}},),
("Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; es-es) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3",
('iPhone iOS 5.1.1', 'ChromeiOS 19.0.1084.60'),
{'bot': False, 'os': {'name': 'iOS'}, 'dist': {'version': '5.1.1', 'name': 'iPhone'}, 'browser': {'version': '19.0.1084.60', 'name': 'ChromeiOS'}}),
("Mozilla/5.0 (X11; Linux x86_64; rv:7.0.1) Gecko/20111011 Firefox/7.0.1 SeaMonkey/2.4.1",
("Linux", "SeaMonkey 2.4.1"),
{'bot': False, "os" : {"name": "Linux"}, "browser": {"name": "SeaMonkey", "version": "2.4.1"}}),
("Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
("Ubuntu Linux", "Firefox 16.0"),
{'bot': False, 'dist': {'name': 'Ubuntu'}, 'os': {'name': 'Linux'}, 'browser': {'version': '16.0', 'name': 'Firefox'}},),
("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.1 Safari/537.17",
("Linux", "Chrome 24.0.1312.1"),
{'bot': False, "os" : {"name": "Linux"}, "browser": {"name": "Chrome", "version": "24.0.1312.1"}}),
("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.19 (KHTML, like Gecko) Chrome/25.0.1323.1 Safari/537.19",
("MacOS Macintosh X 10.8.2", "Chrome 25.0.1323.1"),
{'bot': False, 'flavor': {'name': 'MacOS', 'version': 'X 10.8.2'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '25.0.1323.1', 'name': 'Chrome'}},),
("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/536.26.14 (KHTML, like Gecko) Version/6.0.1 Safari/536.26.14",
("MacOS Macintosh X 10.8.2", "Safari 6.0.1"),
{'bot': False, 'flavor': {'name': 'MacOS', 'version': 'X 10.8.2'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '6.0.1', 'name': 'Safari'}},),
("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
("Windows 7", "Chrome 23.0.1271.64"),
{'bot': False, 'os': {'name': 'Windows', 'version': '7'}, 'browser': {'version': '23.0.1271.64', 'name': 'Chrome'}},),
("Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)",
("Windows XP", "Microsoft Internet Explorer 8.0"),
{'bot': False, 'os': {'name': 'Windows', 'version': 'XP'}, 'browser': {'version': '8.0', 'name': 'Microsoft Internet Explorer'}},),
("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
("Windows 7", "Microsoft Internet Explorer 9.0"),
{'bot': False, 'os': {'name': 'Windows', 'version': '7'}, 'browser': {'version': '9.0', 'name': 'Microsoft Internet Explorer'}},),
("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20100101 Firefox/15.0.1",
("Windows 7", "Firefox 15.0.1"),
{'bot': False, 'os': {'name': 'Windows', 'version': '7'}, 'browser': {'version': '15.0.1', 'name': 'Firefox'}},),
("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
("Windows 7", "Safari 5.1.7"),
{'bot': False, 'os': {'name': 'Windows', 'version': '7'}, 'browser': {'version': '5.1.7', 'name': 'Safari'}},),
("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36 OPR/17.0.1241.53",
("Windows 7", "Opera 17.0.1241.53"),
{'bot': False, 'os': {'name': 'Windows', 'version': '7'}, 'browser': {'version': '17.0.1241.53', 'name': 'Opera'}},),
('Mozilla/5.0+(X11;+CrOS+i686+2465.163.0)+AppleWebKit/537.1+(KHTML,+like+Gecko)+Chrome/21.0.1180.91+Safari/537.1',
('ChromeOS 2465.163.0', 'Chrome 21.0.1180.91'),
{'bot': False, 'os': {'version': '2465.163.0', 'name': 'ChromeOS'}, 'browser': {'version': '21.0.1180.91', 'name': 'Chrome'}},),
('Mozilla/5.0 (Linux; U; en-us; KFOT Build/IML74K) AppleWebKit/535.19 (KHTML, like Gecko) Silk/2.2 Safari/535.19 Silk-Accelerated=true',
('Linux', 'Safari 535.19'),
{'bot': False, 'os': {'name': 'Linux'}, 'browser': {'version': '535.19', 'name': 'Safari'}}),
('Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko',
('Windows 8.1', 'Microsoft Internet Explorer 11.0'),
{'bot': False, 'os': {'name': 'Windows', 'version': '8.1'}, 'browser': {'version': '11.0', 'name': 'Microsoft Internet Explorer'}},),
('Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
('Unknown OS', 'GoogleBot 2.1'),
{'bot': True, 'browser': {'name': 'GoogleBot', 'version': '2.1'}},),
('"Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"',
('Unknown OS', 'BingBot 2.0'),
{'bot': True, 'browser': {'name': 'BingBot', 'version': '2.0'}}),
('Mozilla/5.0 (compatible; YandexBot/3.0)',
('Unknown OS', 'YandexBot 3.0'),
{'bot': True, 'browser': {'name': 'YandexBot', 'version': '3.0'}}),
('Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
('Unknown OS', 'BaiduBot 2.0'),
{'bot': True, 'browser': {'name': 'BaiduBot', 'version': '2.0'}}),
('Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Radar 4G)',
('Windows Phone 7.5', 'Microsoft Internet Explorer 9.0'),
{'bot': False, 'browser': {'name': 'Microsoft Internet Explorer', 'version': '9.0'}, 'os': {'name': 'Windows Phone', 'version': '7.5'}}),
('Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0; SAMSUNG; GT-i8700)',
('Windows Phone 7.0', 'Microsoft Internet Explorer 7.0'),
{'bot': False, 'browser': {'name': 'Microsoft Internet Explorer', 'version': '7.0'}, 'os': {'name': 'Windows Phone', 'version': '7.0'}}),
('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; HTC_HD2_T8585; Windows Phone 6.5)',
('Windows Phone 6.5', 'Microsoft Internet Explorer 6.0'),
{'bot': False, 'browser': {'name': 'Microsoft Internet Explorer', 'version': '6.0'}, 'os': {'name': 'Windows Phone', 'version': '6.5'}}),
('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; HTC_HD2_T8585; Windows Phone 6.5)',
('Windows Phone 6.5', 'Microsoft Internet Explorer 6.0'),
{'bot': False, 'browser': {'name': 'Microsoft Internet Explorer', 'version': '6.0'}, 'os': {'name': 'Windows Phone', 'version': '6.5'}}),
('Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20110814 Firefox/6.0 Google (+https://developers.google.com/+/web/snippet/)',
('Windows 7', 'GoogleBot'),
{'bot': True, 'browser': {'name': 'GoogleBot'}, 'os': {'name': 'Windows', 'version': '7'}}),
('facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)',
('Unknown OS', 'FacebookExternalHit 1.1'),
{'bot': True, 'browser': {'name': 'FacebookExternalHit', 'version': '1.1'},}),
('runscope-radar/2.0',
('Unknown OS', 'RunscopeRadar'),
{'bot': True, 'browser': {'name': 'RunscopeRadar'}}),
('Mozilla/5.0 (Mobile; Windows Phone 8.1; Android 4.0; ARM; Trident/7.0; Touch; rv:11.0; IEMobile/11.0; NOKIA; Lumia 720) like iPhone OS 7_0_3 Mac OS X AppleWebKit/537 (KHTML, like Gecko) Mobile Safari/537',
('Windows Phone 8.1', 'Microsoft Internet Explorer 11.0'),
{'os': {'version': '8.1', 'name': 'Windows Phone'}, 'bot': False, 'browser': {'version': '11.0', 'name': 'Microsoft Internet Explorer'}}),
('5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 YaBrowser/16.2.0.1818 (beta) Safari/537.36',
('Linux', 'Yandex.Browser 16.2.0.1818'),
{'os': {'name': 'Linux'}, 'bot': False, 'browser': {'version': '16.2.0.1818', 'name': 'Yandex.Browser'}}),
('Mozilla/5.0 (Linux; Android 8.0.0; Nexus 5X Build/OPR6.170623.023) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.84 Mobile Safari/537.36',
('Android Linux 8.0.0', 'Chrome 62.0.3202.84'),
{'bot': False, 'browser': {'name': 'Chrome', 'version': '62.0.3202.84'}, 'dist': {'name': 'Android', 'version': '8.0.0'}, 'os': {'name': 'Linux'}}),
('Mozilla/5.0 (Android 6.0.1; Mobile; rv:63.0) Gecko/63.0 Firefox/63.0',
('Android 6.0.1', 'Firefox 63.0'),
{'dist': {'name': 'Android', 'version': '6.0.1'}, 'bot': False, 'browser': {'name': 'Firefox', 'version': '63.0'}}),
)
class TestHAP(unittest.TestCase):
def setUp(self):
self.harass_repeat = 1000
self.data = data
def test_simple_detect(self):
for agent, simple_res, res in data:
self.assertEqual(simple_detect(agent), simple_res)
def test_detect(self):
for agent, simple_res, res in data:
detected = detect(agent)
del detected['platform']
self.assertEqual(detected, res)
def test_bot(self):
s = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
d = detect(s)
self.assertTrue(d['bot'])
def test_harass(self):
then = time.time()
for agent, simple_res, res in data * self.harass_repeat:
detect(agent)
time_taken = time.time() - then
no_of_tests = len(self.data) * self.harass_repeat
print("\nTime taken for %s detections: %s" %
(no_of_tests, time_taken))
print("Time taken for single detection: %f" %
(time_taken / (len(self.data) * self.harass_repeat)))
def test_fill_none(self):
self.assertEqual(detect(''), {'platform': {'version': None, 'name': None}}) # default
self.assertEqual(detect('', fill_none=False), {'platform': {'version': None, 'name': None}})
result = detect('', fill_none=True)
self.assertEqual(result['os']['name'], None)
self.assertEqual(result['browser']['version'], None)
result = detect('Linux; Android', fill_none=True)
self.assertEqual(result['os']['name'], 'Linux')
self.assertEqual(result['os']['version'], None)
self.assertEqual(result['browser']['name'], 'AndroidBrowser')
self.assertEqual(result['browser']['version'], None)
if __name__ == '__main__':
unittest.main()
``` |
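For a quick manual check outside the test suite, the two public helpers exercised above can be called directly; the expected output shown here is taken from the corresponding entry in `data`:

```python
import httpagentparser

ua = ("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 "
      "(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11")
print(httpagentparser.simple_detect(ua))   # ('Windows 7', 'Chrome 23.0.1271.64')
print(httpagentparser.detect(ua))          # dict with 'os', 'browser', 'bot', 'platform' keys
```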
{
"source": "joemenna/Game-Survey",
"score": 3
} |
#### File: joemenna/Game-Survey/app.py
```python
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
ENV = 'prod'
if ENV == 'dev':
app.debug = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:chichi30@localhost/Game Survey'
else:
    app.debug = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://kmraszhsnxacee:425aa49af1d069f679c1669fb87717000e27fd2514f51ec09fdfad69204c40c1@ec2-52-71-107-99.compute-1.amazonaws.com:5432/d1d7rrdqbb7ud3'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Feedback(db.Model):
__tablename__ = 'feedback'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(200), unique=True)
email = db.Column(db.String(200), unique=True)
age = db.Column(db.Integer)
genre = db.Column(db.String(200))
hours_played = db.Column(db.String(200))
favorite = db.Column(db.String)
comments = db.Column(db.Text())
def __init__(self, name, email, age, genre, hours_played, favorite, comments):
self.name = name
self.email = email
self.age = age
self.genre = genre
self.hours_played = hours_played
self.favorite = favorite
self.comments = comments
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST'])
def submit():
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
age = request.form['age']
genre = request.form['genre']
hours_played = request.form['hours-played']
favorite = request.form['favorite']
comments = request.form['comment']
#print(name, email, age, genre, hours_played, favorite, comments )
if db.session.query(Feedback).filter(Feedback.name == name).count() == 0 and db.session.query(Feedback).filter(Feedback.email == email).count() == 0:
data = Feedback(name, email, age, genre, hours_played, favorite, comments)
db.session.add(data)
db.session.commit()
return render_template('success.html')
return render_template('index.html', message='You have already taken this survey!')
if __name__=='__main__':
app.run()
``` |
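The app above assumes the `feedback` table already exists. A one-off sketch for creating it locally with Flask-SQLAlchemy, assuming the module is importable as `app`:

```python
# Run once to create the feedback table in the configured database.
from app import app, db

with app.app_context():
    db.create_all()
```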
{
"source": "joemeree/ai_engineering",
"score": 3
} |
#### File: datarobotapiwrapper/business_logic/datarobot_client.py
```python
from http import HTTPStatus
import pandas as pd
import requests
from spyne import ArgumentError
class DataRobotClient:
def __init__(self, prediction_server):
self.prediction_url = prediction_server.server_url
self.https_session = self.get_https_session(prediction_server)
def get_https_session(self, prediction_server):
https_session = requests.Session()
https_session.headers.update(
{'datarobot-key': prediction_server.datarobot_key,
'Content-Type': 'application/json',
'x-forwarded-proto': 'https'})
https_session.auth = (
prediction_server.datarobot_username, prediction_server.api_token)
return https_session
    def get_predictions(self, features, groupby_ids):
        prediction_url = self.get_prediction_url(groupby_ids)
predictions_response = self._request_predictions(features, prediction_url)
predictions = self._parse_response(
predictions_response, features.index)
return predictions
    def get_prediction_url(self, groupby_ids):
        if len(groupby_ids) == 1:
            full_url = f'{self.prediction_url}/predApi/v1.0/deployments/{groupby_ids[0]}/predictions'
        else:
            full_url = f'{self.prediction_url}/predApi/v1.0/' \
                       f'{groupby_ids[0]}/{groupby_ids[1]}/predict'
        return full_url
def _request_predictions(self, features, full_url):
predictions_response = self.https_session.post(
full_url,
data=features.to_json(orient='records'))
if predictions_response.status_code != HTTPStatus.OK:
raise ArgumentError(
faultstring=predictions_response.content.decode('utf-8'))
return predictions_response.json()
@staticmethod
def _parse_response(predictions_json, index):
unordered = {item['rowId']: item['prediction']
for item in predictions_json['data']}
# The order of predictions which are returned by the server does not
# match the order of the rows which were sent for scoring.
# The server uses 'rowId' field to indicate the original order.
ordered = [unordered[key] for key in sorted(unordered.keys())]
return pd.DataFrame({'prediction': ordered}, index=index)
def add_predictions(self, prepared_df, prediction_column):
grouped_predictions = []
if 'deployment_id' in prepared_df.columns:
groupby_columns = ['deployment_id']
else:
groupby_columns = ['project_id', 'model_id']
grouped_features = prepared_df.groupby(groupby_columns)
        for groupby_ids, features in grouped_features:
            # http://pandas.pydata.org/pandas-docs/stable/groupby.html#iterating-through-groups
            ids = [groupby_ids]
            if isinstance(groupby_ids, tuple):
                ids = list(groupby_ids)
predictions = self.get_predictions(
features,
ids)
grouped_predictions.append(predictions)
prepared_df[prediction_column] = \
pd.concat(grouped_predictions)['prediction']
return prepared_df
```
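A minimal usage sketch for `add_predictions` above. It assumes a `prediction_server` object exposing the attributes the constructor reads (`server_url`, `datarobot_key`, `datarobot_username`, `api_token`) and a deployment that actually exists on that server; the ids and feature values are placeholders:

```python
import pandas as pd

# Rows are grouped and routed by the 'deployment_id' column.
features = pd.DataFrame({
    'deployment_id': ['5b2b44639c5b179c69bd3799', '5b2b44639c5b179c69bd3799'],
    'column1': ['a', 'b'],
    'column2': [1.0, 2.0],
})
client = DataRobotClient(prediction_server)   # prediction_server configured elsewhere
scored = client.add_predictions(features, prediction_column='prediction')
print(scored[['column1', 'prediction']])
```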
#### File: business_logic/tests/sample_user_src_pandas.py
```python
import pandas as pd
# for test we can use fake model_id and project_id, since we are not going to make real api calls
MODEL_ID1 = '5b2b44639c5b179c69bd3799'
MODEL_ID2 = 'FFFF44639c5b179c69bdFFFF'
PROJECT_ID1 = '5b2b4281ff83430f31457be0'
PROJECT_ID2 = 'FFFF4281ff83430f3145FFFF'
# Incoming data can't use this feature/column names(They are used in pipeline)
RESERVED_COLUMNS = ('project_id', 'model_id', 'premium', 'predicted_price')
def data_prepare(features_df):
"""This function runs before sending data to scoring.
Here input data can be modified, prediction model can be different for each line.
:param features_df: features from SOAP request as Pandas DataFrame + 'premium' column
:return: DataFrame for scoring with columns 'project_id' and 'model_id'
"""
# we need project_id:model_id combination for scoring
projects_models = [
{'project_id': PROJECT_ID1,
'model_id': MODEL_ID1},
{'project_id': PROJECT_ID2,
'model_id': MODEL_ID2},
]
projects_models_df = pd.DataFrame(projects_models)
# add to each data row 'project_id' and 'model_id' for each used model
# so in this example, each row will be scored twice(once by each model)
projects_models_df['key'] = 0
features_df['key'] = 0
result_df = features_df.merge(projects_models_df, how='outer')
del result_df['key']
return result_df
def business_logic(scored_df):
price_adjustment = scored_df['predicted_price'].mean() / scored_df['predicted_price'].max()
return (price_adjustment, scored_df)
# This code is executed only when the business logic is saved as a standalone
# Python file and executed by calling `python demo_code.py`.
# It is a way to test business logic outside the price adjustment solution.
if __name__ == '__main__':
test_data = [
{'column1': 'c1_data1',
'column2': 'c2_data1'},
{'column1': 'c1_data2',
'column2': 'c2_data2'},
{'column1': 'c1_data3',
'column2': 'c2_data3'},
]
input_df = pd.DataFrame(test_data)
res = data_prepare(input_df)
res['predicted_price'] = 3
res['premium'] = 4
print(res)
print(business_logic(res))
```
#### File: datarobotapiwrapper/logs/firehose.py
```python
import boto3
from django.conf import settings
#firehose_client = boto3.client('firehose')
def put_record(data_list):
"""AWS Firehorse recommends newline (\n) or some other character
unique within the data delimeter for records"""
records = [{'Data': record + '\n'} for record in data_list]
# max size for 1 record - 1000kb, so better to split rows
""" response = firehose_client.put_record_batch(
DeliveryStreamName=settings.DELIVERY_STREAM,
Records=records) """
response = ""
# print('response', response)
```
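The actual delivery call is stubbed out in the module above. A sketch of an enabled version, split into chunks because `put_record_batch` accepts at most 500 records per call; it assumes `settings.DELIVERY_STREAM` names a real Firehose delivery stream:

```python
import boto3
from django.conf import settings

def put_record_enabled(data_list, chunk_size=500):
    """Hypothetical enabled sender: PutRecordBatch caps each call at 500 records,
    so larger lists are split into chunks."""
    firehose_client = boto3.client('firehose')
    for start in range(0, len(data_list), chunk_size):
        records = [{'Data': record + '\n'}
                   for record in data_list[start:start + chunk_size]]
        firehose_client.put_record_batch(
            DeliveryStreamName=settings.DELIVERY_STREAM,
            Records=records)
```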
#### File: logs/tests/test_firehose.py
```python
from unittest.mock import MagicMock
from price.logs import firehose
def test_put_record():
mock_firehose = MagicMock()
firehose.firehose_client = mock_firehose
records = ['rec1', 'rec2', 'rec3']
records_for_firehose = [{'Data': record + '\n'} for record in records]
firehose.put_record(records)
mock_firehose.put_record_batch.assert_called_once_with(
DeliveryStreamName='DELIVERY_STREAM',
Records=records_for_firehose)
assert mock_firehose.put_record_batch.call_count == 1
```
#### File: logs/tests/test_s3events.py
```python
import boto3
from django.test import TestCase
from moto import mock_s3
from price.logs.s3_helpers import s3_create_bucket
from ..s3events import save_file, get_s3_key, s3_get_file_text, \
parse_by_logic_connector, filter_by_logic_connector
BUCKET_NAME = 'bucket-unit-test'
class S3LogTestCase(TestCase):
def setUp(self):
self.s3 = boto3.client('s3')
self.key_path = 'path/file'
self.text_body = '''line1\nline2\n'''
self.lines_cnt = len(self.text_body.split('\n'))
def test_parse_by_logic_connector(self):
json_lines = ['{"age": 111, "str_field": "str_value1", "logic_connector": 1}',
'{"age": 222, "str_field": "str_value2", "logic_connector": 2}',
'{"age": 333, "str_field": "str_value3", "logic_connector": 3}',
'{"age": 22, "str_field": "str_value22", "logic_connector": 2}']
parsed_lines = parse_by_logic_connector('\n'.join(json_lines))
assert len(parsed_lines) == 3
assert parsed_lines[2] == '\n'.join([json_lines[1], json_lines[3]])
@mock_s3
def test_filter_by_logic_connector(self):
s3_create_bucket(BUCKET_NAME)
json_lines = ['{"age": 111, "str_field": "str_value1", "logic_connector": 1}',
'{"age": 222, "str_field": "str_value2", "logic_connector": 2}',
'{"age": 333, "str_field": "str_value3", "logic_connector": 3}',
'{"age": 22, "str_field": "str_value22", "logic_connector": 2}']
text_body = '\n'.join(json_lines)
file_name = 'test_filet.txt'
key_path = 'path/' + file_name
save_file(BUCKET_NAME, key_path, text_body)
s3_dict = {'s3': {'bucket': {'name': BUCKET_NAME},
'object': {'key': key_path}}}
event = {'Records': [s3_dict, ]}
context = {}
parsed_lines = filter_by_logic_connector(event, context)
assert len(parsed_lines) == 3
logic_connector_id = 1
assert len(parsed_lines[logic_connector_id].split('\n')) == 1
logic_connector_id = 2
assert len(parsed_lines[logic_connector_id].split('\n')) == 2
logic_connector_id = 3
assert len(parsed_lines[logic_connector_id].split('\n')) == 1
@mock_s3
def test_get_file_text(self):
s3_create_bucket(BUCKET_NAME)
saved_len = save_file(BUCKET_NAME, self.key_path, self.text_body)
text = s3_get_file_text(BUCKET_NAME, self.key_path)
assert text == self.text_body
@mock_s3
def test_save_file(self):
s3_create_bucket(BUCKET_NAME)
save_file(BUCKET_NAME, self.key_path, self.text_body)
text_body = s3_get_file_text(BUCKET_NAME, self.key_path)
# check that file saved to s3
assert text_body == self.text_body
def test_get_s3_key(self):
file_name = 'file2'
key_path = 'path/' + file_name
logic_connector_id = 123
key = get_s3_key(logic_connector_id, key_path)
assert str(logic_connector_id) in key
assert file_name in key
```
#### File: ai_engineering/predictor-app/app.py
```python
import os
from flask import Flask, flash, request, redirect, url_for, render_template, Response, session, \
make_response, abort,jsonify,send_from_directory
from werkzeug.utils import secure_filename
from subprocess import check_call
import pandas as pd
import boto3
from botocore.exceptions import ClientError
import time
from random import randrange,choices
import requests
from zappa.asynchronous import task, get_async_response
from flask_bootstrap import Bootstrap
import redis
from helper import predict
from pathlib import Path
from flask_session import Session as f_session
class DevConfig:
FLASK_APP = "app.py"
SESSION_TYPE = os.environ.get("SESSION_TYPE")
if SESSION_TYPE and SESSION_TYPE=='redis':
redis_url = os.environ.get('REDIS_CONN')
SESSION_REDIS = redis.from_url(redis_url)
else:
SESSION_TYPE = 'filesystem'
SESSION_FILE_DIR = '/tmp/flask_session/'
    PERMANENT_SESSION_LIFETIME = 3600  # one hour session timeout
SECRET_KEY = os.environ.get('secret_key')
app = Flask(__name__)
app.config.from_object(DevConfig)
Bootstrap(app)
if os.environ.get('SESSION_TYPE') == 'redis':
sess = f_session()
sess.init_app(app)
if os.environ.get('aws_access_key_id'):
s3_client = boto3.client('s3',
region_name=os.environ.get('aws_region'),
aws_access_key_id=os.environ.get('aws_access_key_id'),
aws_secret_access_key=os.environ.get('aws_secret_access_key'))
else:
s3_client = boto3.client('s3')
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ['csv']
def get_features():
headers = {'Content-Type': 'text/plain; charset=UTF-8',
'Accept':'*/*','Authorization': 'Token {}'.format(session.get('API_KEY'))}
url = f"{session.get('ENDPOINT_URL')}/api/v2/deployments/{session.get('DEPLOYMENT_ID')}/features"
try:
response = requests.get(url,headers=headers)
features = response.json()['data']
features = [f['name'] for f in features]
except ValueError:
flash('HTTP Response Error: Cannot connect to endpoint.')
return render_template('score.html')
except KeyError:
flash('Session Timeout: Please re-enter your API Key.')
return redirect(url_for('index'))
return ",".join(features)
def is_ts(API_KEY,DEPLOYMENT_ID,ENDPOINT_URL):
# get project id
headers = {'Content-Type': 'text/plain; charset=UTF-8',
'Accept':'*/*','Authorization': 'Token {}'.format(API_KEY)}
url = f"{ENDPOINT_URL}/api/v2/deployments/{DEPLOYMENT_ID}"
try:
response = requests.get(url,headers=headers)
projectID = response.json()['model']['projectId']
except KeyError as e:
raise(e)
url = f"{ENDPOINT_URL}/api/v2/projects/{projectID}"
try:
response = requests.get(url,headers=headers)
is_time_series = response.json()['partition']['useTimeSeries']
except KeyError as e:
raise(e)
return is_time_series
def configure_bucket():
cors_configuration = {
'CORSRules': [{
'AllowedHeaders': ['*'],
'AllowedMethods': ['GET','PUT'],
'AllowedOrigins': ['*'],
'ExposeHeaders': ['ETag'],
'MaxAgeSeconds': 3000
}]
}
try:
s3_client.put_bucket_cors(Bucket=session.get('bucket'),CORSConfiguration=cors_configuration)
print('put bucket cors.')
signed_url = s3_client.generate_presigned_url(
ClientMethod='put_object',
Params={
'Bucket': session.get('bucket'),
'Key': f"uploads/{session.get('filename')}",
'ContentType': 'text/csv'
},
ExpiresIn=3600,
)
print(signed_url)
except ClientError as e:
raise(e)
return signed_url
@app.route('/score',methods=['GET','POST'])
def upload_file():
if request.method == 'POST':
req_form = request.form
if (session.get('API_KEY') is None):
return redirect(url_for('index'))
if ((session.get('time_series')) == True) and (req_form.get('start_date') == "" or req_form.get('end_date') == ""):
flash('Start and End Date are required for time series projects.')
return redirect(url_for('score'))
if req_form['all_cols']=='true':
cols = "all_columns"
else:
cols = ','.join(req_form.getlist('passthrough_columns'))
session['cols'] = cols
session['save_res'] = bool(req_form.get('save_res'))
session['explanations'] = req_form.get('explanations')
session['start_date'] = req_form.get('start_date')
session['end_date'] = req_form.get('end_date')
return redirect(url_for('payload',filename=session.get('filename'),bucket=session.get('bucket')))
elif (session.get('API_KEY') is None):
return redirect(url_for('index'))
feats = get_features()
newfilename = 'input_file_%s.csv'%(randrange(100000, 999999))
session['filename'] = newfilename
try:
signed_url = configure_bucket()
except Exception as e:
print(e)
flash(f"Bucket configuration error: permission denied to {session.get('bucket')}.")
return redirect(url_for('index'))
print(signed_url)
session['time_series'] = is_ts(session.get('API_KEY'),session.get('DEPLOYMENT_ID'),session.get('ENDPOINT_URL'))
return render_template('score.html',dep_features=feats,
signed_url=signed_url.split('?')[1],bucket=session.get('bucket'),
filename=session.get('filename'),
time_series=session.get('time_series'))
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method=='POST':
if ('API_KEY' in request.form and request.form['API_KEY'] != ''):
# get list of deployments from api key
session['API_KEY'] = request.form['API_KEY']
session['ENDPOINT_URL'] = request.form['ENDPOINT']
return redirect(url_for('config'))
else:
session.clear()
return render_template('auth.html',api_key='key')
if os.environ.get('bucket') is None or os.environ.get('bucket') == '':
flash('S3 bucket not configured.')
return render_template('auth.html',api_key='key')
else:
session['bucket'] = os.environ.get('bucket')
return render_template('auth.html',api_key='key')
@app.route('/config', methods=['GET', 'POST'])
def config():
if request.method=='POST' and request.form['submit_button'] == 'Save Settings':
print(request.form)
if (session['API_KEY'] == ''):
return redirect(url_for('index'))
if ('DEPLOYMENT_ID' in request.form): #
# get prediction servers from api key and save selected deployment id
session['DEPLOYMENT_ID'] = request.form['DEPLOYMENT_ID']
session['PRED_SERVER'] = request.form['PRED_SERVER']
if 's3_bucket' in request.form:
session['custom_bucket'] = request.form['s3_bucket']
session['custom_access_key'] = request.form['access_key']
session['custom_secret_key'] = request.form['secret_key']
else:
session['custom_bucket'],session['custom_access_key'],session['custom_secret_key'] = None,None,None
return redirect(url_for('upload_file'))
else:
# error if no credentials
flash('Please select a deployment and a prediction server')
return render_template('config.html')
elif session.get('API_KEY') != '':
headers = {'Content-Type': 'text/plain; charset=UTF-8',
'Accept':'*/*',
'Authorization': 'Token {}'.format(session.get('API_KEY'))}
url = session.get('ENDPOINT_URL')+'/api/v2/deployments'
try:
response = requests.get(url,headers=headers)
deployments = response.json()['data']
deployments = [d for d in deployments if d['defaultPredictionServer'] is not None]
except (ValueError, KeyError):
flash('HTTP Response Error - cannot connect to endpoint.')
return render_template('config.html')
# get list of pred servers
headers = {'Content-Type': 'text/plain; charset=UTF-8',
'Accept':'*/*','Authorization': 'Token {}'.format(session.get('API_KEY'))}
url = session.get('ENDPOINT_URL')+'/api/v2/predictionServers/'
try:
response = requests.get(url,headers=headers)
pred_servers = response.json()['data']
except ValueError:
flash('HTTP Response Error - cannot connect to endpoint.')
return render_template('config.html')
except KeyError:
flash('Invalid API KEY.')
return render_template('config.html')
# generate signed url to download past preds
signed_url = s3_client.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': session.get('bucket'),
'Key': 'saved_preds/%s/%s/preds.csv'%(session.get('API_KEY'),session.get('DEPLOYMENT_ID'))
}
)
return render_template('config.html',deployments=deployments, pred_servers=pred_servers,
bucket=session.get('bucket'), filename='preds.csv',
DEPLOYMENT_ID=session.get("DEPLOYMENT_ID"),API_KEY=session.get("API_KEY"), signed_url=signed_url.split('?')[1])
return render_template('config.html')
@app.route('/explanations/',methods=['GET','POST'])
def explanations():
# get and parse explanations from result df
response = request.get_json()
key = 'preds/%s_preds.csv'%(session.get('filename').split('.')[0])
obj = s3_client.get_object(Bucket=session.get('bucket'),Key=key,Range='bytes=0-10000000')
df = pd.read_csv(obj['Body'])
row_number = int(float(response['row_number']))
row = df.iloc[row_number,5:]
feats = [x for x in df.columns if 'FEATURE_NAME' in x]
strength = [x for x in df.columns if 'QUALITATIVE_STRENGTH' in x]
val = [x for x in df.columns if 'ACTUAL_VALUE' in x]
expl_df = pd.concat([row[feats].reset_index().iloc[:,-1],row[strength].reset_index().iloc[:,-1],row[val].reset_index().iloc[:,-1]],axis=1)
expl_df.columns=['Feature Name','Impact','Value']
return render_template('preds.html',
title="Prediction Explanations",title_val="Explanations",explanations='True',
column_names=expl_df.columns.values,
row_data=list(expl_df.values.tolist()),zip=zip)
@app.route('/preds',methods=['GET', 'POST'])
def preds():
if (session.get('API_KEY') is None):
return redirect(url_for('index'))
elif (session.get('filename') is None):
return redirect(url_for('upload_file'))
else:
# get and display prediction results
key = 'preds/%s_preds.csv'%(session.get('filename').split('.')[0])
obj = s3_client.get_object(Bucket=session.get('bucket'),Key=key,Range='bytes=0-10000000')
# generate signed url
signed_url = s3_client.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': session.get('bucket'),
'Key': key
}
)
print(signed_url)
# save to custom bucket
if session.get('custom_bucket'):
try:
obj_2 = s3_client.get_object(Bucket=session.get('bucket'),Key=key)
key='preds/{dep}/preds_{fn}'.format(dep=session.get('DEPLOYMENT_ID'),fn=session.get('filename').split('_')[2])
client_2=boto3.client('s3',aws_access_key_id=session.get('custom_access_key'),aws_secret_access_key=session.get('custom_secret_key'))
client_2.put_object(Bucket=session.get('custom_bucket'),Key=key,Body=obj_2['Body'].read())
except ClientError:
flash("Could not save to custom bucket %s: Permission Denied."%(session.get('custom_bucket')))
return redirect(url_for('config'))
df = pd.read_csv(obj['Body'])
print(df)
# display first 100 rows
if df.shape[0]>100:
df = df.iloc[0:100]
print(df.columns)
if 'EXPLANATION_1_FEATURE_NAME' in df.columns:
# dont show explanation columns
cols = [x for x in df.columns if 'EXPLANATION' not in x]
df=df[cols]
df.reset_index(inplace=True)
return render_template('preds.html',
title="Prediction Results",title_val="Results",hov=True,
column_names=df.columns.values, signed_url=signed_url.split('?')[1],
filename=key.split('/')[1], bucket=session.get('bucket'),
row_data=list(df.values.tolist()),zip=zip)
return render_template('preds.html',
title="Prediction Results",title_val="Results",hov=None,
column_names=df.columns.values, signed_url=signed_url.split('?')[1],
filename=key.split('/')[1], bucket=session.get('bucket'),
row_data=list(df.values.tolist()),zip=zip)
def get_preds(bucket,key):
time_end = time.time() + 3*60
while time.time() < time_end:
try:
s3_client.head_object(Bucket=bucket, Key=key)
return 'done'
except ClientError:
time.sleep(5)
print('waiting for %s'%key)
continue
return 'not found'
@app.route('/payload/<filename>/<bucket>/')
def payload(filename,bucket):
print('in payload function')
# payload=session['payload']
payload_vars = '{"bucket":"%s","key":"uploads/%s","DEPLOYMENT_ID":"%s","API_KEY":"%s",\
"ENDPOINT_URL":"%s","PRED_SERVER":"%s","DATAROBOT_KEY":"%s","explanations": "%s",\
"cols":"%s","save":"%s","start_date":"%s","end_date":"%s"}'%(session.get('bucket'),session.get('filename'),session.get('DEPLOYMENT_ID'),
session.get('API_KEY'),session.get('ENDPOINT_URL'),session.get('PRED_SERVER'),
session.get('DR_KEY'),session.get('explanations'),
session.get('cols'),session.get('save_res'),session.get('start_date'),session.get('end_date'))
x = longrunner(payload_vars)
return redirect(url_for('response', response_id=x.response_id,filename=filename))
@app.route('/async-response/<response_id>/<filename>')
def response(response_id,filename):
response = get_async_response(response_id)
if response is None:
flash('Scoring error, please try again')
return redirect(url_for('upload_file'))
if response['status'] == 'complete':
return redirect(url_for('preds',filename=filename))
time.sleep(5)
return "Not yet ready. Redirecting.", 302, {
'Content-Type': 'text/plain; charset=utf-8',
'Location': url_for('response', response_id=response_id, filename=filename,backoff=5),
'X-redirect-reason': "Not yet ready.",
}
@task(capture_response=True)
def longrunner(payload_vars):
# key = 'preds/%s_preds.csv'%(filename.split('.')[0])
return predict.handler(payload_vars)
if __name__ == '__main__':
app.run()
```
#### File: predictor-app/helper/predict.py
```python
import json
import urllib.parse
import boto3
from subprocess import check_call
import csv
from datetime import datetime
import sys
import ast
from botocore.exceptions import ClientError
from flask import url_for
from helper import single_predict,batch_predict
import time
import os
if os.environ.get('aws_access_key_id'):
s3 = boto3.client('s3',
region_name='us-east-2',
aws_access_key_id=os.environ.get('aws_access_key_id'),
aws_secret_access_key=os.environ.get('aws_secret_access_key'))
print('here')
else:
s3 = boto3.client('s3')
print(os.environ)
MAX_PREDICTION_FILE_SIZE_BYTES = 10485760
def batch_needed(filename):
data_size = os.path.getsize(filename)  # size on disk; avoids reading the whole file into memory
print(data_size)
if data_size >= MAX_PREDICTION_FILE_SIZE_BYTES:
return True
return False
def append_csvs(in_file,out_file):
fout=open(out_file,"a")
f = open(in_file,"r")
f.readline() # skip the header
for line in f:
fout.write(line)
f.close()
fout.close()
return out_file
def wait_for_file(bucket,key):
# wait 15 minutes for file
time_end = time.time() + 15*60
while time.time() < time_end:
try:
obj = s3.head_object(Bucket = bucket, Key = key)
size = obj['ContentLength']
return size
except ClientError as e:
if e.response['Error']['Code']=='403':
return e
elif e.response['Error']['Code']=='404':
print('not found - retrying...')
time.sleep(5)
continue
else:
return e
return 'File not found'
def handler(event):
# prepare config vars for predict function
event = ast.literal_eval(event)
bucket = event['bucket']
key = event['key']
new_key = 'preds/%s_preds.csv'%(key.split('/')[1].split('.')[0])
input_file = '/tmp/'+ key.split('/')[1]
output_csv = '/tmp/%s_preds.csv'%(key.split('/')[1].split('.')[0])
auth_args = {'API_KEY': event['API_KEY'],
'DEPLOYMENT_ID': event['DEPLOYMENT_ID'],
'BASE_URL' : event['ENDPOINT_URL'],
'EXPLANATIONS': (event['explanations'] == 'explanations')}
cols = event['cols']
if cols == 'all_columns':
auth_args['passthrough_columns_set'] = True
elif cols:
auth_args['keep_cols'] = cols
if event['start_date'] != "None":
auth_args['start_date'] = event['start_date']
auth_args['end_date'] = event['end_date']
print(auth_args)
# wait until file present
file_info = wait_for_file(bucket,key)
if isinstance(file_info,str):
return file_info
elif not isinstance(file_info,int):
return file_info
# run predict function
if file_info >= MAX_PREDICTION_FILE_SIZE_BYTES:
print('batch predict')
try:
batch_predict.predict(input_file=f"s3://{bucket}/{key}", output_file=f"s3://{bucket}/{new_key}", **auth_args)
except Exception as e:
return
else:
s3.download_file(bucket,key,input_file)
auth_args['PRED_SERVER'] = event['PRED_SERVER']
try:
single_predict.predict(input_file=input_file, output_file=output_csv, **auth_args)
s3.upload_file(output_csv, bucket, new_key) # upload result file for consumption in app
except Exception as e:
return
# save predictions to audit
if event['save'] == 'True':
save_key = 'saved_preds/%s/%s/preds.csv'%(event['API_KEY'],event['DEPLOYMENT_ID'])
saved_file = '/tmp/saved.csv'
try:
s3.download_file(bucket, save_key, saved_file)
output_csv=append_csvs(in_file=output_csv,out_file=saved_file)
except ClientError as e:
print("file doesn't exist yet")
print('saving file to %s'%(saved_file))
s3.upload_file(output_csv, bucket, save_key)
s3.delete_object(Bucket=bucket,Key=key) # delete uploaded file
``` |
{
"source": "joemfox/ray-tracer-challenge",
"score": 3
} |
#### File: ray-tracer-challenge/features/Intersection.py
```python
class Intersection():
def __init__(self,t,object):
self.t = t
self.object = object
class Intersections(list):
def __init__(self,i):
self += i
def hit(self):
hit = None
pos = list(filter(lambda x: x.t >= 0,self))
if len(pos):
hit = min(pos,key=lambda x:x.t)
return hit
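# A small sketch of the hit rule above (shape stands in for any object, e.g. a
# Sphere): the hit is the intersection with the lowest non-negative t, or None
# when every intersection lies behind the ray origin.
def example_hit(shape):
    xs = Intersections([Intersection(5, shape),
                        Intersection(-1, shape),
                        Intersection(2, shape)])
    return xs.hit()  # the Intersection with t == 2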
```
#### File: ray-tracer-challenge/features/Sphere.py
```python
from features.Tuple import Point, Vector
from features.Matrix import identity_matrix
from features.Material import Material
class Sphere():
def __init__(self):
self.position = Point(0,0,0)
self.transform = identity_matrix()
self.material = Material()
def normal(self,p):
obj_p = self.transform.inverse() * p
obj_n = obj_p - Point(0,0,0)
n = self.transform.submatrix(3,3).inverse().transpose() * obj_n
return n.normalize()
```
#### File: ray-tracer-challenge/features/util.py
```python
def equals(a, b, EPSILON = 0.00001):
return abs(a - b) < EPSILON
``` |
{
"source": "joemoorhouse/quantum-mc",
"score": 3
} |
#### File: quantum_mc/arithmetic/arithmetic.py
```python
from math import pi
from qiskit import QuantumRegister, QuantumCircuit, AncillaRegister
from .qft import qft, iqft, cqft, ciqft, ccu1
from qiskit.circuit.library import SXdgGate
# Modified version of qarithmetic https://github.com/hkhetawat/QArithmetic
################################################################################
# Bitwise Operators
################################################################################
# bit-wise operations
def bitwise_and(qc, a, b, c, N):
for i in range(0, N):
qc.ccx(a[i], b[i], c[i])
def bitwise_or(qc, a, b, c, N):
for i in range(0, N):
qc.ccx(a[i], b[i], c[i])
qc.cx(a[i], c[i])
qc.cx(b[i], c[i])
def bitwise_xor(qc, a, b, c, N):
for i in range(0, N):
qc.cx(a[i], c[i])
qc.cx(b[i], c[i])
def bitwise_not(qc, a, c, N):
for i in range(0, N):
qc.cx(a[i], c[i])
qc.x(c[i])
# Cyclically left-shifts a binary string "a" of length n.
# If "a" is zero-padded, equivalent to multiplying "a" by 2.
def lshift(circ, a, n=-1):
# Init n if it was not
if n == -1:
n = len(a)
# Iterate through pairs and do swaps.
for i in range(n,1,-1):
circ.swap(a[i-1],a[i-2])
# Cyclically left-shifts a binary string "a" of length n, controlled by c.
# If "a" is zero-padded, equivalent to multiplying "a" by 2, if and only if c.
def c_lshift(circ, c, a, n=-1):
# Init n if it was not
if n == -1:
n = len(a)
# Iterate through pairs and do swaps.
for i in range(n,1,-1):
circ.cswap(c, a[i-1],a[i-2])
# Cyclically right-shifts a binary string "a" of length n.
# If "a" is zero-padded, equivalent to dividing "a" by 2.
def rshift(circ, a, n=-1):
# Init n if it was not
if n == -1:
n = len(a)
# Iterate through pairs and do swaps.
for i in range(n-1):
circ.swap(a[i],a[i+1])
# Cyclically right-shifts a binary string "a" of length n, controlled by c.
# If "a" is zero-padded, equivalent to dividing "a" by 2, if and only if c.
def c_rshift(circ, c, a, n=-1):
# Init n if it was not
if n == -1:
n = len(a)
# Iterate through pairs and do swaps.
for i in range(n-1):
circ.cswap(c, a[i],a[i+1])
################################################################################
# Addition Circuits
################################################################################
# Define some functions for the ripple adder.
def sum(circ, cin, a, b):
circ.cx(a,b)
circ.cx(cin,b)
def sum_cq(circ, cin, a, b):
if a == 1:
circ.x(b)
circ.cx(cin,b)
def carry(circ, cin, a, b, cout):
circ.ccx(a, b, cout)
circ.cx(a, b)
circ.ccx(cin, b, cout)
# in this version a is classical and b quantum
def carry_cq(circ, cin, a, b, cout):
if a == 1:
circ.cx(b, cout)
circ.x(b)
circ.ccx(cin, b, cout)
def carry_dg_cq(circ, cin, a, b, cout):
circ.ccx(cin, b, cout)
if a == 1:
circ.x(b)
circ.cx(b, cout)
def carry_dg(circ, cin, a, b, cout):
circ.ccx(cin, b, cout)
circ.cx(a, b)
circ.ccx(a, b, cout)
# Draper adder that takes |a>|b> to |a>|a+b>.
# |a> has length x and is less than or equal to n
# |b> has length n+1 (left padded with a zero).
# https://arxiv.org/pdf/quant-ph/0008033.pdf
def add(circ, a, b, n):
# move n forward by one to account for overflow
n += 1
# Take the QFT.
qft(circ, b, n)
# Compute controlled-phases.
# Iterate through the targets.
for i in range(n,0,-1):
# Iterate through the controls.
for j in range(i,0,-1):
# If the qubit a[j-1] exists run cu1, if not assume the qubit is 0 and never existed
if len(a) - 1 >= j - 1:
circ.cu1(2*pi/2**(i-j+1), a[j-1], b[i-1])
# Take the inverse QFT.
iqft(circ, b, n)
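# A minimal wiring sketch for the Draper adder above (register names are
# illustrative): |a> holds up to n qubits and |b> needs n+1 qubits so the
# carry-out has somewhere to go; after add(), the b register holds a+b.
def example_draper_add(n=3):
    a = QuantumRegister(n, 'a')
    b = QuantumRegister(n + 1, 'b')   # left padded with a zero for overflow
    circ = QuantumCircuit(a, b)
    # ... prepare |a> and |b> here, e.g. with X gates ...
    add(circ, a, b, n)
    return circ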
# Draper adder that takes |a>|b> to |a>|a+b>, controlled on |c>.
# |a> has length x and is less than or equal to n
# |b> has length n+1 (left padded with a zero).
# |c> is a single qubit that's the control.
def cadd(circ, c, a, b, n):
# move n forward by one to account for overflow
n += 1
# Take the QFT.
cqft(circ, c, b, n)
# Compute controlled-phases.
# Iterate through the targets.
for i in range(n,0,-1):
# Iterate through the controls.
for j in range(i,0,-1):
# If the qubit a[j-1] exists run ccu, if not assume the qubit is 0 and never existed
if len(a) - 1 >= j - 1:
ccu1(circ, 2*pi/2**(i-j+1), c, a[j-1], b[i-1])
# Take the inverse QFT.
ciqft(circ, c, b, n)
# Adder that takes |a>|b> to |a>|a+b>.
# |a> has length n.
# |b> has length n+1.
# Based on Vedral, Barenco, and Ekert (1996).
def add_ripple(circ, a, b, n):
# Create a carry register of length n.
c = QuantumRegister(n)
circ.add_register(c)
# Calculate all the carries except the last one.
for i in range(0, n-1):
carry(circ, c[i], a[i], b[i], c[i+1])
# The last carry bit is the leftmost bit of the sum.
carry(circ, c[n-1], a[n-1], b[n-1], b[n])
# Calculate the second-to-leftmost bit of the sum.
circ.cx(c[n-1],b[n-1])
# Invert the carries and calculate the remaining sums.
for i in range(n-2,-1,-1):
carry_dg(circ, c[i], a[i], b[i], c[i+1])
sum(circ, c[i], a[i], b[i])
# Adder that takes |a>|b> to |a>|a+b>.
# |a> has length n.
# |b> has length n+1.
# Based on Vedral, Barenco, and Ekert (1996).
def add_ripple_in_place(circ, a, b, anc, n):
# Calculate all the carries except the last one.
for i in range(0, n-1):
carry(circ, anc[i], a[i], b[i], anc[i+1])
# The last carry bit is the leftmost bit of the sum.
carry(circ, anc[n-1], a[n-1], b[n-1], b[n])
# Calculate the second-to-leftmost bit of the sum.
circ.cx(anc[n-1],b[n-1])
# Invert the carries and calculate the remaining sums.
for i in range(n-2,-1,-1):
carry_dg(circ, anc[i], a[i], b[i], anc[i+1])
sum(circ, anc[i], a[i], b[i])
# Adder that takes |a>|b> to |a>|a+b>.
# |a> has length <= n. |a> will be padded with zeros to length n
# |b> has length n+1.
# Based on Vedral, Barenco, and Ekert (1996).
def add_ripple_in_place_padding(circ, a, b, anc, n):
# Calculate all the carries except the last one.
for i in range(0, n - 1):
if i < len(a):
carry(circ, anc[i], a[i], b[i], anc[i+1])
else: # pad with zeros
carry_cq(circ, anc[i], 0, b[i], anc[i+1])
# The last carry bit is the leftmost bit of the sum.
if (n-1) < len(a):
carry(circ, anc[n-1], a[n-1], b[n-1], b[n])
else:
carry_cq(circ, anc[n-1], 0, b[n-1], b[n])
# Calculate the second-to-leftmost bit of the sum.
circ.cx(anc[n-1],b[n-1])
# Invert the carries and calculate the remaining sums.
for i in range(n-2,-1,-1):
if i < len(a):
carry_dg(circ, anc[i], a[i], b[i], anc[i+1])
sum(circ, anc[i], a[i], b[i])
else:
carry_dg_cq(circ, anc[i], 0, b[i], anc[i+1])
sum_cq(circ, anc[i], 0, b[i])
# Adder that takes |a>|b> to |a>|a+b>.
# |a> has length n *and is classical*.
# |b> has length n+1.
# Based on Vedral, Barenco, and Ekert (1996).
def add_ripple_in_place_cq(circ, a, qr_b, qr_anc, n):
# Calculate all the carries except the last one.
for i in range(0, n-1):
carry_cq(circ, qr_anc[i], a[i], qr_b[i], qr_anc[i+1])
# The last carry bit is the leftmost bit of the sum.
carry_cq(circ, qr_anc[n-1], a[n-1], qr_b[n-1], qr_b[n])
# Calculate the second-to-leftmost bit of the sum.
circ.cx(qr_anc[n-1],qr_b[n-1])
# Invert the carries and calculate the remaining sums.
for i in range(n-2,-1,-1):
carry_dg_cq(circ, qr_anc[i], a[i], qr_b[i], qr_anc[i+1])
sum_cq(circ, qr_anc[i], a[i], qr_b[i])
# Adder that takes |a>|b>|0> to |a>|b>|a+b>.
# |a> has length n.
# |b> has length n.
# |s> = |0> has length n+1.
def add_ripple_ex(circ, a, b, s, n):
# Copy b to s.
for i in range(0, n):
circ.cx(b[i],s[i])
# Add a and s.
add_ripple(circ, a, s, n)
################################################################################
# Subtraction Circuits
################################################################################
# Subtractor that takes |a>|b> to |a>|a-b>.
# |a> has length n+1 (left padded with a zero).
# |b> has length n+1 (left padded with a zero).
def sub(circ, a, b, n):
# Flip the bits of a.
circ.x(a)
# Add it to b.
add(circ, a, b, n - 1)
# Flip the bits of the result. This yields the sum.
circ.x(b)
# Flip back the bits of a.
circ.x(a)
# Subtractor that takes |a>|b> to |a-b>|b>.
# |a> has length n+1 (left padded with a zero).
# |b> has length n+1 (left padded with a zero).
def sub_swap(circ, a, b, n):
# Flip the bits of a.
circ.x(a)
# Add it to b.
add(circ, b, a, n - 1)
# Flip the bits of the result. This yields the sum.
circ.x(a)
# Subtractor that takes |a>|b> to |a>|a-b>.
# |a> has length n.
# |b> has length n+1.
def sub_ripple(circ, a, b, n):
# We add "a" to the 2's complement of "b."
# First flip the bits of "b."
circ.x(b)
# Create a carry register of length n.
c = QuantumRegister(n)
circ.add_register(c)
# Add 1 to the carry register, which adds 1 to b, negating it.
circ.x(c[0])
# Calculate all the carries except the last one.
for i in range(0, n-1):
carry(circ, c[i], a[i], b[i], c[i+1])
# The last carry bit is the leftmost bit of the sum.
carry(circ, c[n-1], a[n-1], b[n-1], b[n])
# Calculate the second-to-leftmost bit of the sum.
circ.cx(c[n-1],b[n-1])
# Invert the carries and calculate the remaining sums.
for i in range(n-2,-1,-1):
carry_dg(circ, c[i], a[i], b[i], c[i+1])
sum(circ, c[i], a[i], b[i])
# Flip the carry to restore it to zero.
circ.x(c[0])
# Subtractor that takes |a>|b>|0> to |a>|b>|a-b>.
# |a> has length n.
# |b> has length n.
# |s> = |0> has length n+1.
def sub_ripple_ex(circ, a, b, s, n):
# Copy b to s.
for i in range(0, n):
circ.cx(b[i],s[i])
# Subtract a and s.
sub_ripple(circ, a, s, n)
################################################################################
# Multiplication Circuit
################################################################################
# Controlled operations
# Take a subset of a quantum register from index x to y, inclusive.
def sub_qr(qr, x, y): # may also be able to use qbit_argument_conversion
sub = []
for i in range (x, y+1):
sub = sub + [(qr[i])]
return sub
def full_qr(qr):
return sub_qr(qr, 0, len(qr) - 1)
# Computes the product c=a*b.
# a has length n.
# b has length n.
# c has length 2n.
def mult(circ, a, b, c, n):
for i in range (0, n):
cadd(circ, a[i], b, sub_qr(c, i, n+i), n)
# Computes the product c=a*b if and only if control.
# a has length n.
# b has length n.
# control has length 1.
# c has length 2n.
def cmult(circ, control, a, b, c, n):
qa = QuantumRegister(len(a))
qb = QuantumRegister(len(b))
qc = QuantumRegister(len(c))
tempCircuit = QuantumCircuit(qa, qb, qc)
mult(tempCircuit, qa, qb, qc, n)
tempCircuit = tempCircuit.control(1) #Add Decomposition after pull request inclusion #5446 on terra
print("Remember To Decompose after release >0.16.1")
circ.compose(tempCircuit, qubits=full_qr(control) + full_qr(a) + full_qr(b) + full_qr(c), inplace=True)
################################################################################
# Division Circuit
################################################################################
# Divider that takes |p>|d>|q>.
# |p> is length 2n and has n zeros on the left: 0 ... 0 p_n ... p_1.
# |d> has length 2n and has n zeros on the right: d_2n ... d_{n+1) 0 ... 0.
# |q> has length n and is initially all zeros.
# At the end of the algorithm, |q> will contain the quotient of p/d, and the
# left n qubits of |p> will contain the remainder of p/d.
def div(circ, p, d, q, n):
# Calculate each bit of the quotient and remainder.
for i in range(n,0,-1):
# Left shift |p>, which multiplies it by 2.
lshift(circ, p, 2*n)
# Subtract |d> from |p>.
sub_swap(circ, p, d, 2*n)
# If |p> is positive, indicated by its most significant bit being 0,
# the (i-1)th bit of the quotient is 1.
circ.x(p[2*n-1])
circ.cx(p[2*n-1], q[i-1])
circ.x(p[2*n-1])
# If |p> is negative, indicated by the (i-1)th bit of |q> being 0, add D back
# to P.
circ.x(q[i-1])
cadd(circ, q[i-1], d, p, 2*n - 1)
circ.x(q[i-1])
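# Register-layout sketch for the restoring divider above (names illustrative):
# to divide an n-bit dividend by an n-bit divisor, |p> is 2n qubits with the
# dividend in its low half, |d> is 2n qubits with the divisor in its high half,
# and |q> is n zeroed qubits that accumulate the quotient.
def example_div(n=3):
    p = QuantumRegister(2 * n, 'p')
    d = QuantumRegister(2 * n, 'd')
    q = QuantumRegister(n, 'q')
    circ = QuantumCircuit(p, d, q)
    # ... encode the dividend into p[0:n] and the divisor into d[n:2n] ...
    div(circ, p, d, q, n)
    return circ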
################################################################################
# Exponentiation Circuits
################################################################################
# square that takes |a> |b>
# |a> is length n and is an unsigned integer
# |b> is length 2n and has 2n zeros, after execution b = a^2
def square(circ, a, b, n=-1):
if n == -1:
n = len(a)
# First Addition
circ.cx(a[0], b[0])
for i in range(1, n):
circ.ccx(a[0], a[i], b[i])
# Custom Addition Circuit For Each Qubit of A
for k in range(1, n):
# modifying qubits
d = b[k:n+k+1]
qft(circ, d, n+1) #Technically the first few QFT could be refactored to use less gates due to guaranteed controls
# Compute controlled-phases.
# Iterate through the targets.
for i in range(n+1,0,-1):
# Iterate through the controls.
for j in range(i,0,-1):
if len(a) - 1 < j - 1:
pass # skip over non existent qubits
elif k == j - 1: # Cannot control twice
circ.cu1(2*pi/2**(i-j+1), a[j-1], d[i-1])
else:
ccu1(circ, 2*pi/2**(i-j+1), a[k], a[j-1], d[i-1])
iqft(circ, d, n+1)
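# Usage sketch for square above (names illustrative): |a> holds an n-qubit
# unsigned integer and |b> must supply 2n zeroed qubits to receive a**2.
def example_square(n=3):
    a = QuantumRegister(n, 'a')
    b = QuantumRegister(2 * n, 'b')
    circ = QuantumCircuit(a, b)
    square(circ, a, b, n)
    return circ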
# a has length n
# b has length v
# finalOut has length n*((2^v)-1), for safety
def power(circ, a, b, finalOut): # Because this is reversible/gate-friendly, memory use blooms, to say the least
# Track Number of Qubits
n = len(a)
v = len(b)
# Left 0 pad a, to satisfy multiplication function arguments
aPad = AncillaRegister(n * (pow(2, v) - 3)) # Unsure of where to allocate these ancillas
circ.add_register(aPad)
padAList = full_qr(aPad)
aList = full_qr(a)
a = aList + padAList
# Create a register d for mults and init with state 1
d = AncillaRegister(n) # Unsure of where to allocate these ancillas
circ.add_register(d)
# Create a register for tracking the output of cmult to the end
ancOut = AncillaRegister(n*2) # Unsure of where to allocate these ancillas
circ.add_register(ancOut)
# Left 0 pad finalOut to provide safety to the final multiplication
if (len(a) * 2) - len(finalOut) > 0:
foPad = AncillaRegister((len(a) * 2) - len(finalOut))
circ.add_register(foPad)
padFoList = full_qr(foPad)
foList = full_qr(finalOut)
finalOut = foList + padFoList
# Create zero bits
num_recycle = (2 * n * (pow(2, v) - 2)) - (n * pow(2, v)) # 24
permaZeros = []
if num_recycle > 0:
permaZeros = AncillaRegister(num_recycle) #8
circ.add_register(permaZeros)
permaZeros = full_qr(permaZeros)
# Instead of MULT copy bits over
if v >= 1:
for i in range(n):
circ.ccx(b[0], a[i], d[i])
circ.x(b[0])
circ.cx(b[0], d[0])
circ.x(b[0])
# iterate through every qubit of b
for i in range(1,v): # for every bit of b
for j in range(pow(2, i)):
# run multiplication operation if and only if b is 1
bonus = permaZeros[:2*len(d) - len(ancOut)]
cmult(circ, [b[i]], a[:len(d)], d, full_qr(ancOut) + bonus, len(d))
# if the multiplication was not run copy the qubits so they are not destroyed when creating new register
circ.x(b[i])
for qub in range(0,len(d)):
circ.ccx(b[i], d[qub], ancOut[qub])
circ.x(b[i])
# Move the output to the input for next function and double the qubit length
d = ancOut
if i == v - 1 and j == pow(2, i) - 2:
# this is the second-to-last step; send qubits to output
ancOut = finalOut
elif not (i == v - 1 and j == pow(2, i) - 1):
# if this is not the very last step
# create a new output register of twice the length and register it
ancOut = AncillaRegister(len(d) + n) # Should label permazero bits
circ.add_register(ancOut)
```
#### File: quantum_mc/calculation/var_calculation.py
```python
from qiskit import QuantumRegister, QuantumCircuit, Aer, execute
from qiskit.circuit.library import LinearAmplitudeFunction
from qiskit.aqua.algorithms import IterativeAmplitudeEstimation
import numpy as np
# define linear objective function
num_sum_qubits = 5
breakpoints = [0]
slopes = [1]
offsets = [0]
f_min = 0
f_max = 10
c_approx = 0.25
objective = LinearAmplitudeFunction(
num_sum_qubits,
slope=slopes,
offset=offsets,
# max value that can be reached by the qubit register (will not always be reached)
domain=(0, 2**num_sum_qubits-1),
image=(f_min, f_max),
rescaling_factor=c_approx,
breakpoints=breakpoints
)
qr_sum = QuantumRegister(5, "sum")
state_preparation = QuantumCircuit(qr_sum) # to complete
# set target precision and confidence level
epsilon = 0.01
alpha = 0.05
# construct amplitude estimation
ae_cdf = IterativeAmplitudeEstimation(state_preparation=state_preparation,
epsilon=epsilon, alpha=alpha,
objective_qubits=[len(qr_sum)])
result_cdf = ae_cdf.run(quantum_instance=Aer.get_backend('qasm_simulator'), shots=100)
# print results
exact_value = 1 # to calculate
conf_int = np.array(result_cdf['confidence_interval'])
print('Exact value: \t%.4f' % exact_value)
print('Estimated value:\t%.4f' % result_cdf['estimation'])
print('Confidence interval: \t[%.4f, %.4f]' % tuple(conf_int))
def transform_from_
```
#### File: quantum-mc/test/test_mc_var.py
```python
from quantum_mc.arithmetic.piecewise_linear_transform import PiecewiseLinearTransform3
import unittest
import numpy as np
from qiskit.test.base import QiskitTestCase
import quantum_mc.calibration.fitting as ft
import quantum_mc.calibration.time_series as ts
from scipy.stats import multivariate_normal, norm
from qiskit import execute, Aer, QuantumCircuit, QuantumRegister, ClassicalRegister, AncillaRegister
from qiskit.quantum_info import Statevector
from qiskit.circuit.library import NormalDistribution, LogNormalDistribution, IntegerComparator
from qiskit.utils import QuantumInstance
from qiskit.algorithms import IterativeAmplitudeEstimation, EstimationProblem
def get_sims(normal_distribution):
import numpy as np
values = normal_distribution._values
probs = normal_distribution._probabilities
# we generate a bunch of realisations of the values, based on the discretised probabilities
upper_bounds = [0.0]
stop = 0.0
for val, prob in zip(values, probs):
stop += prob
upper_bounds.append(stop)
r = np.random.uniform(low=0.0, high=1.0, size=10)
indices = np.searchsorted(upper_bounds, r, side='left', sorter=None) - 1
g1, g2 = np.meshgrid(range(2**3), range(2**3), indexing="ij",)
i1 = g1.flatten()[indices]
i2 = g2.flatten()[indices]
#x = list(zip(*(grid.flatten() for grid in meshgrid)))
return i1, i2
class TestMcVar(QiskitTestCase):
def test_distribution_load(self):
""" Test that calculates a cumulative probability from the P&L distribution."""
correl = ft.get_correl("AAPL", "MSFT")
bounds_std = 3.0
num_qubits = [3, 3]
sigma = correl
bounds = [(-bounds_std, bounds_std), (-bounds_std, bounds_std)]
mu = [0, 0]
# starting point is a multi-variate normal distribution
normal = NormalDistribution(num_qubits, mu=mu, sigma=sigma, bounds=bounds)
pl_set = []
coeff_set = []
for ticker in ["MSFT", "AAPL"]:
((cdf_x, cdf_y), sigma) = ft.get_cdf_data(ticker)
(x, y) = ft.get_fit_data(ticker, norm_to_rel = False)
(pl, coeffs) = ft.fit_piecewise_linear(x, y)
# scale, to apply an arbitrary delta (we happen to use the same value here, but could be different)
coeffs = ft.scaled_coeffs(coeffs, 1.2)
pl_set.append(lambda z, coeffs=coeffs: ft.piecewise_linear(z, *coeffs))  # bind coeffs now to avoid the late-binding closure pitfall
coeff_set.append(coeffs)
# calculate the max and min P&Ls
p_max = max(pl_set[0](bounds_std), pl_set[1](bounds_std))
p_min = min(pl_set[0](-bounds_std), pl_set[1](-bounds_std))
# we discretise the transforms and create the circuits
transforms = []
i_to_js = []
for i,ticker in enumerate(["MSFT", "AAPL"]):
(i_0, i_1, a0, a1, a2, b0, b1, b2, i_to_j, i_to_x, j_to_y) = ft.integer_piecewise_linear_coeffs(coeff_set[i], x_min = -bounds_std, x_max = bounds_std, y_min = p_min, y_max = p_max)
transforms.append(PiecewiseLinearTransform3(i_0, i_1, a0, a1, a2, b0, b1, b2))
i_to_js.append(np.vectorize(i_to_j))
i1, i2 = get_sims(normal)
j1 = i_to_js[0](i1)
j2 = i_to_js[1](i2)
j_tot = j1 + j2
num_ancillas = transforms[0].num_ancilla_qubits
qr_input = QuantumRegister(6, 'input') # 2 times 3 registers
qr_objective = QuantumRegister(1, 'objective')
qr_result = QuantumRegister(6, 'result')
qr_ancilla = QuantumRegister(num_ancillas, 'ancilla')
#output = ClassicalRegister(6, 'output')
state_preparation = QuantumCircuit(qr_input, qr_objective, qr_result, qr_ancilla) #, output)
state_preparation.append(normal, qr_input)
for i in range(2):
offset = i * 3
state_preparation.append(transforms[i], qr_input[offset:offset + 3] + qr_result[:] + qr_ancilla[:])
# to calculate the cdf, we use an additional comparator
x_eval = 4
comparator = IntegerComparator(len(qr_result), x_eval + 1, geq=False)
state_preparation.append(comparator, qr_result[:] + qr_objective[:] + qr_ancilla[0:comparator.num_ancillas])
# now check
check = False
if check:
job = execute(state_preparation, backend=Aer.get_backend('statevector_simulator'))
var_prob = 0
for i, a in enumerate(job.result().get_statevector()):
b = ('{0:0%sb}' % (len(qr_input) + 1)).format(i)[-(len(qr_input) + 1):]
prob = np.abs(a)**2
if prob > 1e-6 and b[0] == '1':
var_prob += prob
print('Operator CDF(%s)' % x_eval + ' = %.4f' % var_prob)
# now do AE
problem = EstimationProblem(state_preparation=state_preparation,
objective_qubits=[len(qr_input)])
# target precision and confidence level
epsilon = 0.01
alpha = 0.05
qi = QuantumInstance(Aer.get_backend('aer_simulator'), shots=100)
ae_cdf = IterativeAmplitudeEstimation(epsilon, alpha=alpha, quantum_instance=qi)
result_cdf = ae_cdf.estimate(problem)
conf_int = np.array(result_cdf.confidence_interval)
print('Estimated value:\t%.4f' % result_cdf.estimation)
print('Confidence interval: \t[%.4f, %.4f]' % tuple(conf_int))
state_preparation.draw()
``` |
{
"source": "joemphilips/plugins",
"score": 3
} |
#### File: plugins/commando/commando.py
```python
from pyln.client import Plugin, RpcError # type: ignore
import json
import textwrap
import time
import random
import secrets
import string
import runes # type: ignore
import multiprocessing
from typing import Dict, Tuple, Optional
plugin = Plugin()
# "YOLO"!
COMMANDO_CMD = 0x4c4f
# Replies are split across multiple CONTINUES, then TERM.
COMMANDO_REPLY_CONTINUES = 0x594b
COMMANDO_REPLY_TERM = 0x594d
class CommandResponse:
def __init__(self, req):
self.buf = bytes()
self.req = req
def split_cmd(cmdstr):
"""Interprets JSON and method and params"""
cmd = json.loads(cmdstr)
return cmd['method'], cmd.get('params', {}), cmd.get('rune')
def send_msg(plugin, peer_id, msgtype, idnum, contents):
"""Messages are form [8-byte-id][data]"""
msg = (msgtype.to_bytes(2, 'big')
+ idnum.to_bytes(8, 'big')
+ bytes(contents, encoding='utf8'))
plugin.rpc.call(plugin.msgcmd, {'node_id': peer_id, 'msg': msg.hex()})
def send_result(plugin, peer_id, idnum, res):
# We can only send 64k in a message, but there is 10 byte overhead
# in the message header; 65000 is safe.
parts = textwrap.wrap(json.dumps(res), 65000)
for p in parts[:-1]:
send_msg(plugin, peer_id, COMMANDO_REPLY_CONTINUES, idnum, p)
send_msg(plugin, peer_id, COMMANDO_REPLY_TERM, idnum, parts[-1])
def is_rune_valid(plugin, runestr) -> Tuple[Optional[runes.Rune], str]:
"""Is this runestring valid, and authorized for us?"""
try:
rune = runes.Rune.from_base64(runestr)
except: # noqa: E722
return None, 'Malformed base64 string'
if not plugin.masterrune.is_rune_authorized(rune):
return None, 'Invalid rune string'
return rune, ''
def check_rune(plugin, node_id, runestr, command, params) -> Tuple[bool, str]:
"""If we have a runestr, check it's valid and conditions met"""
# If they don't specify a rune, we use any previous for this peer
if runestr is None:
runestr = plugin.peer_runes.get(node_id)
if runestr is None:
# Finally, try reader-writer lists
if node_id in plugin.writers:
runestr = plugin.masterrune.to_base64()
elif node_id in plugin.readers:
runestr = add_reader_restrictions(plugin.masterrune.copy())
if runestr is None:
return False, 'No rune'
commando_dict = {'time': int(time.time()),
'id': node_id,
'version': plugin.version,
'method': command}
# FIXME: This doesn't work well with complex params (it makes them str())
if isinstance(params, list):
for i, p in enumerate(params):
commando_dict['parr{}'.format(i)] = p
else:
for k, v in params.items():
# Cannot have punctuation in fieldnames, so remove.
for c in string.punctuation:
k = k.replace(c, '')
commando_dict['pname{}'.format(k)] = v
return plugin.masterrune.check_with_reason(runestr, commando_dict)
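# Sketch of the fields a restriction can match, mirroring commando_dict above
# (all values illustrative): check_with_reason() compares restriction names
# such as "method", "id", "time", "parr0" or "pnamefoo" against this dict, so
# a rune like 'method^list|method^get' only authorizes list*/get* commands.
EXAMPLE_RUNE_FIELDS = {
    'time': 1650000000,        # unix time the command arrived
    'id': '02aabb...',         # node id of the calling peer
    'version': 'v0.10.2',      # our own version string
    'method': 'listpeers',     # command being invoked
    'pnameid': '03ccdd...',    # named parameter "id" (punctuation stripped)
}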
def do_cacherune(plugin, peer_id, runestr):
if not plugin.have_datastore:
return {'error': 'No datastore available: try datastore.py?'}
if runestr is None:
return {'error': 'No rune set?'}
rune, whynot = is_rune_valid(plugin, runestr)
if not rune:
return {'error': whynot}
plugin.peer_runes[peer_id] = runestr
save_peer_rune(plugin, peer_id, runestr)
return {'result': {'rune': runestr}}
def command_run(plugin, peer_id, idnum, method, params):
"""Function to run a command and write the result"""
try:
res = {'result': plugin.rpc.call(method, params)}
except RpcError as e:
res = {'error': e.error}
send_result(plugin, peer_id, idnum, res)
def try_command(plugin, peer_id, idnum, method, params, runestr):
"""Run an arbitrary command and message back the result"""
# You can always set your rune, even if *that rune* wouldn't
# allow it!
if method == 'commando-cacherune':
res = do_cacherune(plugin, peer_id, runestr)
else:
ok, failstr = check_rune(plugin, peer_id, runestr, method, params)
if not ok:
res = {'error': 'Not authorized: ' + failstr}
elif method in plugin.methods:
# Don't try to call indirectly into ourselves; we deadlock!
# But commando-rune is useful, so hardcode that.
if method == "commando-rune":
if isinstance(params, list):
res = {'result': commando_rune(plugin, *params)}
else:
res = {'result': commando_rune(plugin, **params)}
else:
res = {'error': 'FIXME: Refusing to call inside ourselves'}
else:
# The subprocess does send_result itself: pyln-client doesn't
# support async RPC yet.
multiprocessing.Process(target=command_run,
args=(plugin, peer_id, idnum, method, params)).start()
return
send_result(plugin, peer_id, idnum, res)
@plugin.async_hook('custommsg')
def on_custommsg(peer_id, payload, plugin, request, **kwargs):
pbytes = bytes.fromhex(payload)
mtype = int.from_bytes(pbytes[:2], "big")
idnum = int.from_bytes(pbytes[2:10], "big")
data = pbytes[10:]
if mtype == COMMANDO_CMD:
method, params, runestr = split_cmd(data)
try_command(plugin, peer_id, idnum, method, params, runestr)
elif mtype == COMMANDO_REPLY_CONTINUES:
if idnum in plugin.reqs:
plugin.reqs[idnum].buf += data
elif mtype == COMMANDO_REPLY_TERM:
if idnum in plugin.reqs:
plugin.reqs[idnum].buf += data
finished = plugin.reqs[idnum]
del plugin.reqs[idnum]
try:
ret = json.loads(finished.buf.decode())
except Exception as e:
# Bad response
finished.req.set_exception(e)
return {'result': 'continue'}
if 'error' in ret:
# Pass through error
finished.req.set_exception(RpcError('commando', {},
ret['error']))
else:
# Pass through result
finished.req.set_result(ret['result'])
request.set_result({'result': 'continue'})
@plugin.async_method("commando")
def commando(plugin, request, peer_id, method, params=None, rune=None):
"""Send a command to node_id, and wait for a response"""
res = {'method': method}
if params:
res['params'] = params
if rune:
res['rune'] = rune
while True:
idnum = random.randint(0, 2**64 - 1)  # must fit in 8 bytes for to_bytes(8, 'big')
if idnum not in plugin.reqs:
break
plugin.reqs[idnum] = CommandResponse(request)
send_msg(plugin, peer_id, COMMANDO_CMD, idnum, json.dumps(res))
@plugin.method("commando-cacherune")
def commando_cacherune(plugin, rune):
"""Sets the rune given to the persistent rune for this peer_id"""
# This is intercepted by commando runner, above.
raise RpcError('commando-cacherune', {},
'Must be called as a remote commando call')
def add_reader_restrictions(rune: runes.Rune) -> str:
"""Let them execute list or get, but not getsharesecret!"""
# Allow list*, get* or summary.
rune.add_restriction(runes.Restriction.from_str('method^list'
'|method^get'
'|method=summary'))
# But not getsharesecret!
rune.add_restriction(runes.Restriction.from_str('method/getsharedsecret'))
# And not listdatastore!
rune.add_restriction(runes.Restriction.from_str('method/listdatastore'))
return rune.to_base64()
def save_peer_rune(plugin, peer_id, runestr) -> None:
assert plugin.have_datastore
plugin.rpc.datastore(key=['commando', 'peer_runes', peer_id],
string=runestr,
mode='create-or-replace')
def load_peer_runes(plugin) -> Dict[str, str]:
if not plugin.have_datastore:
return {}
peer_runes = {}
entries = plugin.rpc.listdatastore(key=['commando', 'peer_runes'])
for entry in entries['datastore']:
peer_runes[entry['key'][2]] = entry['string']
return peer_runes
@plugin.method("commando-rune")
def commando_rune(plugin, rune=None, restrictions=[]):
"""Create a rune, (or derive from {rune}) with the given
{restrictions} array (or string), or 'readonly'"""
if not plugin.have_datastore:
raise RpcError('commando-rune', {},
'No datastore available: try datastore.py?')
if rune is None:
this_rune = plugin.masterrune.copy()
this_rune.add_restriction(runes.Restriction.unique_id(plugin.rune_counter))
else:
this_rune, whynot = is_rune_valid(plugin, rune)
if this_rune is None:
raise RpcError('commando-rune', {'rune': rune}, whynot)
if restrictions == 'readonly':
add_reader_restrictions(this_rune)
elif isinstance(restrictions, str):
this_rune.add_restriction(runes.Restriction.from_str(restrictions))
else:
for r in restrictions:
this_rune.add_restriction(runes.Restriction.from_str(r))
# Now we've succeeded, update rune_counter.
if rune is None:
plugin.rpc.datastore(key=['commando', 'rune_counter'],
string=str(plugin.rune_counter + 1),
mode='must-replace',
generation=plugin.rune_counter_generation)
plugin.rune_counter += 1
plugin.rune_counter_generation += 1
return {'rune': this_rune.to_base64()}
@plugin.init()
def init(options, configuration, plugin):
plugin.reqs = {}
plugin.writers = options['commando_writer']
plugin.readers = options['commando_reader']
plugin.version = plugin.rpc.getinfo()['version']
# dev-sendcustommsg was renamed to sendcustommsg for 0.10.1
try:
plugin.rpc.help('sendcustommsg')
plugin.msgcmd = 'sendcustommsg'
except RpcError:
plugin.msgcmd = 'dev-sendcustommsg'
# Unfortunately, on startup it can take a while for
# the datastore to be loaded (as it's actually a second plugin,
# loaded by the first).
end = time.time() + 10
secret = None
while time.time() < end:
try:
secret = plugin.rpc.listdatastore(['commando', 'secret'])['datastore']
except RpcError:
time.sleep(1)
else:
break
if secret is None:
# Use a throwaway secret
secret = secrets.token_bytes()
plugin.have_datastore = False
plugin.peer_runes = {}
plugin.log("Initialized without rune support"
" (needs datastore.py plugin)",
level="info")
else:
plugin.have_datastore = True
if secret == []:
plugin.log("Creating initial rune secret", level='unusual')
secret = secrets.token_bytes()
plugin.rpc.datastore(key=['commando', 'secret'], hex=secret.hex())
plugin.rune_counter = 0
plugin.rune_counter_generation = 0
plugin.rpc.datastore(key=['commando', 'rune_counter'], string=str(0))
else:
secret = bytes.fromhex(secret[0]['hex'])
counter = plugin.rpc.listdatastore(['commando', 'rune_counter'])['datastore'][0]
plugin.rune_counter = int(counter['string'])
plugin.rune_counter_generation = int(counter['generation'])
plugin.log("Initialized with rune support: {} runes so far".format(plugin.rune_counter),
level="info")
plugin.masterrune = runes.MasterRune(secret)
plugin.peer_runes = load_peer_runes(plugin)
plugin.add_option('commando_writer',
description="What nodeid can do all commands?",
default=[],
multi=True)
plugin.add_option('commando_reader',
description="What nodeid can do list/get/summary commands?",
default=[],
multi=True)
plugin.run()
```
#### File: plugins/drain/utils.py
```python
import time
TIMEOUT = 60
# we need to have this pyln.testing.utils code duplication
# as this also needs to be run without testing libs
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success() and time.time() < start_time + timeout:
time.sleep(interval)
interval *= 2
if interval > 5:
interval = 5
if time.time() > start_time + timeout:
raise ValueError("Timeout waiting for {}".format(success))
# waits for a bunch of nodes HTLCs to settle
def wait_for_all_htlcs(nodes):
for n in nodes:
n.wait_for_htlcs()
# returns our_amount_msat for a given node and scid
def get_ours(node, scid):
return [c for c in node.rpc.listfunds()['channels'] if c.get('short_channel_id') == scid][0]['our_amount_msat']
# returns their_amount_msat for a given node and scid
def get_theirs(node, scid):
ours = get_ours(node, scid)
total = [c for c in node.rpc.listfunds()['channels'] if c.get('short_channel_id') == scid][0]['amount_msat']
return total - ours
# these wait for the HTLC commit settlement to change our/their amounts
def wait_ours(node, scid, ours_before):
wait_for(lambda: ours_before != get_ours(node, scid))
return get_ours(node, scid)
``` |
{
"source": "joemull/pyjade",
"score": 3
} |
#### File: joemull/pyjade/pyjade.py
```python
import os
import re
import sys
import json
import string
import datetime
import mysql.connector
from diskcache import Cache
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from tqdm import tqdm
from safeprint import print
'''
Options
'''
try: # Options file setup credit <NAME>
with open(os.path.join('options.json')) as env_file:
ENV = json.loads(env_file.read())
except:
print('"Options.json" not found; please add "options.json" to the current directory.')
'''
SQL Connection
'''
DB = mysql.connector.connect(
host=ENV['SQL']['HOST'],
user=ENV['SQL']['USER'],
passwd=ENV['SQL']['PASSWORD'],
database=ENV['SQL']['DATABASE']
)
CUR = DB.cursor(buffered=True)
'''
Setup
'''
BEGIN = datetime.datetime.now()
TS = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
ITEM_ELEMENTS = ENV['ELEMENT_DICTIONARY']['DCTERMS_IN_USE']
ITEM_ELEMENTS.update(ENV['ELEMENT_DICTIONARY']['DESC_JADE_ELEMENTS'])
TYPES = ENV['ELEMENT_DICTIONARY']['TYPES']
OUT_DIR = 'outputs/'
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
DATASET_OPTIONS = ENV['DATASET_OPTIONS']
CRUMBS = DATASET_OPTIONS['EXPORT_SEPARATE_SQL_CRUMBS']
PROP_SET_LIST = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
INCLUDE_PROPS = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
class Dataset():
def __init__(self):
'''
Start building the dataset objects by pulling IDs and types from omek_items
'''
statement = '''
SELECT omek_items.id as item_id, omek_item_types.`name` as 'jade_type', collection_id as 'jade_collection' FROM omek_items
JOIN omek_item_types on omek_items.item_type_id = omek_item_types.id
WHERE public = 1
ORDER BY item_id;
'''
self.omek_items = pd.read_sql(statement,DB)
self.omek_items = self.omek_items.set_index('item_id',drop=False)
self.objects = self.omek_items.copy()
self.objects['item_id'] = self.objects['item_id'].apply(
lambda x: self.convert_to_jade_id(x))
self.objects.rename(columns={'item_id': 'jade_id'},inplace=True)
self.objects = self.objects.set_index('jade_id',drop=False)
self.objects = self.objects[self.objects['jade_type'].isin(
['Text','Event','Person','Organization','Publication']
)]
# Noise is an alternate dataset to record property values that don't fit the regular usage
self.noise = self.objects.copy()
self.noise.drop('jade_type',axis=1)
self.noise.drop('jade_collection',axis=1)
def ingest(self,limit=None):
'''
Get the item element texts
'''
statement = f'''
SELECT et.id AS id, et.record_id AS record_id,
et.element_id AS element_id, et.`text` AS el_text,
items.item_type_id AS item_type
FROM omek_element_texts as et
JOIN omek_items AS items ON et.record_id = items.id
WHERE record_type = "Item"
ORDER BY id;
'''
if limit != None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.element_texts = pd.read_sql(statement,DB)
# Load environment variables
ELEMENT_IDS = list(ITEM_ELEMENTS.keys())
# Set data structure:
data = {}
noise = {}
# Iterate through the element_texts
iter = tqdm(self.element_texts.iterrows())
iter.set_description("Ingesting item attributes")
for tup in iter:
row = tup[1]
element_id = str(row.loc['element_id'])
if row.loc['record_id'] in self.omek_items.index.values:
jade_type = self.omek_items.loc[row.loc['record_id'],'jade_type']
jade_id = self.convert_to_jade_id(row.loc['record_id'])
# Filter element texts through environment variables
if element_id in ELEMENT_IDS:
if jade_type in TYPES.values():
element_label = ITEM_ELEMENTS[element_id]
# Filters property values through the sets designated in the options
if element_label in INCLUDE_PROPS[jade_type]:
compile_json(data,jade_id,element_label,row.loc['el_text'])
else:
compile_json(noise,jade_id,element_label,row.loc['el_text'])
# if CRUMBS:
# print('Excluded',element_label,'in type',jade_type)
# Add accumulated data to DataFrame
new_df = pd.DataFrame.from_dict(data,orient='index')
new_noise_df = pd.DataFrame.from_dict(noise,orient='index')
self.objects = pd.concat([self.objects,new_df],axis=1)
self.noise = pd.concat([self.noise,new_noise_df],axis=1)
# Add URLs
base_url = "https://digital.janeaddams.ramapo.edu/items/show/"
self.objects.insert(loc=1,column='jade_url',value=[
base_url+id.split('_')[-1] for id in self.objects.index.values
])
self.add_collections(limit)
self.add_tags(limit)
# Remove records with no title fields found
self.objects = self.objects.dropna(subset=['dcterms_title'])
def convert_to_jade_id(self,item_id):
'''
Prepend the type string to the SQL primary key so that locations and items are unique in the same set of relations
'''
if type(item_id) != type(str):
if item_id in self.omek_items.index.values:
the_type = self.omek_items.at[item_id,"jade_type"]
if the_type in list(TYPES.values()):
return the_type.lower()+"_"+str(item_id)
else:
return "unspecified_"+str(item_id)
else:
return "unpublished_"+str(item_id)
else:
return item_id
def add_tags(self,limit):
'''
Pull tags from the database
'''
statement = f'''
SELECT * FROM omek_records_tags
JOIN omek_tags on omek_records_tags.tag_id = omek_tags.id;
'''
self.tag_df = pd.read_sql(statement,DB)
self.objects = self.objects[:limit].apply(
lambda x : self.add_tag(x),axis=1)
def add_tag(self, row_ser):
'''
Add the tag to the list for each object
'''
new_subj_field = []
id = row_ser.loc['jade_id']
try:
tag_names = self.tag_df.loc[self.tag_df['record_id'] == int(id.split("_")[-1])]
if not tag_names.empty:
for name in tag_names['name'].to_list():
if name not in new_subj_field:
new_subj_field.append(name)
row_ser['dcterms_subject'] = new_subj_field
return row_ser
except:
return row_ser
def add_collections(self,limit):
'''
Pull collections from the database
'''
statement = '''
SELECT omek_collections.id as collection_id, `text` as collection_name FROM omek_collections
JOIN omek_element_texts AS texts ON omek_collections.id = texts.record_id
WHERE record_type = "Collection"
AND element_id = 50
AND public = 1;
'''
self.collection_df = pd.read_sql(statement,DB)
self.collection_df = self.collection_df.set_index('collection_id')
self.objects = self.objects[:limit].apply(
lambda x : self.add_collection(x),
axis=1
)
def add_collection(self,row_ser):
'''
Add the collection to the list for each object
'''
new_collection_field = []
ids = row_ser.loc['jade_collection']
if not isinstance(ids, list):
ids = [ids]
try:
for coll_id in ids:
matches = self.collection_df.at[coll_id,'collection_name']
if isinstance(matches,np.ndarray):
match_list = matches.tolist()
elif isinstance(matches,str):
match_list = [matches]
else:
print("Unrecognized type of collection",type(matches))
for name in match_list:
if name not in new_collection_field:
new_collection_field.append(name)
row_ser['jade_collection'] = new_collection_field
return row_ser
except:
return row_ser
def add_relations(self,limit=None):
'''
Ingest relation data from SQL
'''
# Read from SQL tables omek_item_relations_relations and omek_item_relations_properties
statement = f'''
SELECT relations.id as id, relations.subject_item_id AS subjId, properties.id as relId, properties.label AS relLabel, relations.object_item_id AS objId
FROM omek_item_relations_relations AS relations
JOIN omek_item_relations_properties AS properties ON relations.property_id = properties.id;
'''
if limit != None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.relations = pd.read_sql(statement,DB,index_col='id')
# Style relation labels with camel case
self.relations['relLabel'] = self.relations['relLabel'].apply(
lambda x: camel(x))
# Set up data structure
data = {}
noise = {}
# Add the type prefix to the subject and object IDs
self.relations['subjId'] = self.relations['subjId'].apply(
lambda x: self.convert_to_jade_id(x))
self.relations['objId'] = self.relations['objId'].apply(
lambda x: self.convert_to_jade_id(x))
# Iterate through the relation set
iter = tqdm(self.relations.iterrows())
iter.set_description("Adding relations")
for tup in iter:
row = tup[1]
subjId = row['subjId']
relLabel = row['relLabel']
objId = row['objId']
if (
subjId in self.objects.index.values
) and (
objId in self.objects.index.values
):
# print(subjId,objId)
compile_json(data,subjId,relLabel,objId)
else:
compile_json(noise,subjId,relLabel,objId)
# Add locations to the relations
# This is a thorny call bramble that should probably be untangled in a future iteration of the script
locSet = LocationSet()
locSet.ingest(self,limit=limit)
data, noise = self.add_locations(locSet,data,noise)
# Add the compiled relation data into the main DataFrame and the noise bin
new_df = pd.DataFrame(data={"jade_relation":list(data.values())},index=list(data.keys()))
self.objects = pd.concat([self.objects,new_df],sort=False,axis=1)
new_noise_df = pd.DataFrame(data={"jade_relation":list(noise.values())},index=list(noise.keys()))
self.noise = pd.concat([self.noise,new_noise_df],sort=False,axis=1)
def add_locations(self,locSet,data,noise):
'''
Add locations from class object already constructed
'''
# Add the type prefix to the location and item IDs
locSet.locations['loc_id'] = locSet.locations['loc_id'].astype(str)
locSet.locations['loc_id'] = locSet.locations['loc_id'].apply(
lambda x : "location_" + str(x))
locSet.locations.rename(columns={'loc_id': 'jade_id'},inplace=True)
# Merge locations table into objects table
self.objects = pd.concat([self.objects,locSet.locations],axis=0)
self.objects = self.objects.set_index('jade_id',drop=False)
self.objects.index.name = None
dataset_ids = self.objects.index.values
self.location_duplicates = locSet.location_duplicates
# Iterate through the location set
iter = tqdm(locSet.locations.iterrows())
iter.set_description("Adding locations")
for tup in iter:
row = tup[1]
# Iterate through the collection of items for each location
for rel in list(row.loc['loc_relation'].items()):
loc_id = row.loc['jade_id']
desc_list = rel[1]
item_id = rel[0]
for desc in desc_list:
# Build up the data structure for the later DataFrame
if item_id in dataset_ids:
compile_json(data,item_id,desc,loc_id)
else:
compile_json(noise,item_id,desc,loc_id)
# Remove relations from locations table as they are now represented in item rows
self.objects = self.objects.drop("loc_relation",axis=1)
# Add location types
self.objects = self.objects.apply(
lambda ser : self.add_location_types(ser),
axis=1
)
self.noise = self.noise.apply(
lambda ser : self.add_location_types(ser),
axis=1
)
self.objects = self.objects.dropna(subset=['jade_id'])
return data, noise
def add_location_types(self,row):
'''
Look for null type values and adds location if location in jade_id prefix
'''
try:
if pd.isnull(row.loc['jade_type']):
if type(row.loc['jade_id']) == type(""):
if row.loc['jade_id'].split("_")[0] == "location":
row.loc['jade_type'] = "Location"
else:
print("Type null but not location:",row)
else:
print('Dropped type not included:',row['jade_url'])
return row
except:
print("Unknown problem during adding location type for:",row)
def quantify(self):
'''
Run counting functions on properties and relations to create descriptive statistics about the data
'''
self.quant = {}
# Items
self.quant["item_property_count"] = self.objects.count()
# Item properties
self.quantify_properties()
# Item properties by type
self.quantify_properties_by_type()
# Relations (including location relations)
self.quantify_relations()
# Data nesting
self.quant['nesting'] = {}
self.check_nesting(self.objects)
def quantify_properties(self):
'''
Run counts of properties
'''
# Iterate through properties identified for faceting
props = list(DATASET_OPTIONS['SUBSET_PROPERTIES_AND_QUANTITIES'].items())
iter = tqdm(props)
iter.set_description("Quantifying subsets by facet")
for prop, lim in iter:
if prop in self.objects.columns.values:
# Special cases
if prop in ['dcterms_date']:
# Date
dc_dates_ser = self.objects[prop]
dc_dates_ser = dc_dates_ser.apply(unwrap_list)
dc_dates_ser = dc_dates_ser.dropna()
for id in dc_dates_ser.index.values:
try:
date_val = dc_dates_ser[id]
if not isinstance(date_val, list):
date_list = [date_val]
else:
date_list = date_val
for date_string in date_list:
if not isinstance(date_string, str):
date_string = str(date_string)
yearlike = date_string.split('-')[0]
if (
len(yearlike) == 4
) and (
int(yearlike[0]) == 1
) and (
yearlike[3] in '0123456789'
):
year = yearlike
dc_dates_ser[id] = str(year)
else:
dc_dates_ser.drop(id)
print('Dropped unrecognized date value:',id,dc_dates_ser[id])
except:
dc_dates_ser.drop(id)
print('Dropped unrecognized date value:',id,dc_dates_ser[id])
if len(dc_dates_ser) > 1:
self.add_to_quant(
dc_dates_ser,
sort_on_property_name=False)
# All others / standard structure
else:
ser = self.objects[prop]
ser = ser.dropna()
if len(ser) > 1:
self.add_to_quant(ser)
def add_to_quant(
self,
series, # A named Series object whose index is the item or location IDs
# and whose values are non-empty strings or lists of strings
sort_on_property_name = False # Default False sorts by largest count. Optional True sorts alphabetically by property name
):
'''
Index the DataFrame's IDs by value of passed property (column name)
'''
property = series.name
# Create an index of jade_ids by property value for the series (column) passed
for id in series.index.values:
cell = series[id]
if isinstance(cell, np.ndarray):
cell = cell.tolist()
if not isinstance(cell, list):
cell = [cell]
for val in cell:
compile_json(
self.quant,
property,
val.strip() if isinstance(val, str) else val,
id)
# Create a dictionary of property values and instance counts
for val in list(self.quant[property].keys()):
compile_json(self.quant,
property+"_count",
val,
len(self.quant[property][val]))
# Sort the dictionary and add it to the dataset object
if not sort_on_property_name:
self.quant[property+"_count"] = dict(
sort_by_item_counts(self.quant[property+"_count"]))
self.quant[property+"_count"] = pd.Series(
self.quant[property+"_count"],
index=list(self.quant[property+"_count"].keys()),
name=property+"_count")
if sort_on_property_name:
self.quant[property+"_count"] = self.quant[property+"_count"].sort_index()
# Go ahead and unwrap the single-integer lists created by compile_json
self.quant[property+"_count"] = self.quant[property+"_count"].apply(unwrap_list)
def quantify_properties_by_type(self):
'''
Create a table of property counts by object type
'''
# Get a copy of the main DataFrame and send each row through the counter
self.quant['prop_table'] = {}
df = self.objects.copy()
df = df.apply(
lambda ser : self.compile_types_by_prop(ser),
axis=1
)
# Make the resulting dict a DataFrame, sort it, and abbreviate column headers
self.quant['prop_table'] = pd.DataFrame.from_dict(
self.quant['prop_table'],
orient='index')
self.quant['prop_table'] = self.quant['prop_table'][[
'Person',
'Text',
'Event',
'Organization',
'Publication',
'Location',
'All Types'
]]
self.quant['prop_table'] = self.quant['prop_table'].sort_index()
self.quant['prop_table'].rename(columns={'Organization':'Org.', 'Publication':'Pub.', 'Location':'Loc.'},inplace=True)
def compile_types_by_prop(self,ser):
'''
Count the properties in the passed series by object type
'''
jade_type = ser.loc['jade_type']
jade_type = unwrap_list(jade_type)
if jade_type in list(INCLUDE_PROPS.keys()):
for prop in ser.index.values:
if prop in INCLUDE_PROPS[jade_type]:
cell = ser.loc[prop]
if not isinstance(cell, list):
cell = [cell]
if not pd.isnull(cell).any():
if prop not in self.quant['prop_table']:
self.quant['prop_table'][prop] = {}
if "All Properties" not in self.quant['prop_table']:
self.quant['prop_table']['All Properties'] = {}
if jade_type not in self.quant['prop_table'][prop]:
self.quant['prop_table'][prop][jade_type] = 1
else:
self.quant['prop_table'][prop][jade_type] += 1
if "All Types" not in self.quant['prop_table'][prop]:
self.quant['prop_table'][prop]["All Types"] = 1
else:
self.quant['prop_table'][prop]["All Types"] += 1
if jade_type not in self.quant['prop_table']['All Properties']:
self.quant['prop_table']['All Properties'][jade_type] = 1
else:
self.quant['prop_table']['All Properties'][jade_type] += 1
return ser
def quantify_relations(self):
'''
Make a list of unique relation triples and a table of the most common subject–object pairs
'''
# Iterate through relations in the Dataset
uniq_rels = {}
count_df_index = []
count_df_columns = []
iter = tqdm(self.objects.index.values)
iter.set_description("Counting unique relations")
for subjId in iter:
row = self.objects.loc[subjId]
row_rels_dict = row.loc['jade_relation']
if not pd.isnull(row_rels_dict):
for relLabel, objIdList in row_rels_dict.items():
for objId in objIdList:
# Find the types of each subject and object
subjType = subjId.split('_')[0].capitalize()
objType = objId.split('_')[0].capitalize()
# Count the unique combinations of subject, relation, and object
rel = " ".join([subjType,relLabel,objType])
if rel not in uniq_rels:
uniq_rels[rel] = 1
else:
uniq_rels[rel] += 1
# Make the dimensions for a dataframe
if subjType not in count_df_index:
count_df_index.append(subjType)
if objType not in count_df_columns:
count_df_columns.append(objType)
# Sort and output simple list
self.quant["unique_relation_list"] = pd.DataFrame.from_dict(
dict(sort_by_item_counts(uniq_rels)),orient='index')
# Make the dataframe
count_df = pd.DataFrame(data=0,index=count_df_index,columns=count_df_columns)
for rel in list(uniq_rels.keys()):
count = uniq_rels[rel]
try:
subjType, relLabel, objType = rel.split(' ')
count_df.at[subjType,objType] += count
            except (ValueError, KeyError):
print("Error counting relation:",rel)
self.quant["unique_relation_table"] = count_df
def check_nesting(self,df):
'''
Check whether each column in the passed df has repeating values in any of the rows
'''
for prop in df.columns.values:
column_ser = df[prop]
column_ser = column_ser.dropna()
self.is_nested(column_ser)
def is_nested(self,ser):
'''
        Does the passed series (column) contain repeating/nested values?
'''
nested = False
for id, val in ser.iteritems():
            if isinstance(val, (list, dict)):
if len(val) > 1:
nested = True
self.quant['nesting'][ser.name] = nested
def unwrap_nonrepeating_columns(self):
'''
If a column hasn't been marked as nested, take its values out of the list wrappers
'''
for prop in self.objects.columns.values:
if not self.quant['nesting'][prop]:
self.objects[prop] = self.objects[prop].apply(unwrap_list)
def segment_by_type(self,df):
'''
Break up the passed dataframe by object type and return up to six separate frames that only have the properties belonging to their types
'''
type_segments = {}
for type_name in list(PROP_SET_LIST.keys()):
prospective_props = PROP_SET_LIST[type_name]
props_for_this_type = []
for prop in prospective_props:
if prop in df.columns.values:
props_for_this_type.append(prop)
segment_df = df[props_for_this_type]
segment_df = segment_df.loc[lambda text_df: text_df['jade_type'] == type_name, :]
type_segments[type_name] = segment_df
return type_segments
def export_stats(self):
'''
Export results from quantify to an XLSX file
'''
filepath = f'{OUT_DIR}{TS}-batch/'
if not os.path.exists(filepath):
os.makedirs(filepath)
with open(
filepath+"jade_data_stats.md",
'w',
encoding='utf-8'
) as md_writer:
with pd.ExcelWriter(
filepath+"jade_data_stats.xlsx",
encoding='utf-8'
) as excel_writer:
for k in list(self.quant.keys()):
if k.split("_")[-1] in ["count", "list", "table"]:
md_writer.write(f"\n\n## {k}\n"+self.quant[k].to_markdown())
if isinstance(self.quant[k], pd.Series):
df = self.quant[k].apply(lambda x : colons_and_semicolons(x))
df = df.apply(lambda x: zap_illegal_characters(x))
else:
df = self.quant[k].applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,sheet_name=k)
def export_single_sheet(self):
'''
Export one big sheet that has all the objects and all the properties and relations (contains a lot of blank cells)
'''
filepath = f'{OUT_DIR}{TS}-batch/'
if not os.path.exists(filepath):
os.makedirs(filepath)
with pd.ExcelWriter(
filepath+"jade_data_single_sheet.xlsx",
encoding='utf-8'
) as excel_writer:
df = self.objects.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,index=False,sheet_name='jade_data')
def export_complete_dataset(self):
'''
Export a complete, curated dataset, segmented by object type in the XLSX and CSV formats
'''
self.type_segments = self.segment_by_type(self.objects)
filepath = f'{OUT_DIR}{TS}-batch/complete_data/'
self.run_outputs(self.type_segments,filepath)
# filepath = f'{OUT_DIR}{TS}-batch/complete_data/Locations'
# self.run_outputs(self.locations,filepath)
def export_subsets(self):
'''
Manage creation of subsets by property value, using quant information
'''
props = list(DATASET_OPTIONS['SUBSET_PROPERTIES_AND_QUANTITIES'].items())
iter = tqdm(props)
iter.set_description("Exporting subsets by facet")
for prop, lim in iter:
if prop in self.quant:
self.create_subset(
prop,
self.quant[prop],
self.quant[prop+'_count'],
lim)
def create_subset(self,prop,attr_dict,ranked_attr_counts,lim):
'''
Create a subset for the passed property, using indexes in quant
'''
ranked_attr_list = list(ranked_attr_counts.keys())
for val in ranked_attr_list[:lim]:
filtered_jade_ids = attr_dict[val]
count = str(ranked_attr_counts[val])
# Items
df = self.objects[self.objects.index.isin(filtered_jade_ids)]
segmented_subset_dfs = self.segment_by_type(df)
safe_val_string = safen_string(val)
filepath = f'{OUT_DIR}{TS}-batch/filtered_subsets/{prop}/{safe_val_string} {count}/'
self.run_outputs(segmented_subset_dfs,filepath,filename=f'{prop} {safe_val_string} {count}')
def export_crumbs(self):
'''
Export a spreadsheet with noise from the RDBMS that did not conform to regular property usage. Does not yet contain relation noise. May have a bug with location noise, including too many locations. Also has a bug with respect to jade_id and jade_collection, leaving all of the regular values for those properties in.
'''
filepath = f'{OUT_DIR}{TS}-batch/'
if not os.path.exists(filepath):
os.makedirs(filepath)
with pd.ExcelWriter(
filepath+"sql_crumbs.xlsx",
encoding='utf-8'
) as excel_writer:
df = self.noise.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,index=False,sheet_name='item_noise')
df = self.location_duplicates.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,index=False,sheet_name='location_noise')
def run_outputs(self,type_segment_dfs,filepath,filename='default'):
'''
Manages the outputs specified for the dfs passed
'''
if not os.path.exists(filepath):
os.makedirs(filepath)
tsdfs = type_segment_dfs
if DATASET_OPTIONS['EXPORT_XLSX']:
self.save_xlsx(tsdfs,filepath,filename)
if DATASET_OPTIONS['EXPORT_CSV']:
self.save_csv(tsdfs,filepath,filename)
if DATASET_OPTIONS['EXPORT_JSON']:
self.save_json(tsdfs,filepath,filename)
text_df = tsdfs['Text']
if (
DATASET_OPTIONS['EXPORT_TXT']
) or (
DATASET_OPTIONS['EXPORT_HTML']
):
if len(text_df) > 0:
self.save_txt_and_html(text_df,filepath,filename)
def save_xlsx(self,tsdfs,filepath,filename):
'''
Run an XLSX export, putting multiple tables in a single workbook
'''
with pd.ExcelWriter(
f"{filepath}{'jade_data' if filename == 'default' else filename}.xlsx",
encoding='utf-8'
) as excel_writer:
for name, df in list(tsdfs.items()):
df = df.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
if len(df) > 0:
df.to_excel(excel_writer,index=False,sheet_name=name)
def save_csv(self,tsdfs,filepath,filename):
'''
Run a CSV export, using a subdirectory for multiples
'''
filepath+=f"{'jade_data' if filename == 'default' else filename}_csv"
if not os.path.exists(filepath):
os.makedirs(filepath)
for name, df in list(tsdfs.items()):
if len(df) > 0:
df.to_csv(f'{filepath}/jade_{name}.csv',index=False)
def save_json(self,tsdfs,filepath,filename):
'''
Run a JSON export, putting all the objects at the same level (no type segments) or wrapping them, depending on options
'''
json_output = {}
if DATASET_OPTIONS['WRAP_JSON_RECORDS_IN_TYPE_BRANCHES']:
for name, df in list(tsdfs.items()):
json_output[name] = json.loads(df.to_json(orient='index'))
if not DATASET_OPTIONS['WRAP_JSON_RECORDS_IN_TYPE_BRANCHES']:
for name, df in list(tsdfs.items()):
json_output.update(json.loads(df.to_json(orient='index')))
with open(filepath+f"{'jade_data' if filename == 'default' else filename}.json",'w') as fileref:
fileref.write(json.dumps(json_output))
def save_txt_and_html(self,df,filepath,filename):
'''
Run export of texts, using subdirectories by format
'''
if DATASET_OPTIONS['EXPORT_TXT']:
txt_filepath = filepath+f"{'jade_texts' if filename == 'default' else filename}_txt/"
if not os.path.exists(txt_filepath):
os.makedirs(txt_filepath)
if DATASET_OPTIONS['EXPORT_HTML']:
html_filepath = filepath+f"{'jade_texts' if filename == 'default' else filename}_html/"
if not os.path.exists(html_filepath):
os.makedirs(html_filepath)
# Iterate through the text column
text_ser = df["jade_text"]
text_ser = text_ser.dropna()
text_ser = text_ser.apply(unwrap_list)
for jade_id, val in text_ser.iteritems():
# Manage whether values are wrapped in lists
            if not isinstance(val, list):
                val_list = [val]
            else:
                val_list = val
            for val in val_list:
if not pd.isnull(val):
# Check whether value is html
is_html = False
if "<" in val:
if ">" in val:
is_html = True
# Run HTML and TXT exports
if is_html:
soup = BeautifulSoup(val,'html.parser')
if DATASET_OPTIONS['EXPORT_HTML']:
with open(html_filepath+jade_id+'.html','w',encoding='utf-8') as html_ref:
html_ref.write(soup.prettify())
if DATASET_OPTIONS['EXPORT_TXT']:
with open(txt_filepath+jade_id+'.txt','w',encoding='utf-8') as txt_ref:
txt_ref.write(text_with_newlines(soup))
else:
if DATASET_OPTIONS['EXPORT_TXT']:
with open(txt_filepath+jade_id+'.txt','w',encoding='utf-8') as txt_ref:
txt_ref.write(val)
class LocationSet():
'''
A class to hold locations in the few seconds before they get subsumed into the dataset object
'''
# A dummy init function
def __init__(self):
pass
# Ingest location data from SQL
def ingest(self,dataset,limit=None):
# Read from SQL table omek_locations
statement = f'''
SELECT * FROM omek_locations;
'''
        if limit is not None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.omek_locations = pd.read_sql(statement,DB)
# Set up data structure for later DataFrame
data = {}
noise = {}
ids = []
retrieved = []
# Convert item IDs
self.omek_locations['item_id'] = self.omek_locations['item_id'].apply(
lambda x: dataset.convert_to_jade_id(x))
# Read data retrieved from SQL
iter = tqdm(self.omek_locations.iterrows())
iter.set_description("Ingesting locations")
for tup in iter:
row = tup[1]
loc_id = row.loc['id']
if (
loc_id not in retrieved
) and (
row.loc['item_id'] in dataset.objects.index.values
):
cluster_address_versions = {}
# Check for duplicates
addr_fp = fingerprint(row.loc["address"])
cluster_statement = f'''
SELECT * FROM omek_locations
WHERE latitude = {row.loc['latitude']}
AND longitude = {row.loc['longitude']};
'''
cluster = pd.read_sql(cluster_statement,DB)
# Combine duplicates
for cluster_tup in cluster.iterrows():
cluster_row = cluster_tup[1]
if fingerprint(cluster_row.loc['address']) == addr_fp:
# Keep track of addresses to choose most common style below
if cluster_row.loc["address"] not in cluster_address_versions:
cluster_address_versions[cluster_row.loc["address"]] = 1
else:
cluster_address_versions[cluster_row.loc["address"]] += 1
# Group item-location relations, styling descriptions with camel case and defining blanks
cluster_loc_id = cluster_row.loc['id']
cluster_item_id = cluster_row.loc['item_id']
                        if (cluster_row.loc['description'] in ('', None)) or pd.isnull(cluster_row.loc['description']):
                            cluster_desc = 'noDescription'
else:
cluster_desc = camel(cluster_row.loc['description'])
# Put approved forms in the curated data
compile_json(
data,
loc_id,
"loc_relation",
dataset.convert_to_jade_id(cluster_item_id),
cluster_desc)
# Keep track of which rows have been combined
compile_json(
noise,
loc_id,
"set_of_dup_loc_ids_with_assoc_item_ids",
cluster_loc_id,
cluster_item_id)
retrieved.append(cluster_loc_id)
# Update address for row to most commonly used capitalization and punctuation
chosen_style = sort_by_item_counts(cluster_address_versions)[0][0]
data[loc_id]['jade_address'] = chosen_style
noise[loc_id]['jade_address'] = chosen_style
# Add in other properties
data[loc_id]['loc_id'] = loc_id
# data[loc_id]['jade_zoom_level'] = row.loc['zoom_level']
# data[loc_id]['jade_map_type'] = row.loc['map_type']
data[loc_id]['jade_latitude'] = row.loc['latitude']
data[loc_id]['jade_longitude'] = row.loc['longitude']
# Create DataFrame
self.locations = pd.DataFrame.from_dict(data,orient='index')
self.location_duplicates = pd.DataFrame.from_dict(noise,orient='index')
def fingerprint(address):
'''
A rudimentary string matching tool that strips everything except letters and numbers
'''
address_fingerprint = ''
for l in address.lower():
if l in string.ascii_lowercase + string.digits:
address_fingerprint += l
return address_fingerprint
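# Illustrative usage: fingerprint("12 Main St.") and fingerprint("12 main st") both reduce to
# "12mainst", so differently punctuated versions of the same address compare as equal.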
def camel(phrase_with_spaces,cap_first=False):
'''
Convert to camel case
'''
if len(phrase_with_spaces) == 0:
return ''
else:
capped_list = [w.capitalize() for w in phrase_with_spaces.split()]
if not cap_first:
new_list = [capped_list[0].lower()]
new_list.extend(capped_list[1:])
return "".join(new_list)
else:
return "".join(capped_list)
def compile_json(data,subj,relLabel,obj,obj2=None):
'''
This function nests the passed objects into a JSON tree, assuming that "data" is already an existing dictionary. If only four objects are passed, the fourth will appear in list structure. If five are passed, the fifth will be a list. The function does not return anything because dictionaries are mutable objects.
'''
if subj not in data:
data[subj] = {}
    if obj2 is None:
try:
if relLabel not in data[subj]:
data[subj][relLabel] = []
        except Exception:
print(subj,relLabel,obj)
if obj not in data[subj][relLabel]:
data[subj][relLabel].append(obj)
else:
secondRelLabel = obj
if relLabel not in data[subj]:
data[subj][relLabel] = {}
if secondRelLabel not in data[subj][relLabel]:
data[subj][relLabel][secondRelLabel] = []
if obj2 not in data[subj][relLabel][secondRelLabel]:
data[subj][relLabel][secondRelLabel].append(obj2)
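# Illustrative usage: starting from data = {}, compile_json(data, "item_1", "jade_collection", "c1")
# yields {"item_1": {"jade_collection": ["c1"]}}, while the five-argument form
# compile_json(data, "item_1", "jade_relation", "hasPart", "item_2") nests one level deeper:
# {"item_1": {"jade_relation": {"hasPart": ["item_2"]}}}.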
def sort_by_item_counts(count_dict):
'''
Sort a dictionary by the greatest integer (value)
'''
return sorted(
count_dict.items(),
key = lambda x : x[1],
reverse = True
)
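# Illustrative usage: sort_by_item_counts({"a": 2, "b": 5}) -> [("b", 5), ("a", 2)].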
def unwrap_list(something):
"""
Single things don't need to be in lists for some purposes
"""
if (isinstance(something, list)) and (len(something) == 1):
return something[0]
else:
return something
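# Illustrative usage: unwrap_list(["only"]) -> "only"; longer lists and non-list values pass through unchanged.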
def safen_string(a_string):
"""
Don't save folders ending with periods, for example
"""
if not isinstance(a_string, str):
return a_string
safe_string = ''
for l in a_string:
if l in string.whitespace+string.ascii_letters+string.digits+'-':
safe_string += l
return safe_string.strip()
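# Illustrative usage: safen_string("St. Louis, MO") -> "St Louis MO" (punctuation stripped, result trimmed).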
def text_with_newlines(elem):
'''
A custom alternative to BeautifulSoup's string methods that keeps line breaks represented by div and br elements. Whitespace and line breaks in JADE transcriptions are often used to represent spatial distance in the analog page.
Credit: https://gist.github.com/zmwangx/ad0830ba94b1fd98f428
'''
text = ''
for e in elem.descendants:
if isinstance(e, str):
text += e
elif e.name == 'br' or e.name == 'p' or e.name == 'div':
text += '\n'
return text
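# Illustrative usage: for BeautifulSoup("<div>a</div><div>b</div>", "html.parser") this returns "\na\nb",
# because a newline is emitted each time a div, p, or br element is encountered.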
def colons_and_semicolons(val):
'''
A manager for making lists and dictionaries more human-readable, for nested values in XLSX and CSV formats
'''
if isinstance(val, list):
val = pylist_to_human_list(val)
elif isinstance(val, dict):
val = pydict_to_semicolon_list(val)
return val
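# Illustrative usage: colons_and_semicolons(["a", "b"]) -> "a; b";
# colons_and_semicolons({"k": ["v1", "v2"]}) -> "k: v1, v2"; other values pass through unchanged.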
def pylist_to_human_list(pylist,separator=';'):
'''
Brackets hurt my eyes
'''
returnable_string = ''
if len(pylist) >= 1:
returnable_string += str(pylist[0]).strip()
if len(pylist) > 1:
for item in pylist[1:]:
returnable_string +=f"{separator} "+str(item).strip()
return returnable_string
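# Illustrative usage: pylist_to_human_list(["a", "b", "c"]) -> "a; b; c".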
def pydict_to_semicolon_list(pydict):
'''
Braces hurt my eyes too
'''
returnable_string = ''
tup_list = list(pydict.items())
if len(tup_list) > 0:
first_one = True
for tup in tup_list:
if first_one:
returnable_string+=str(tup[0])
else:
returnable_string+="; "+str(tup[0])
returnable_string+=": "+pylist_to_human_list(tup[1],separator=',')
first_one = False
return returnable_string
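# Illustrative usage: pydict_to_semicolon_list({"rel": ["x", "y"], "part": ["z"]}) -> "rel: x, y; part: z".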
def zap_illegal_characters(value):
'''
Somewhere in the JADE dataset, there are unescaped unicode characters that are in openpyxl's illegal characters list. This escapes all unicode characters in a passed string if it contains any of those illegal characters.
Source: https://openpyxl.readthedocs.io/en/2.4/_modules/openpyxl/cell/cell.html
'''
    if value is None:
return
if isinstance(value, str):
ILLEGAL_CHARACTERS_RE = re.compile(r'[\000-\010]|[\013-\014]|[\016-\037]')
if next(ILLEGAL_CHARACTERS_RE.finditer(value), None):
value = value.encode('unicode_escape').decode('utf-8')
return value
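# Illustrative usage: zap_illegal_characters("bad\x07value") -> "bad\\x07value"; the whole string is
# unicode-escaped once any openpyxl-illegal control character is present.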
if __name__ == '__main__':
'''
This run sequence depends on options.json, but it also asks for two decisions
'''
print('Loaded options file')
# Do you want to do just a little bit or all of it?
limit=1000
part_or_full = input(
f" 'part' to test script (by using limit={str(limit)} on the main SQL queries)\n 'full' to run full export \npyjade: ")
if part_or_full == 'full':
limit=None
# Are you just running the program because you changed the export options?
cache_or_fresh = input(
" 'cached' to load from cache (if just output settings were changed)\n 'fresh' to load from RDBMS \npyjade: ")
# Load from cache
if cache_or_fresh != 'fresh':
print("Using cached data set")
with Cache('DatasetCache') as ref:
dataset = ref[f"{part_or_full}_cached"]
# Get everything fresh from RDBMS
else:
print("Getting new data from RDBMS")
dataset = Dataset()
dataset.ingest(limit=limit)
dataset.add_relations(limit=limit)
dataset.quantify()
dataset.unwrap_nonrepeating_columns()
if cache_or_fresh == 'fresh':
with Cache('DatasetCache') as ref:
ref[f"{part_or_full}_cached"] = dataset
# When using cache, you can optionally requantify things (helpful for development)
if len(sys.argv) > 1:
if sys.argv[1] == 'requantify':
dataset.quantify()
# Do you want quant info?
if DATASET_OPTIONS['OUTPUT_STATS']:
dataset.export_stats()
# Do you want to run any data exports?
if DATASET_OPTIONS['EXPORT']:
if DATASET_OPTIONS['EXPORT_EVERYTHING_IN_SINGLE_XLSX_SHEET']:
dataset.export_single_sheet()
if DATASET_OPTIONS['EXPORT_COMPLETE_CURATED_DATASET']:
dataset.export_complete_dataset()
if DATASET_OPTIONS['EXPORT_SUBSETS']:
dataset.export_subsets()
# Do you want the noise / crumbs?
if CRUMBS:
dataset.export_crumbs()
end = datetime.datetime.now()
print("Time elapsed:",end-BEGIN)
``` |
{
"source": "joemulray/ostorybook-jarvis",
"score": 2
} |
#### File: modules/tests/test_flights.py
```python
import modules
def test_flights():
assert ('flights' == modules.process_query('Find me cheap flights')[0])
assert ('flights' == modules.process_query('Find me a flight home')[0])
assert ('flights' == modules.process_query('Can you find me a flight from <Location> to <Location>')[0])
assert ('flights' != modules.process_query('Flights to <Location> from <Location>')[0])
```
#### File: modules/tests/test_shopping.py
```python
import modules
def test_shopping():
assert ('shopping' == modules.process_query('Can you order me a smart bulb')[0])
assert ('shopping' == modules.process_query('Show me smartbulbs')[0])
assert ('shopping' == modules.process_query('Can you show me smartbulbs?')[0])
assert ('shopping' != modules.process_query('Show me stuff thats on sale')[0])
``` |
{
"source": "JoeMWatson/ncp",
"score": 2
} |
#### File: ncp/models/det_mix_ncp.py
```python
from tensorflow_probability import distributions as tfd
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from ncp import tools
def network(inputs, config):
hidden = inputs
for size in config.layer_sizes:
hidden = tf.layers.dense(hidden, size, tf.nn.leaky_relu)
mean = tf.layers.dense(hidden, 1)
noise = tf.layers.dense(hidden, 1, tf.nn.softplus) + 1e-6
uncertainty = tf.layers.dense(hidden, 1, None)
return mean, noise, uncertainty
def define_graph(config):
network_tpl = tf.make_template('network', network, config=config)
inputs = tf.placeholder(tf.float32, [None, config.num_inputs])
targets = tf.placeholder(tf.float32, [None, 1])
num_visible = tf.placeholder(tf.int32, [])
batch_size = tf.to_float(tf.shape(inputs)[0])
data_mean, data_noise, data_uncertainty = network_tpl(inputs)
ood_inputs = inputs + tf.random_normal(
tf.shape(inputs), 0.0, config.noise_std)
ood_mean, ood_noise, ood_uncertainty = network_tpl(ood_inputs)
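  # Loss terms below: Gaussian negative log-likelihood on the real data, plus a Bernoulli
  # "uncertainty" head trained to output 0 for real inputs and 1 for their noise-perturbed
  # (out-of-distribution) copies.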
losses = [
-tfd.Normal(data_mean, data_noise).log_prob(targets),
-tfd.Bernoulli(data_uncertainty).log_prob(0),
-tfd.Bernoulli(ood_uncertainty).log_prob(1),
]
if config.center_at_target:
losses.append(-tfd.Normal(ood_mean, ood_noise).log_prob(targets))
loss = sum(tf.reduce_sum(loss) for loss in losses) / batch_size
optimizer = tf.train.AdamOptimizer(config.learning_rate)
gradients, variables = zip(*optimizer.compute_gradients(
loss, colocate_gradients_with_ops=True))
if config.clip_gradient:
gradients, _ = tf.clip_by_global_norm(gradients, config.clip_gradient)
optimize = optimizer.apply_gradients(zip(gradients, variables))
data_uncertainty = tf.sigmoid(data_uncertainty)
if not config.center_at_target:
data_mean = (1 - data_uncertainty) * data_mean + data_uncertainty * 0
data_noise = (1 - data_uncertainty) * data_noise + data_uncertainty * 0.1
return tools.AttrDict(locals())
``` |
{
"source": "JoeMWatson/trajopt",
"score": 2
} |
#### File: quanser/qube/qube.py
```python
import autograd.numpy as np
from autograd import jacobian
from trajopt.envs.quanser.common import VelocityFilter
from trajopt.envs.quanser.qube.base import QubeBase, QubeDynamics
class Qube(QubeBase):
def __init__(self, fs, fs_ctrl):
super(Qube, self).__init__(fs, fs_ctrl)
self.dyn = QubeDynamics()
self._sim_state = None
self._vis = {'vp': None, 'arm': None, 'pole': None, 'curve': None}
def _set_gui(self):
scene_range = 0.2
arm_radius = 0.003
arm_length = 0.085
pole_radius = 0.0045
pole_length = 0.129
# http://www.glowscript.org/docs/VPythonDocs/canvas.html
self._vis['vp'].scene.width = 400
self._vis['vp'].scene.height = 300
self._vis['vp'].scene.background = self._vis['vp'].color.gray(0.95)
self._vis['vp'].scene.lights = []
self._vis['vp'].distant_light(
direction=self._vis['vp'].vector(0.2, 0.2, 0.5),
color=self._vis['vp'].color.white)
self._vis['vp'].scene.up = self._vis['vp'].vector(0, 0, 1)
self._vis['vp'].scene.range = scene_range
self._vis['vp'].scene.center = self._vis['vp'].vector(0.04, 0, 0)
self._vis['vp'].scene.forward = self._vis['vp'].vector(-2, 1.2, -1)
self._vis['vp'].box(pos=self._vis['vp'].vector(0, 0, -0.07),
length=0.09, width=0.1, height=0.09,
color=self._vis['vp'].color.gray(0.5))
self._vis['vp'].cylinder(
axis=self._vis['vp'].vector(0, 0, -1), radius=0.005,
length=0.03, color=self._vis['vp'].color.gray(0.5))
# Arm
arm = self._vis['vp'].cylinder()
arm.radius = arm_radius
arm.length = arm_length
arm.color = self._vis['vp'].color.blue
# Pole
pole = self._vis['vp'].cylinder()
pole.radius = pole_radius
pole.length = pole_length
pole.color = self._vis['vp'].color.red
# Curve
curve = self._vis['vp'].curve(color=self._vis['vp'].color.white,
radius=0.0005, retain=2000)
return arm, pole, curve
def _calibrate(self):
self._vel_filt = VelocityFilter(x_len=self.sensor_space.shape[0],
x_init=np.array([0., np.pi]),
dt=self.timing.dt)
self._sim_state = np.array([0., np.pi + 0.01 * self._np_random.randn(), 0., 0.])
self._state = self._zero_sim_step()
def _sim_step(self, u):
# Add a bit of noise to action for robustness
u_noisy = u + 1e-6 * np.float32(
self._np_random.randn(self.action_space.shape[0]))
thdd, aldd = self.dyn(self._sim_state, u_noisy)
# Update internal simulation state
self._sim_state[3] += self.timing.dt * aldd
self._sim_state[2] += self.timing.dt * thdd
self._sim_state[1] += self.timing.dt * self._sim_state[3]
self._sim_state[0] += self.timing.dt * self._sim_state[2]
# Pretend to only observe position and obtain velocity by filtering
pos = self._sim_state[:2]
# vel = self._sim_state[2:]
vel = self._vel_filt(pos)
return np.concatenate([pos, vel])
def reset(self):
self._calibrate()
if self._vis['curve'] is not None:
self._vis['curve'].clear()
return self.step(np.array([0.0]))[0]
def render(self, mode='human'):
if self._vis['vp'] is None:
import importlib
self._vis['vp'] = importlib.import_module('vpython')
self._vis['arm'],\
self._vis['pole'],\
self._vis['curve'] = self._set_gui()
th, al, _, _ = self._state
arm_pos = (self.dyn.Lr * np.cos(th), self.dyn.Lr * np.sin(th), 0.0)
pole_ax = (-self.dyn.Lp * np.sin(al) * np.sin(th),
self.dyn.Lp * np.sin(al) * np.cos(th),
self.dyn.Lp * np.cos(al))
self._vis['arm'].axis = self._vis['vp'].vector(*arm_pos)
self._vis['pole'].pos = self._vis['vp'].vector(*arm_pos)
self._vis['pole'].axis = self._vis['vp'].vector(*pole_ax)
self._vis['curve'].append(
self._vis['pole'].pos + self._vis['pole'].axis)
self._vis['vp'].rate(self.timing.render_rate)
class QubeTO(QubeBase):
def __init__(self, fs, fs_ctrl):
super(QubeTO, self).__init__(fs, fs_ctrl)
self.dyn = QubeDynamics()
self._x0 = np.array([0., np.pi, 0., 0.])
self._sigma_0 = 1.e-4 * np.eye(4)
self._sigma = 1.e-4 * np.eye(4)
self._g = np.array([0., 2. * np.pi, 0., 0.])
self._gw = np.array([1.e-1, 1.e1, 1.e-1, 1.e-1])
self._uw = np.array([1.e-3])
def init(self):
return self._x0, self._sigma_0
def dynamics(self, x, u):
def f(x, u):
_acc = self.dyn(x, u)
return np.hstack((x[2], x[3], _acc))
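        # Integrate the continuous dynamics over one control interval with a classical
        # fourth-order Runge-Kutta step.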
k1 = f(x, u)
k2 = f(x + 0.5 * self.timing.dt * k1, u)
k3 = f(x + 0.5 * self.timing.dt * k2, u)
k4 = f(x + self.timing.dt * k3, u)
xn = x + self.timing.dt / 6. * (k1 + 2. * k2 + 2. * k3 + k4)
return xn
def features(self, x):
return x
def features_jacobian(self, x):
_J = jacobian(self.features, 0)
_j = self.features(x) - _J(x) @ x
return _J, _j
def noise(self, x=None, u=None):
return self._sigma
# xref is a hack to avoid autograd diffing through the jacobian
def cost(self, x, u, a, xref):
if a:
_J, _j = self.features_jacobian(xref)
_x = _J(xref) @ x + _j
return (_x - self._g).T @ np.diag(self._gw) @ (_x - self._g) + u.T @ np.diag(self._uw) @ u
else:
return u.T @ np.diag(self._uw) @ u
``` |
{
"source": "joemzhao/learn2learn",
"score": 3
} |
#### File: unit/vision/benchmarks_test.py
```python
import unittest
import learn2learn as l2l
TOO_BIG_TO_TEST = [
'tiered-imagenet',
]
class UtilTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_tasksets(self):
names = l2l.vision.benchmarks.list_tasksets()
for name in names:
if name in TOO_BIG_TO_TEST:
continue
tasksets = l2l.vision.benchmarks.get_tasksets(name, root='./data')
self.assertTrue(hasattr(tasksets, 'train'))
batch = tasksets.train.sample()
self.assertTrue(batch is not None)
self.assertTrue(hasattr(tasksets, 'validation'))
batch = tasksets.validation.sample()
self.assertTrue(batch is not None)
self.assertTrue(hasattr(tasksets, 'test'))
batch = tasksets.test.sample()
self.assertTrue(batch is not None)
del tasksets, batch
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joemzhao/ultradensifier",
"score": 2
} |
#### File: joemzhao/ultradensifier/helpers.py
```python
from __future__ import print_function
from __future__ import division
from six.moves import xrange
from sys import exit
from multiprocessing import Pool
import numpy as np
import scipy
import math
def normalizer(myvector):
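    # L2-normalize the vector; an all-zero vector is returned unchanged.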
mysum = 0.
for myvalue in myvector:
mysum += myvalue * myvalue
if mysum <= 0.:
return myvector
mysum = math.sqrt(mysum)
newvector = []
for myvalue in myvector:
newvector.append(myvalue/mysum)
return newvector
def emblookup(words, word2vec):
ret = []
for w in words:
w = w.lower()
if w not in word2vec:
continue
ret.append(word2vec[w])
return ret
def emblookup_verbose(words, word2vec):
ret = []
ret_w = []
for w in words:
w = w.lower()
if w not in word2vec:
continue
ret.append(word2vec[w])
ret_w.append(w)
return ret_w
def line_process(l):
l = l.strip().split()
try:
word = l[0].decode("utf-8").lower()
except:
print (l[0])
return (None, None)
vals = normalizer([float(v) for v in l[1:]])
return (word, vals)
def word2vec(emb_path):
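    # Read the embedding file (skipping the header line), normalize each vector in a pool of
    # worker processes, and return a {word: normalized_vector} dict, dropping undecodable tokens.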
word2vec = {}
pool = Pool(4)
with open(emb_path, "r") as f:
pairs = pool.map(line_process, f.readlines()[1:])
pool.close()
pool.join()
_pairs = []
for p in pairs:
if p[0] is not None:
_pairs.append(p)
return dict(_pairs)
``` |
{
"source": "joenghl/SwarmSim",
"score": 2
} |
#### File: joenghl/SwarmSim/evaluate.py
```python
import argparse
import torch
import time
import numpy as np
from torch.autograd import Variable
from pathlib import Path
from utils.airsim_env import Env
from algorithms.maddpg import MADDPG
# TODO: normalization
def run(config):
model_path = (Path('./models') / config.env_id /
('run%i' % config.run_num))
if config.incremental is not None:
model_path = model_path / 'incremental' / ('model_ep%i.pt' %
config.incremental)
    else:
        # Assumed fallback: load the final saved policy when no incremental episode is given
        # (the original else branch repeated the incremental path and would fail for None).
        model_path = model_path / 'model.pt'
maddpg = MADDPG.init_from_save(model_path)
env = Env()
maddpg.prep_rollouts(device="cpu")
time_limit = 500
episode = 0
while True:
try:
print("episode %i begin" % (episode))
episode += 1
time_step = 0
dones = False
obs = env.reset()
while not dones and time_step < time_limit:
time_step += 1
torch_obs = [Variable(torch.Tensor(obs[i]).unsqueeze(0), requires_grad=False)
for i in range(maddpg.nagents)]
# print(torch_obs[0])
torch_agent_actions = maddpg.step(torch_obs, explore=False)
# convert actions to numpy arrays
agent_actions = [ac.data.numpy().squeeze() for ac in torch_agent_actions]
# rearrange actions to be per environment
# actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
# print(agent_actions)
# agent_actions = [np.array([0.,0.,0.,1.,0.]), np.array([0.,0.,0.,1.,0.]),np.array([0.,0.,0.,1.,0.])]
obs, rewards, dones, infos = env.step(agent_actions)
print(rewards)
except KeyboardInterrupt:
env.disconnect()
break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--env_id", default="simple_spread")
parser.add_argument("--model_name", default="123",
help="123")
parser.add_argument("--run_num", default=1, type=int)
parser.add_argument("--save_gifs", action="store_true",
help="Saves gif of each episode into model directory")
parser.add_argument("--incremental", default=24001, type=int,
help="Load incremental policy from given episode " +
"rather than final policy")
parser.add_argument("--n_episodes", default=10, type=int)
parser.add_argument("--episode_length", default=25, type=int)
parser.add_argument("--fps", default=30, type=int)
config = parser.parse_args()
run(config)
``` |
{
"source": "JoenHune/DQN-Meets-Atari",
"score": 2
} |
#### File: JoenHune/DQN-Meets-Atari/paraments.py
```python
import sys
import tensorflow as tf2
DEFAULT_GPU_LIST = [0, 1, 2]
SCALE = 2
MEMORY_LENGTH = 1000000
STACK_LENGTH = 4
BATCH_SIZE = 64
LEARNING_RATE = 0.00025
GAMMA = 0.9
EPSILON = 1.0
EPSILON_MIN = 0.01
EPSILON_DECAY = 0.00003
GIVEN_GPU = [eval(sys.argv[1])] if len(sys.argv) > 1 else DEFAULT_GPU_LIST
def get_strategy(gpu_visible=None):
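    # Choose a tf.distribute strategy from the visible GPUs: default to CPU when none are
    # available, use a single-device strategy for one GPU, and MirroredStrategy for several.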
gpu_total = tf2.config.experimental.list_physical_devices(device_type="GPU")
gpu_candidates = []
if gpu_visible is None:
gpu_visible = GIVEN_GPU
for gpu_id in gpu_visible:
if 0 <= gpu_id < len(gpu_total):
gpu_candidates.append(gpu_total[gpu_id])
tf2.config.experimental.set_visible_devices(devices=gpu_candidates, device_type="GPU")
print("gpu_total :", gpu_total, "| gpu_candidates :", gpu_candidates)
strategy = tf2.distribute.OneDeviceStrategy(device="/cpu:0")
if len(gpu_candidates) == 1:
strategy = tf2.distribute.OneDeviceStrategy(device="/gpu:0")
elif len(gpu_candidates) > 1:
strategy = tf2.distribute.MirroredStrategy()
return strategy
``` |
{
"source": "joenilan/pycoin-grs",
"score": 3
} |
#### File: pycoin_grs/services/tx_db.py
```python
import os.path
from pycoin_grs.serialize import b2h_rev
from pycoin_grs.tx.Tx import Tx
class TxDb(object):
"""
This object can be used in many places that expect a dict.
"""
def __init__(self, lookup_methods=[], read_only_paths=[], writable_cache_path=None):
self.lookup_methods = lookup_methods
self.read_only_paths = read_only_paths
if writable_cache_path:
self.read_only_paths.append(writable_cache_path)
self.writable_cache_path = writable_cache_path
if self.writable_cache_path and not os.path.exists(self.writable_cache_path):
os.makedirs(self.writable_cache_path)
def paths_for_hash(self, hash):
name = b2h_rev(hash)
for base_dir in self.read_only_paths:
p = os.path.join(base_dir, "%s_tx.bin" % name)
if os.path.exists(p):
yield p
def put(self, tx):
name = b2h_rev(tx.hash())
if self.writable_cache_path:
try:
path = os.path.join(self.writable_cache_path, "%s_tx.bin" % name)
with open(path, "wb") as f:
tx.stream(f)
except IOError:
pass
def get(self, key):
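        # Check the local cache paths first; on a miss, try each lookup method in turn and
        # cache any transaction whose hash matches the requested key.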
for path in self.paths_for_hash(key):
try:
tx = Tx.parse(open(path, "rb"))
if tx and tx.hash() == key:
return tx
except IOError:
pass
for method in self.lookup_methods:
try:
tx = method(key)
if tx and tx.hash() == key:
self.put(tx)
return tx
except Exception:
pass
return None
def __getitem__(self, key):
        raise NotImplementedError
def __setitem__(self, key, val):
if val.hash() != key:
raise ValueError("bad key %s for %s" % (b2h_rev(key), val))
self.put(val)
```
#### File: tests/ecdsa/libsecp256k1_test.py
```python
import hashlib
import unittest
from ctypes import cdll, byref, c_int, c_uint, c_char_p, c_void_p, c_size_t, create_string_buffer
from pycoin_grs.ecdsa.secp256k1 import secp256k1_generator
from pycoin_grs.ecdsa.intstream import to_bytes, from_bytes
from pycoin_grs.ecdsa.native.secp256k1 import libsecp256k1, SECP256K1_EC_UNCOMPRESSED
from pycoin_grs.ecdsa.rfc6979 import deterministic_generate_k
from pycoin_grs.encoding import from_bytes_32, to_bytes_32
from pycoin_grs.intbytes import int2byte, byte2int
from pycoin_grs.serialize import b2h
from pycoin_grs.ecdsa.secp256k1 import Generator, _p, _a, _b, _Gx, _Gy, _r
legacy_secp256k1_group = Generator(_p, _a, _b, (_Gx, _Gy), _r)
class ECDSATestCase(unittest.TestCase):
def test_multiply_by_group_generator(self):
self.assertEqual(1 * secp256k1_generator, (
55066263022277343669578718895168534326250603453777594175500187360389116729240,
32670510020758816978083085130507043184471273380659243275938904335757337482424)
)
self.assertEqual(2 * secp256k1_generator, (
89565891926547004231252920425935692360644145829622209833684329913297188986597,
12158399299693830322967808612713398636155367887041628176798871954788371653930)
)
self.assertEqual(secp256k1_generator *
12158399299693830322967808612713398636155367887041628176798871954788371653930, (
73503477726599187100887421812915680925855587149907858411827017692118332824920,
27657251006027960104028534670901169416706551781681983309292004861017889370444)
)
def test_sign_verify_mutual_compatability(self):
if libsecp256k1 is None:
raise unittest.SkipTest("no libsecp256k1")
ctx = libsecp256k1.ctx
signature = create_string_buffer(64)
sighash = to_bytes_32(1000)
secret_key = to_bytes_32(100)
public_key = create_string_buffer(64)
r = libsecp256k1.secp256k1_ec_pubkey_create(ctx, public_key, secret_key)
self.assertEqual(r, 1)
self.assertEqual(b2h(public_key),
'880f50f7ceb4210289266a40b306e33ef52bb75f834c172e65175e3ce2ac3bed'
'<KEY>'
)
r = libsecp256k1.secp256k1_ecdsa_sign(ctx, signature, sighash, secret_key, None, None)
self.assertEqual(r, 1)
r = libsecp256k1.secp256k1_ecdsa_verify(ctx, signature, sighash, public_key)
self.assertEqual(r, 1)
signature1 = signature[:-1] + int2byte(byte2int(signature[-1]) ^ 1)
r = libsecp256k1.secp256k1_ecdsa_verify(ctx, signature1, sighash, public_key)
self.assertEqual(r, 0)
def test_sign(self):
if libsecp256k1 is None:
raise unittest.SkipTest("no libsecp256k1")
ctx = libsecp256k1.ctx
sighash = to_bytes_32(1000)
secret_key = to_bytes_32(100)
public_key = create_string_buffer(64)
r = libsecp256k1.secp256k1_ec_pubkey_create(ctx, public_key, secret_key)
self.assertEqual(r, 1)
self.assertEqual(b2h(public_key),
'880f50f7ceb4210289266a40b306e33ef52bb75f834c172e65175e3ce2ac3bed'
'6e2835e3d57ae1fcd0954808be17bd97bf871f7a8a5edadcffcc8812576f7ae5'
)
signature = create_string_buffer(64)
r = libsecp256k1.secp256k1_ecdsa_sign(ctx, signature, sighash, secret_key, None, None)
self.assertEqual(r, 1)
compact_signature = create_string_buffer(64)
libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(ctx, compact_signature, signature)
r = from_bytes_32(compact_signature[:32])
s = from_bytes_32(compact_signature[32:])
signature = (r, s)
pubkey_size = c_size_t(65)
pubkey_serialized = create_string_buffer(65)
libsecp256k1.secp256k1_ec_pubkey_serialize(
ctx, pubkey_serialized, byref(pubkey_size), public_key, SECP256K1_EC_UNCOMPRESSED)
x = from_bytes_32(pubkey_serialized[1:33])
y = from_bytes_32(pubkey_serialized[33:])
legacy_secp256k1_group.verify((x, y), 1000, signature)
def test_verify(self):
public_pair = secp256k1_generator * 1
self.assertEqual(public_pair, (
55066263022277343669578718895168534326250603453777594175500187360389116729240,
32670510020758816978083085130507043184471273380659243275938904335757337482424)
)
hash_value = 1
sig = (46340862580836590753275244201733144181782255593078084106116359912084275628184,
81369331955758484632176499244870227132558660296342819670803726373940306621624)
r = secp256k1_generator.verify(public_pair, hash_value, sig)
self.assertEqual(r, True)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joennlae/amp",
"score": 2
} |
#### File: amp/bin/marginal_loopy.py
```python
import numpy as np
import pickle
import multiprocessing as mp
from multiprocessing import Pool
import matplotlib
# matplotlib.rcParams.update({'font.size': 18})
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import itertools
from collections import defaultdict
from joblib import Parallel, delayed
from scipy.stats import multivariate_normal
# from modules import GaussianDiag, EP, MMSE, PowerEP, StochasticEP, ExpansionEP, ExpansionPowerEP, ExpectationConsistency, LoopyBP, LoopyMP, PPBP, AlphaBP, MMSEalphaBP, ML, VariationalBP, MMSEvarBP, EPalphaBP
import sys
sys.path.append("./src")
from utils import channel_component, sampling_noise, sampling_signal, sampling_H,real2complex
import matplotlib.pyplot as plt
import itertools
from collections import defaultdict
from joblib import Parallel, delayed
from scipy.stats import multivariate_normal
from loopy_modules import LoopyBP, AlphaBP, ML, MMSEalphaBP, Marginal
from utils import channel_component, sampling_noise, sampling_signal, sampling_H,real2complex, ERsampling_S
# configuration
class hparam(object):
num_tx = 9
num_rx = 9
soucrce_prior = [0.5, 0.5]
signal_var = 1
connect_prob = np.linspace(0.0, 0.9, 10)
monte = 30
constellation = [int(-1), int(1)]
alpha = None
stn_var= 1
# algos = {"LoopyBP": {"detector": LoopyBP, "alpha": None},
# }
algos = {"BP": {"detector": LoopyBP, "alpha": None, "legend": "BP", "row": 0},
# "AlphaBP, 0.2": {"detector": AlphaBP, "alpha": 0.2, "legend": r'$\alpha$-BP, 0.2'},
# "MMSEalphaBP, 0.4": {"detector": MMSEalphaBP, "alpha": 0.4, "legend": r'$\alpha$-BP+MMSE, 0.4', "row": 1},
"AlphaBP, 0.4": {"detector": AlphaBP, "alpha": 0.4, "legend": r'$\alpha$-BP, 0.4', "row": 1},
# "AlphaBP, 0.6": {"detector": AlphaBP, "alpha": 0.6, "legend": r'$\alpha$-BP, 0.6'},
"AlphaBP, 0.8": {"detector": AlphaBP, "alpha": 0.8, "legend": r'$\alpha$-BP, 0.8',"row": 2},
"AlphaBP, 1.2": {"detector": AlphaBP, "alpha": 1.2, "legend": r'$\alpha$-BP, 1.2', "row": 3}
}
iter_num = 100
for _, value in algos.items():
value["ser"] = []
def task(erp):
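    # For a single ER edge probability, run hparam.monte trials: sample (S, b), compute the exact
    # marginals, run each configured (alpha-)BP variant, and collect the paired true/estimated
    # marginals for the later scatter plots.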
tmp = dict()
for name,_ in hparam.algos.items():
tmp[name] = []
for key, method in hparam.algos.items():
dict_marg = {"true":[], "est": []}
for monte in range(hparam.monte):
# sampling the S and b for exponential function
S, b = ERsampling_S(hparam, erp)
# compute the joint ML detection
detectMg = Marginal(hparam)
true_marginals = detectMg.detect(S, b)
# marginals estimated
hparam.alpha = method['alpha']
detector = method['detector'](None, hparam)
detector.fit(S=S,
b=b,
stop_iter=hparam.iter_num)
estimated_marginals = detector.marginals()
## concatenate the marginals
dict_marg["true"] = np.concatenate((dict_marg["true"], true_marginals[:,0]))
dict_marg["est"] = np.concatenate((dict_marg["est"], estimated_marginals[:,0]))
tmp[key].append(dict_marg)
# performance should be made by comparing with ML
performance = {"erp": erp}
for key, method in hparam.algos.items():
#method["ser"].append( np.mean(tmp[key])/hparam.num_tx )
performance[key] = tmp[key]
return performance
results = []
def collect_result(result):
global results
results.append(result)
# task(hparam.connect_prob[0])
pool = mp.Pool(mp.cpu_count())
results = pool.map(task, list(hparam.connect_prob))
pool.close()
performance = defaultdict(list)
#for the_result in RESULTS:
for connect_prob in list(hparam.connect_prob):
for the_result in results:
if the_result["erp"] == connect_prob:
for key, _ in hparam.algos.items():
performance[key].append( the_result[key] )
# save the experimental results
with open("figures/marginal_prob.pkl", 'wb') as handle:
pickle.dump(performance, handle)
# for snr in hparam.snr:
marker_list = ["o", "<", "+", ">", "v", "1", "2", "3", "8"]
iter_marker_list = iter(marker_list)
figure_format = 2*100 + hparam.connect_prob * 10
figure = plt.figure(figsize=(30,30))
for key, method in hparam.algos.items():
for i, prob in enumerate(hparam.connect_prob):
ax = figure.add_subplot(len(hparam.algos), 10, i+1 + method["row"] * 10, adjustable='box', aspect=1)
ax.scatter(performance[key][i][0]["true"],
performance[key][i][0]["est"])
ax.set_xlim(0,1)
ax.set_ylim(0,1)
# .set(xlabel="Edge Probability", ylabel="MAP Accuracy")
# ax.grid()
figure.tight_layout()
figure.savefig("figures/marginal_acc.pdf")
figure.show()
``` |
{
"source": "joennlae/halutmatmul",
"score": 2
} |
#### File: .github/vast.ai/vast_ai_helper.py
```python
import argparse
import json
import re
import subprocess
import sys
from time import sleep
from typing import Any
def run_command(cmd: str, print_all: bool = False) -> str:
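    # Run a shell command, streaming stdout line by line (optionally echoing it) and returning
    # the combined output as one string.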
# return subprocess.run([cmd], stdout=subprocess.PIPE, shell=True).stdout.decode(
# "utf-8"
# )
process = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
output_str_list = []
total_output_str = ""
    for line in iter(process.stdout.readline, b""):  # type: ignore[union-attr]
decoded = line.decode("utf-8")
output_str_list.append(decoded)
total_output_str += decoded + "\n"
if print_all:
print(decoded, end="")
return total_output_str
def cleanup() -> None:
out = run_command(
"./vast.py show instances --raw",
)
dict_out = json.loads(out)
for server in dict_out:
print(f"Start destroying {server['id']}")
out = run_command(
f"./vast.py destroy instance {server['id']}",
)
print(out)
def startup() -> tuple[str, int]:
out = run_command(
"./vast.py search offers 'reliability > 0.98 num_gpus==1 rentable==True"
" inet_down > 100 disk_space > 30 dph_total < 0.25 inet_down_cost < 0.021"
" inet_up_cost < 0.021 cuda_vers >= 11.2' -o 'dph_total' --storage=32 --raw"
)
dict_out = json.loads(out)
print("Starting best server")
if len(dict_out) == 0:
print("NO SERVER FOUND")
sys.exit(1)
print(dict_out[0])
out = run_command(
f"./vast.py create instance {dict_out[0]['id']} "
"--image joennlae/halutmatmul-conda-gpu:latest --disk 32"
)
print(out)
starting = True
counter = 1
ssh_host = ""
ssh_port = 0
while starting:
print(f"Starting {counter}")
sleep(5)
out = run_command(f"./vast.py show instances --raw")
out_dict = json.loads(out)
if len(out_dict):
print(out_dict[0]["status_msg"])
if ssh_port == 0:
ssh_host = out_dict[0]["ssh_host"]
ssh_port = out_dict[0]["ssh_port"]
if out_dict[0]["actual_status"] == "running":
starting = False
counter += 1
return ssh_host, ssh_port
def run_ssh_commands(ssh_host: str, ssh_port: int, debug: bool = False) -> int:
# commands to execute the tests
# mv /venv/ /venv2 # because vast.ai already has venv
# source /venv2/bin/activate
# git clone https://github.com/joennlae/halutmatmul.git
# cd halutmatmul
    # pytest -n0 -srPA -k "gpu"
    # -n controls the number of parallel pytest-xdist workers; 0 runs the GPU tests in a single worker
commands = 'cd /; mv /venv/ /venv2; source /venv2/bin/activate; \
git clone https://github.com/joennlae/halutmatmul.git; \
cd halutmatmul; pytest -n0 -srPA -k "gpu"; \
echo "ERROR CODE: $?";'
ssh_identity_str = ""
if debug:
ssh_identity_str = "-i .ssh/id_rsa"
print("SSH host", ssh_host)
print("SSH port", ssh_port)
out = run_command(
f"ssh -o StrictHostKeyChecking=no {ssh_identity_str} "
f'-p {ssh_port} root@{ssh_host} "{commands}"',
print_all=True,
)
error_code = re.findall(r"(?<=ERROR CODE: )\d+", out)
print("ERROR CODE: ", error_code)
return int(error_code[0])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Vast.ai helper")
parser.add_argument("--cleanup", "-c", action="store_true", help="run only cleanup")
parser.add_argument(
"--debug", "-d", action="store_true", help="set ssh key offline"
)
args = parser.parse_args()
print(args)
if args.cleanup:
cleanup()
else:
cleanup()
ssh_host, ssh_port = startup()
# ssh_host = "ssh4.vast.ai"
# ssh_port = 11182
sleep(5)
error_code = run_ssh_commands(ssh_host, ssh_port, args.debug)
cleanup()
sys.exit(error_code)
```
#### File: python/maddness/maddness.py
```python
import os, resource
from typing import Any, List, Optional, Tuple, Union
import numpy as np
from maddness.util.least_squares import ( # type: ignore[attr-defined]
_XW_encoded,
encoded_lstsq,
sparse_encoded_lstsq,
)
from maddness.util.hash_function_helper import ( # type: ignore[attr-defined]
Bucket,
MultiSplit,
create_codebook_start_end_idxs,
)
def learn_binary_tree_splits(
X: np.ndarray,
nsplits: int = 4, # levels of resulting binary hash tree
return_prototypes: bool = True,
# return_buckets: bool = False,
X_orig: Optional[np.ndarray] = None,
check_x_dims: int = 8, # can be used to check more or less dims with max losses
learn_quantize_params: bool = False,
) -> Tuple[list, int, Union[list, np.ndarray]]:
assert nsplits <= 4 # >4 splits means >16 split_vals for this func's impl
X = X.copy().astype(np.float32)
N, D = X.shape # D amount of IDx per codebook
X_orig = X.copy() if X_orig is None else X_orig.copy()
# initially, one big bucket with everything
buckets = [
Bucket(sumX=X.sum(axis=0), sumX2=(X * X).sum(axis=0), point_ids=np.arange(N))
]
# total_loss = sum([bucket.loss for bucket in buckets])
# print("================================")
# print("learn_binary_tree_splits(): initial loss: ", total_loss)
splits = []
col_losses = np.zeros(D, dtype=np.float32)
OFFSET = 0.0
SCALE_BY = 1.0
X = X * SCALE_BY + OFFSET
# X_orig = X_orig + OFFSET
for _ in range(nsplits):
# in the original code there are more strategies: eigenvec, bucket_eigenvecs, kurtosis
# dim_heuristic == "bucket_sse":
col_losses[:] = 0 # set all zero
for buck in buckets:
col_losses += buck.col_sum_sqs() # return variance
try_dims = np.argsort(col_losses)[::-1][
:check_x_dims
] # choose biggest column losses
losses = np.zeros(len(try_dims), dtype=X.dtype)
all_split_vals = [] # vals chosen by each bucket/group for each dim
# determine for this dim what the best split vals are for each
# group and what the loss is when using these split vals
for d, dim in enumerate(try_dims):
split_vals = [] # each bucket contributes one split val
for _, buck in enumerate(buckets):
# X.shape (50000, 32), dim is a number 0-31, val 1D, loss 1D
val, loss = buck.optimal_split_val(X, dim, X_orig=X_orig)
losses[d] += loss
if d > 0 and losses[d] >= np.min(losses[:d]):
# early stop
break
split_vals.append(val)
all_split_vals.append(split_vals)
# determine best dim to split on, and pull out associated split
# vals for all buckets
best_tried_dim_idx = np.argmin(losses)
best_dim = try_dims[best_tried_dim_idx]
use_split_vals = all_split_vals[best_tried_dim_idx]
split = MultiSplit(dim=best_dim, vals=use_split_vals)
if learn_quantize_params:
# simple version, which also handles 1 bucket: just set min
# value to be avg of min splitval and xval, and max value to
# be avg of max splitval and xval
x = X[:, best_dim] # Vector (50000, 1)
offset = (np.min(x) + np.min(use_split_vals)) / 2
upper_val = (np.max(x) + np.max(use_split_vals)) / 2 - offset
# TODO: why this specific scale value??
scale = 254.0 / upper_val
# if learn_quantize_params == "int16":
scale = 2.0 ** int(np.log2(scale))
split.offset = offset
split.scaleby = scale
split.vals = (split.vals - split.offset) * split.scaleby
# TODO: look at clippings
split.vals = np.clip(split.vals, 0, 255).astype(np.int32)
else:
split.offset = OFFSET
split.scaleby = SCALE_BY
splits.append(split)
# apply this split to get next round of buckets
new_buckets = []
for i, buck in enumerate(buckets):
val = use_split_vals[i]
new_buckets += list(buck.split(X, dim=best_dim, val=val, X_orig=X_orig))
buckets = new_buckets
# pylint: disable=consider-using-generator
loss = sum([bucket.loss for bucket in buckets])
# print("learn_binary_tree_splits(): returning loss: ", loss)
if return_prototypes:
prototypes = np.vstack([buck.col_means() for buck in buckets])
assert prototypes.shape == (len(buckets), X.shape[1])
return splits, loss, prototypes
# if return_buckets:
return splits, loss, buckets
def init_and_learn_hash_function(
X: np.ndarray, C: int, pq_perm_algo: str = "start"
) -> Tuple[np.ndarray, list, np.ndarray, list]:
_, D = X.shape
K = 16
X = X.astype(np.float32)
X_error = X.copy().astype(np.float32)
X_orig = X
all_prototypes = np.zeros((C, K, D), dtype=np.float32)
all_splits: List = []
pq_idxs = create_codebook_start_end_idxs(X, C, algo=pq_perm_algo)
# ------------------------ 0th iteration; initialize all codebooks
all_splits = []
all_buckets = []
for c in range(C):
start_idx, end_idx = pq_idxs[c]
idxs = np.arange(start_idx, end_idx)
# in original code there is other selections based on PCA and disjoint PCA
use_X_error = X_error[:, idxs]
use_X_orig = X_orig[:, idxs]
# learn codebook to soak current residuals
multisplits, _, buckets = learn_binary_tree_splits(
use_X_error, X_orig=use_X_orig, return_prototypes=False
)
for split in multisplits:
split.dim = idxs[split.dim]
all_splits.append(multisplits)
all_buckets.append(buckets)
# update residuals and store prototypes
# idxs = IDs that were look at for current codebook
# buck.point_ids = rows that landed in certain K
# [ 0 5 21 ... 99950 99979 99999] (N=100000)
# X_error = is here still the A input
# remove centroid from all the points that lie in a certain codebook
# set prototype value
centroid = np.zeros(D, dtype=np.float32)
for b, buck in enumerate(buckets):
# print(b, idxs, buck.point_ids, centroid, buck.col_means())
if len(buck.point_ids):
centroid[:] = 0
centroid[idxs] = buck.col_means()
X_error[buck.point_ids] -= centroid
# update centroid here in case we want to regularize it somehow
all_prototypes[c, b] = centroid
# X_error = A_input - all_centroids
ram_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print(
f"Learning progress {X.shape}-{C}-{K}: {c + 1}/{C} "
f"({(ram_usage / (1024 * 1024)):.3f} GB)"
)
return X_error, all_splits, all_prototypes, all_buckets
def apply_hash_function(X: np.ndarray, splits: List[MultiSplit]) -> np.ndarray:
N, _ = X.shape
nsplits = len(splits)
assert len(splits) >= 1
# original code had a distinction: not sure why
group_ids = np.zeros(N, dtype=np.int32)
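    # Each level compares one input dimension against a per-bucket threshold and appends one
    # bit to group_ids, so after nsplits levels every row carries its bucket index in
    # [0, 2**nsplits) for this codebook.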
for i in range(nsplits):
split = splits[i]
vals = split.vals[group_ids]
indicators = split.preprocess_x(X[:, split.dim]) > vals
group_ids = (group_ids * 2) + indicators
return group_ids
def maddness_encode(
X: np.ndarray, multisplits_lists: list[list[MultiSplit]]
) -> np.ndarray:
N, _ = X.shape
C = len(multisplits_lists)
A_enc = np.empty((N, C), dtype=np.int32, order="F") # column-major
for c in range(C):
A_enc[:, c] = apply_hash_function(X, multisplits_lists[c])
return np.ascontiguousarray(A_enc)
# @_memory.cache
def learn_proto_and_hash_function(
X: np.ndarray, C: int, lut_work_const: int = -1
) -> Tuple[list[list[MultiSplit]], np.ndarray, np.ndarray]:
_, D = X.shape
K = 16
used_perm_algo = "start" # or end
X_orig = X.astype(np.float32)
# X_error = X_orig - centroid shape: [N, D]
X_error, all_splits, all_prototypes, _ = init_and_learn_hash_function(
X, C, pq_perm_algo=used_perm_algo
)
msv_orig = (X_orig * X_orig).mean()
mse_error = (X_error * X_error).mean()
print(
"X_error mse / X mean squared value: ",
mse_error / msv_orig,
mse_error,
msv_orig,
np.mean(X_orig),
)
squared_diff = np.square(X_orig - X_error).mean()
print("Error to Original squared diff", squared_diff)
# optimize prototypes discriminatively conditioned on assignments
# applying g(A) [N, C] with values from 0-K (50000, 16)
A_enc = maddness_encode(X, all_splits)
# optimizing prototypes
if lut_work_const != 1: # if it's 1, equivalent to just doing PQ
if lut_work_const < 0:
# print("fitting dense lstsq to X_error")
W = encoded_lstsq(A_enc=A_enc, Y=X_error)
else:
W, _ = sparse_encoded_lstsq(
A_enc, X_error, nnz_blocks=lut_work_const, pq_perm_algo=used_perm_algo
)
all_prototypes_delta = W.reshape(C, K, D)
all_prototypes += all_prototypes_delta
# check how much improvement we got
X_error -= _XW_encoded(A_enc, W) # if we fit to X_error
mse_res = (X_error * X_error).mean()
print("X_error mse / X mse after lstsq: ", mse_res / msv_orig)
ram_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print(
f"After Ridge regression {X.shape}-{C}-{K}"
f"({(ram_usage / (1024 * 1024)):.3f} GB)"
)
report_array = np.array(
[
mse_error,
msv_orig,
mse_error / msv_orig,
np.mean(X_orig),
mse_res,
mse_res / msv_orig,
ram_usage / (1024 * 1024),
]
)
return all_splits, all_prototypes, report_array
def maddness_lut(q: np.ndarray, all_prototypes: np.ndarray) -> np.ndarray:
q = q.reshape(1, 1, -1) # all_prototypes is shape C, K, D
return (q * all_prototypes).sum(axis=2) # C, K
def maddness_quantize_luts(luts: np.ndarray, force_power_of_2: bool = True) -> Any:
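    # Map the float LUTs to 8-bit integers: subtract a per-codebook offset (its minimum) and
    # apply one shared scale, optionally rounded to a power of two so dequantization is a shift.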
mins = luts.min(axis=(0, 2))
maxs = luts.max(axis=(0, 2))
gaps = maxs - mins
gap = np.max(gaps)
if force_power_of_2:
exponent = np.ceil(np.log2(gap))
scale = 2 ** int(-exponent) # scale is a power of 2, so can just shift
scale *= 255.5 - 1e-10 # so max val is at most 255
else:
scale = (255.5 - 1e-10) / gap
offsets = mins[np.newaxis, :, np.newaxis]
luts_quantized = (luts - offsets) * scale
luts_quantized = (luts_quantized + 0.5).astype(np.int64)
assert np.min(luts_quantized) >= 0
assert np.max(luts_quantized) <= 255.0
return luts_quantized, offsets.sum(), scale
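# Dequantization sketch: up to rounding, each entry satisfies
#   luts[i, c, k] ~= luts_quantized[i, c, k] / scale + mins[c]
# so a sum over all C codebooks is recovered as  q_sum / scale + offsets.sum(),
# which is what callers apply through the returned `offset` and `scale`.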
# pylint: disable=R0902
class MaddnessMatmul:
def __init__(self, C: int = 16, lut_work_const: int = -1) -> None:
# checks
if lut_work_const > 0 and lut_work_const > C:
raise Exception("lut_work_const > C: {} > {}".format(lut_work_const, C))
self.lut_work_const = lut_work_const
self.C = C
self.K = 16
self.A_enc: Optional[np.ndarray] = None
self.luts: Optional[np.ndarray] = None
self.quantize_lut = False
self.upcast_every = 16
self.upcast_every = min(self.C, self.upcast_every)
# important otherwise wrong summation
assert self.upcast_every in (1, 2, 4, 8, 16, 32, 64, 128, 256)
self.accumulate_how = "mean" # sum
def _learn_hash_buckets_and_prototypes(self, A: np.ndarray) -> None:
_, D = A.shape
if D < self.C:
raise Exception("D < C: {} < {}".format(D, self.C))
self.splits_lists, self.prototypes, _ = learn_proto_and_hash_function(
A, self.C, lut_work_const=self.lut_work_const
)
def _encode_A(self, A: np.ndarray) -> np.ndarray:
idxs = maddness_encode(A, self.splits_lists)
# offsets = [ 0 16 32 48 64 80 96 112 128 144 160 176 192 208 224 240]
offsets = np.arange(self.C, dtype=np.int32) * self.K
return idxs + offsets
def _create_lut(self, B: np.ndarray) -> Tuple[np.ndarray, float, float]:
B = np.atleast_2d(B)
luts = np.zeros((B.shape[0], self.C, self.K))
for i, q in enumerate(B):
luts[i] = maddness_lut(q, self.prototypes)
if self.quantize_lut:
luts, offset, scale = maddness_quantize_luts(luts)
return luts, offset, scale
return luts, 0, 1
def _calc_matmul(
self,
A_enc: np.ndarray,
B_luts: np.ndarray,
offset: float,
scale: float,
) -> np.ndarray:
A_enc = np.ascontiguousarray(A_enc)
total_result = np.empty((len(B_luts), len(A_enc)), dtype=np.float32)
for i, lut in enumerate(B_luts):
read_lut = lut.ravel()[A_enc.ravel()].reshape(A_enc.shape)
if self.upcast_every < 2 or not self.quantize_lut:
read_lut = read_lut.sum(axis=-1)
else:
# TODO: there is probably room for improvement here
read_lut = read_lut.reshape(read_lut.shape[0], -1, self.upcast_every)
if self.accumulate_how == "sum":
# sum upcast_every vals, then clip to mirror saturating
# unsigned addition, then sum without saturation (like u16)
read_lut = read_lut.sum(2)
read_lut = np.clip(read_lut, 0, 255).sum(axis=-1)
elif self.accumulate_how == "mean":
# mirror hierarchical avg_epu8
while read_lut.shape[-1] > 2:
read_lut = (read_lut[:, :, ::2] + read_lut[:, :, 1::2] + 1) // 2
read_lut = (read_lut[:, :, 0] + read_lut[:, :, 1] + 1) // 2
read_lut = read_lut.sum(axis=-1) # clipping not needed
# undo biasing; if low bits are {0,0} or {1,1}, no bias
# from the averaging; but if {0,1}, then rounds up by
# .5; happens with prob ~=~ .5, so each avg op adds .25;
# the other tricky thing here is that rounding up when
# you're averaging averages biases it even farther
read_lut *= self.upcast_every # convert mean to sum
# I honestly don't know why this is the formula, but wow
# does it work well
bias = self.C / 4 * np.log2(self.upcast_every)
read_lut -= int(bias)
else:
raise ValueError("accumulate_how must be 'sum' or 'mean'")
if self.quantize_lut:
read_lut = (read_lut / scale) + offset
total_result[i] = read_lut
return total_result.T
def _set_A(self, A: np.ndarray) -> None:
self.A_enc = self._encode_A(A)
def _set_B(self, B: np.ndarray) -> None:
self.luts, self.offset, self.scale = self._create_lut(B.T)
# public function
def learn_A(self, A: np.ndarray) -> None:
self._learn_hash_buckets_and_prototypes(A)
def learn_offline(self, A: np.ndarray, B: np.ndarray) -> None:
self._learn_hash_buckets_and_prototypes(A)
self._set_B(B)
def apply_matmul_e2e(
self, A: np.ndarray, B: np.ndarray, A_learn: np.ndarray = None
) -> np.ndarray:
if A_learn is None:
self._learn_hash_buckets_and_prototypes(A)
else:
self._learn_hash_buckets_and_prototypes(A_learn)
self._set_A(A)
self._set_B(B)
return self._calc_matmul(
self.A_enc, # type: ignore[arg-type]
self.luts, # type: ignore[arg-type]
offset=self.offset,
scale=self.scale,
)
def matmul_online(self, A: np.ndarray) -> np.ndarray:
self._set_A(A)
return self._calc_matmul(
self.A_enc, self.luts, offset=self.offset, scale=self.scale # type: ignore[arg-type]
)
def reset(self) -> None:
self.A_enc = None
self.luts = None
def get_speed_metrics(
self, A: np.ndarray, B: np.ndarray, fixedA: bool = False, fixedB: bool = False
) -> None:
N, D = A.shape
D, M = B.shape
# data encoding and LUT costs
nmuls = 0
nmuls += 0 if fixedA else N * D # offset + scale before quantize
nmuls_per_codebook_per_output = self.K * D
nmuls_per_output = nmuls_per_codebook_per_output * self.C
nmuls += 0 if fixedB else nmuls_per_output * M
# lookups given encoded data + luts
nlookups = N * M * self.C
print("nmuls: ", nmuls, "KEY_NLOOKUPS:", nlookups)
def matmul(
A: np.ndarray,
B: np.ndarray,
C: int = 16,
lut_work_const: int = -1,
A_learn: Optional[np.ndarray] = None,
) -> np.ndarray:
return MaddnessMatmul(C=C, lut_work_const=lut_work_const).apply_matmul_e2e(
A, B, A_learn=A_learn
)
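# Illustrative end-to-end example (shapes chosen only for this sketch; D must
# be at least C):
#   A = np.random.randn(4096, 64).astype(np.float32)
#   B = np.random.randn(64, 32).astype(np.float32)
#   C_approx = matmul(A, B, C=16)   # learns hashes + LUTs on A, then approximates A @ B
#   rel_mse = np.mean((C_approx - A @ B) ** 2) / np.mean((A @ B) ** 2)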
if __name__ == "__main__":
print("only use as import")
```
#### File: maddness/util/hash_function_helper.py
```python
import copy
import numba
import numpy as np
@numba.njit(fastmath=True, cache=True, parallel=False)
def _cumsse_cols(X):
# TODO: can be optimized with numpy
N, D = X.shape
cumsses = np.empty((N, D), X.dtype)
cumX_column = np.empty(D, X.dtype)
cumX2_column = np.empty(D, X.dtype)
for j in range(D):
cumX_column[j] = X[0, j]
cumX2_column[j] = X[0, j] * X[0, j]
cumsses[0, j] = 0 # no err in bucket with 1 element
for i in range(1, N):
one_over_count = 1.0 / (i + 1)
for j in range(D):
cumX_column[j] += X[i, j]
cumX2_column[j] += X[i, j] * X[i, j]
meanX = cumX_column[j] * one_over_count
cumsses[i, j] = cumX2_column[j] - (cumX_column[j] * meanX)
return cumsses
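# A vectorized NumPy equivalent of the loop above (a sketch for the TODO, not
# used in the numba-compiled path):
#   csum = np.cumsum(X, axis=0)
#   csum2 = np.cumsum(X * X, axis=0)
#   counts = np.arange(1, N + 1)[:, None]
#   cumsses = csum2 - csum * csum / counts
# i.e. cumsses[i, j] is the SSE of the first i + 1 entries of column j.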
# @_memory.cache
def optimal_split_val(
X,
dim,
X_orig=None,
):
X_orig = X if X_orig is None else X_orig
    assert X_orig.shape == X.shape
N, _ = X.shape
sort_idxs = np.argsort(X_orig[:, dim])
X_sort = X[sort_idxs]
# cumulative SSE (sum of squared errors)
sses_head = _cumsse_cols(X_sort)
sses_tail = _cumsse_cols(X_sort[::-1])[::-1]
sses = sses_head
sses[:-1] += sses_tail[1:]
sses = sses.sum(axis=1)
best_idx = np.argmin(sses)
next_idx = min(N - 1, best_idx + 1)
col = X[:, dim]
best_val = (col[sort_idxs[best_idx]] + col[sort_idxs[next_idx]]) / 2
return best_val, sses[best_idx]
@numba.njit(fastmath=True, cache=True, parallel=False)
def calculate_loss(
X: np.ndarray, split_n: int, dim: int, idxs: np.ndarray
) -> tuple[float, float]:
X = X[idxs].copy()
sorted_ids = np.argsort(X[:, dim])
X = X[sorted_ids]
X0 = X[:split_n]
X1 = X[split_n:]
assert X0.shape[0] + X1.shape[0] == X.shape[0]
X0_div = X0.shape[0] if X0.shape[0] > 0 else 1
X1_div = X1.shape[0] if X1.shape[0] > 0 else 1
# print(X0, X1, X0.shape, X1.shape)
    # X0_error / X1_error hold the per-bucket means (guarded against empty buckets)
    X0_error = 1 / X0_div * np.sum(X0, axis=0)
    X1_error = 1 / X1_div * np.sum(X1, axis=0)
    # split loss = within-bucket SSE around each bucket's own mean
    bucket_0_error = np.sum(np.square(X0 - X0_error))
    bucket_1_error = np.sum(np.square(X1 - X1_error))
# print(
# X.shape,
# X0.shape,
# X1.shape,
# X0_error.shape,
# X1_error.shape,
# bucket_0_error,
# bucket_1_error,
# )
return X[split_n, dim], bucket_0_error + bucket_1_error
@numba.njit(cache=True, parallel=True)
def optimal_split_val_new(
X: np.ndarray, dim: int, idxs: np.ndarray
) -> tuple[float, float]:
losses = np.zeros(X.shape[0])
split_vals = np.zeros(X.shape[0])
# pylint: disable=not-an-iterable
for i in numba.prange(X.shape[0]):
split_vals[i], losses[i] = calculate_loss(X, i, dim, idxs)
arg_min = np.argmin(losses)
print("returned loss", split_vals[arg_min], losses[arg_min])
return split_vals[arg_min], losses[arg_min]
class Bucket:
"""
sumX and sumX2 are (IDxs_per_codebook, 1) e.g (512 // 16, 1)
"""
__slots__ = "N D id sumX sumX2 point_ids support_add_and_remove".split()
def __init__(
self,
D=None,
N=0,
sumX=None,
sumX2=None,
point_ids=None,
bucket_id=0,
support_add_and_remove=False,
):
# self.reset(D=D, sumX=sumX, sumX2=sumX2)
# assert point_ids is not None
if point_ids is None:
assert N == 0
point_ids = (
set() if support_add_and_remove else np.array([], dtype=np.int64)
)
self.N = len(point_ids)
self.id = bucket_id
if self.N == 0:
print("created empty bucket: ", self.id)
# this is just so that we can store the point ids as array instead of
# set, while still retaining option to run our old code that needs
# them to be a set for efficient inserts and deletes
self.support_add_and_remove = support_add_and_remove
if support_add_and_remove:
self.point_ids = set(point_ids)
else:
self.point_ids = np.asarray(point_ids)
# figure out D
if (D is None or D < 1) and (sumX is not None):
D = len(sumX)
elif (D is None or D < 1) and (sumX2 is not None):
D = len(sumX2)
assert D is not None
self.D = D
# figure out + sanity check stats arrays
self.sumX = np.zeros(D, dtype=np.float32) if (sumX is None) else sumX
self.sumX2 = np.zeros(D, dtype=np.float32) if (sumX2 is None) else sumX2 # noqa
# print("D: ", D)
# print("sumX type: ", type(sumX))
assert len(self.sumX) == D
assert len(self.sumX2) == D
self.sumX = np.asarray(self.sumX).astype(np.float32)
self.sumX2 = np.asarray(self.sumX2).astype(np.float32)
def add_point(self, point, point_id=None):
assert self.support_add_and_remove
# TODO replace with more numerically stable updates if necessary
self.N += 1
self.sumX += point
self.sumX2 += point * point
if point_id is not None:
self.point_ids.add(point_id)
def remove_point(self, point, point_id=None):
assert self.support_add_and_remove
self.N -= 1
self.sumX -= point
self.sumX2 -= point * point
if point_id is not None:
self.point_ids.remove(point_id)
def deepcopy(self, bucket_id=None): # deep copy
bucket_id = self.id if bucket_id is None else bucket_id
return Bucket(
sumX=np.copy(self.sumX),
sumX2=np.copy(self.sumX2),
point_ids=copy.deepcopy(self.point_ids),
bucket_id=bucket_id,
)
def split(self, X=None, dim=None, val=None, X_orig=None):
id0 = 2 * self.id
id1 = id0 + 1
if X is None or self.N < 2: # copy of this bucket + an empty bucket
return (self.deepcopy(bucket_id=id0), Bucket(D=self.D, bucket_id=id1))
assert self.point_ids is not None
my_idxs = np.asarray(self.point_ids)
X = X_orig[my_idxs]
X_orig = X if X_orig is None else X_orig[my_idxs]
mask = X[:, dim] > val #
not_mask = ~mask
X0 = X[not_mask]
X1 = X[mask]
ids0 = my_idxs[not_mask]
ids1 = my_idxs[mask]
def create_bucket(points, ids, bucket_id):
sumX = points.sum(axis=0) if len(ids) else None
sumX2 = (points * points).sum(axis=0) if len(ids) else None
return Bucket(
D=self.D, point_ids=ids, sumX=sumX, sumX2=sumX2, bucket_id=bucket_id
)
return create_bucket(X0, ids0, id0), create_bucket(X1, ids1, id1)
def optimal_split_val(self, X, dim, X_orig=None):
if self.N < 2 or self.point_ids is None:
return 0, 0
my_idxs = np.asarray(self.point_ids)
if X_orig is not None:
X_orig = X_orig[my_idxs]
return optimal_split_val(X[my_idxs], dim, X_orig=X_orig)
# return optimal_split_val_new(X, dim, my_idxs)
def col_means(self):
return self.sumX.astype(np.float64) / max(1, self.N)
def col_variances(self, safe=False):
if self.N < 1:
return np.zeros(self.D, dtype=np.float32)
E_X2 = self.sumX2 / self.N
E_X = self.sumX / self.N
ret = E_X2 - (E_X * E_X)
return np.maximum(0, ret) if safe else ret
def col_sum_sqs(self):
return self.col_variances() * self.N
@property
def loss(self):
# more stable version, that also clamps variance at 0
return max(0, np.sum(self.col_sum_sqs()))
def create_codebook_start_end_idxs(X, number_of_codebooks, algo="start"):
"""
returns vector (C, 2)
[
start_idx_0, end_idx_0,
start_idx_1, end_idx_1,
...
]
"""
assert algo in ("start", "end")
_, D = X.shape
number_of_codebooks = int(number_of_codebooks)
assert D >= number_of_codebooks
idxs = np.empty((number_of_codebooks, 2), dtype=np.int64)
full_subvec_len = D // number_of_codebooks
start_idx = 0
for c in range(number_of_codebooks):
subvec_len = full_subvec_len
if algo == "start": # wider codebooks at the start
if c < (D % number_of_codebooks):
subvec_len += 1
elif algo == "end": # wider codebooks at the end
if (number_of_codebooks - c - 1) < (D % number_of_codebooks):
subvec_len += 1
end_idx = min(D, start_idx + subvec_len)
idxs[c, 0] = start_idx
idxs[c, 1] = end_idx
start_idx = end_idx
assert idxs[0, 0] == 0
assert idxs[-1, -1] == D
return idxs
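# Worked example: for X with D = 10 columns and number_of_codebooks = 4,
# algo="start" returns
#   [[0, 3], [3, 6], [6, 8], [8, 10]]
# i.e. the D % number_of_codebooks = 2 leftover dimensions widen the first two codebooks.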
class MultiSplit:
__slots__ = "dim vals scaleby offset".split()
def __init__(self, dim, vals, scaleby=None, offset=None):
self.dim = dim
self.vals = np.asarray(vals)
self.scaleby = scaleby
self.offset = offset
def __repr__(self) -> str:
return f"<{self.get_params()}>"
def __str__(self) -> str:
return self.get_params()
def get_params(self) -> str:
params = (
f"Multisplit: dim({self.dim}), vals({self.vals}), "
f"scaleby({self.scaleby}), offset({self.offset})"
)
return params
def preprocess_x(self, x: np.ndarray) -> np.ndarray:
if self.offset is not None:
x = x - self.offset
if self.scaleby is not None:
x = x * self.scaleby
return x
```
#### File: src/python/analysis.py
```python
import glob
import os, sys
import re
import argparse
import json
from subprocess import call
from typing import Any, Dict
import torchvision
import torch
from torchvision import transforms as T
import pandas as pd
from models.resnet import ResNet50_Weights, resnet50
from halutmatmul.model import HalutHelper
from halutmatmul.halutmatmul import EncodingAlgorithm, HalutModuleConfig
def sys_info() -> None:
print("__Python VERSION:", sys.version)
print("__pyTorch VERSION:", torch.__version__)
print(
"__CUDA VERSION",
)
# ! nvcc --version
print("__CUDNN VERSION:", torch.backends.cudnn.version())
print("__Number CUDA Devices:", torch.cuda.device_count())
print("__Devices")
call(
[
"nvidia-smi",
"--format=csv",
"--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free",
]
)
print("Active CUDA Device: GPU", torch.cuda.current_device())
print("Available devices ", torch.cuda.device_count())
print("Current cuda device ", torch.cuda.current_device())
def halut_analysis_helper(
cuda_id: int,
batch_size_store: int,
halut_modules: dict[str, list[int]],
halut_data_path: str,
dataset_path: str,
learned_path: str,
) -> dict[str, Any]:
torch.cuda.set_device(cuda_id)
sys_info()
device = torch.device(
"cuda:" + str(cuda_id) if torch.cuda.is_available() else "cpu"
)
state_dict = ResNet50_Weights.IMAGENET1K_V2.get_state_dict(progress=True)
imagenet_val = torchvision.datasets.ImageNet(
root=dataset_path, # "/scratch/janniss/imagenet/",
split="val",
transform=ResNet50_Weights.IMAGENET1K_V2.transforms(),
)
model = resnet50(weights=state_dict, progress=True)
model.cuda()
model.to(device)
halut_model = HalutHelper(
model,
state_dict,
imagenet_val,
batch_size_inference=128,
batch_size_store=batch_size_store,
data_path=halut_data_path,
device=device,
learned_path=learned_path,
report_error=True,
)
halut_model.print_available_module()
for k, v in halut_modules.items():
print("activate", k, v)
halut_model.activate_halut_module(
k,
C=v[HalutModuleConfig.C],
rows=v[HalutModuleConfig.ROWS],
K=v[HalutModuleConfig.K],
encoding_algorithm=v[HalutModuleConfig.ENCODING_ALGORITHM],
)
halut_model.run_inference()
print(halut_model.get_stats())
return halut_model.get_stats()
def run_test(
cuda_id: int,
halut_data_path: str,
dataset_path: str,
learned_path: str,
C: int,
layer_start_offset: int = 0,
) -> None:
# pylint: disable=unused-variable
tests = [
"layer4.2.conv3",
"layer1.0.conv1",
"layer1.0.conv3",
"layer1.1.conv1",
"layer1.1.conv3",
"layer1.2.conv1",
"layer1.2.conv3",
"layer2.0.conv1",
"layer2.0.conv3",
"layer2.1.conv1",
"layer2.1.conv3",
"layer2.2.conv1",
"layer2.2.conv3",
"layer2.3.conv1",
"layer2.3.conv3",
"layer3.0.conv1",
"layer3.0.conv3",
"layer3.1.conv1",
"layer3.1.conv3",
"layer3.2.conv1",
"layer3.2.conv3",
"layer3.3.conv1",
"layer3.3.conv3",
"layer3.4.conv1",
"layer3.4.conv3",
"layer3.5.conv1",
"layer3.5.conv3",
"layer4.0.conv1",
"layer4.0.conv3",
"layer4.1.conv1",
"layer4.1.conv3",
"layer4.2.conv1",
]
tests_bad = [
"layer4.0.conv1",
"layer4.0.conv3",
"layer4.1.conv1",
"layer3.0.conv3",
"layer3.1.conv1",
"layer3.1.conv3",
"layer2.0.conv1",
"layer3.0.conv1",
]
downsampled = [
"layer1.0.downsample.0",
"layer2.0.downsample.0",
"layer3.0.downsample.0",
"layer4.0.downsample.0",
]
conv3x3 = [
"layer1.0.conv2",
"layer1.1.conv2",
"layer1.2.conv2",
"layer2.0.conv2",
"layer2.1.conv2",
"layer2.2.conv2",
"layer2.3.conv2",
"layer3.0.conv2",
"layer3.1.conv2",
"layer3.2.conv2",
"layer3.3.conv2",
"layer3.4.conv2",
"layer3.5.conv2",
"layer4.0.conv2",
"layer4.1.conv2",
"layer4.2.conv2",
]
layers_interesting = [
"layer1.0.conv2",
"layer1.1.conv3",
"layer2.0.conv1",
"layer2.0.conv2",
"layer2.0.downsample.0",
"layer2.3.conv3",
"layer3.0.conv1",
"layer3.0.conv2",
"layer3.0.conv3",
"layer3.3.conv3",
"layer3.4.conv2",
"layer3.5.conv1",
"layer3.5.conv2",
"layer4.0.conv1",
"layer4.0.conv2",
"layer4.0.conv3",
"layer4.1.conv1",
"layer4.2.conv1",
"layer4.2.conv3",
]
rows = [
1,
2,
4,
8,
16,
32,
# 64,
# 128,
# 256,
]
rows.reverse()
result_base_path = "./results/data/accuracy/single_layer/c_k_sweep/"
layers_test = layers_interesting[layer_start_offset:]
for l in layers_test:
layer_loc = l.split(".", maxsplit=1)[0]
rows_adapted = []
if layer_loc in ["layer1"]:
rows_adapted = [1, 2, 4, 8]
elif layer_loc == "layer2":
rows_adapted = [2, 4, 8, 16]
elif layer_loc == "layer3":
rows_adapted = [8, 16, 32, 64]
elif layer_loc == "layer4":
rows_adapted = [32, 64, 128, 256]
for r in rows_adapted:
# for C in [8, 16, 32, 64]:
for e in [
EncodingAlgorithm.FOUR_DIM_HASH,
EncodingAlgorithm.DECISION_TREE,
EncodingAlgorithm.FULL_PQ,
]:
for K in (
[8, 16, 32]
if e == EncodingAlgorithm.FOUR_DIM_HASH
else [4, 8, 12, 16, 24, 32, 64]
):
files = glob.glob(result_base_path + "/*.json")
files_res = []
regex = rf"{l}_{C}_{K}_{e}-{r}\.json"
pattern = re.compile(regex)
files_res = [x for x in files if pattern.search(x)]
if len(files_res) == 1:
print("alread done")
continue
# learned_files = check_file_exists_and_return_path(
# learned_path, k, "learned", C, r
# )
# if len(learned_files) == 0:
# print(f"not learned {k} C: {C}, r: {r}")
# continue
res = halut_analysis_helper(
cuda_id,
batch_size_store=256,
halut_modules=dict({l: [C, r, K, e]}),
halut_data_path=halut_data_path,
dataset_path=dataset_path,
learned_path=learned_path,
)
with open(
result_base_path
+ l
+ "_"
+ str(C)
+ "_"
+ str(K)
+ "_"
+ str(e)
+ "-"
+ str(r)
+ ".json",
"w",
) as fp:
json.dump(res, fp, sort_keys=True, indent=4)
all_layers = [
"layer1.0.conv1",
"layer1.0.conv2",
"layer1.0.conv3",
"layer1.0.downsample.0",
"layer1.1.conv1",
"layer1.1.conv2",
"layer1.1.conv3",
"layer1.2.conv1",
"layer1.2.conv2",
"layer1.2.conv3",
"layer2.0.conv1",
"layer2.0.conv2",
"layer2.0.conv3",
"layer2.0.downsample.0",
"layer2.1.conv1",
"layer2.1.conv2",
"layer2.1.conv3",
"layer2.2.conv1",
"layer2.2.conv2",
"layer2.2.conv3",
"layer2.3.conv1",
"layer2.3.conv2",
"layer2.3.conv3",
"layer3.0.conv1",
"layer3.0.conv2",
"layer3.0.conv3",
"layer3.0.downsample.0",
"layer3.1.conv1",
"layer3.1.conv2",
"layer3.1.conv3",
"layer3.2.conv1",
"layer3.2.conv2",
"layer3.2.conv3",
"layer3.3.conv1",
"layer3.3.conv2",
"layer3.3.conv3",
"layer3.4.conv1",
"layer3.4.conv2",
"layer3.4.conv3",
"layer3.5.conv1",
"layer3.5.conv2",
"layer3.5.conv3",
"layer4.0.conv1",
"layer4.0.conv2",
"layer4.0.conv3",
"layer4.0.downsample.0",
"layer4.1.conv1",
"layer4.1.conv2",
"layer4.1.conv3",
"layer4.2.conv1",
"layer4.2.conv2",
"layer4.2.conv3",
]
def json_to_dataframe(
path: str, layer_name: str, max_C: int = 128, prefix: str = ""
) -> pd.DataFrame:
files = glob.glob(path + "/*.json")
regex = rf"{layer_name}_.+\.json"
pattern = re.compile(regex)
files_res = [x for x in files if pattern.search(x)]
dfs = [] # an empty list to store the data frames
for file in files_res:
data = pd.read_json(file) # read data frame from json file
if layer_name + ".learned_n" not in data.columns:
data[layer_name + ".learned_n"] = data.iloc[0][
layer_name + ".learned_a_shape"
]
data[layer_name + ".learned_d"] = data.iloc[1][
layer_name + ".learned_a_shape"
]
K = data.iloc[0][layer_name + ".K"]
C = data.iloc[0][layer_name + ".C"]
data[layer_name + ".learned_m"] = int(
data.iloc[0][layer_name + ".L_size"] / (4 * K * C)
)
C = data.iloc[0][layer_name + ".C"]
if C > max_C:
continue
if layer_name + ".learned_a_shape" in data.columns:
data = data.drop([1])
data = data.drop(
columns=[
layer_name + ".learned_a_shape",
layer_name + ".learned_b_shape",
]
)
data["hue_string"] = prefix + str(C)
data["test_name"] = layer_name + "-" + str(data.iloc[0][layer_name + ".C"])
data["layer_name"] = layer_name + (
" (3x3)" if "conv2" in layer_name else " (1x1)"
)
data["row_name"] = layer_name.split(".")[0]
data["col_name"] = layer_name[len(layer_name.split(".")[0]) + 1 :]
dfs.append(data) # append the data frame to the list
df = pd.concat(
dfs, ignore_index=True
) # concatenate all the data frames in the list.
df = df.drop(columns="halut_layers")
df["top_1_accuracy_100"] = df["top_1_accuracy"] * 100
final_dfs = []
for C in [16, 32, 64]:
df_C = df[df[layer_name + ".C"] == C]
df_C.sort_values(
by=["top_1_accuracy"], inplace=True, ignore_index=True, ascending=False
)
final_dfs.append(df_C.iloc[[0]])
df = pd.concat(final_dfs, ignore_index=True)
df.columns = df.columns.str.replace(layer_name + ".", "")
return df
def multilayer_analysis(
cuda_id: int, halut_data_path: str, dataset_path: str, learned_path: str, C: int
) -> None:
data_path = "results/data/accuracy/single_layer/training_data"
dfs = []
i = 0
for l in all_layers:
i = i + 1
# if i > 6:
# break
df = json_to_dataframe(data_path, l)
dfs.append(df)
df = pd.concat(dfs)
C = 64
df_64 = df[df["C"] == C]
df_64.sort_values(
by=["top_1_accuracy"], inplace=True, ignore_index=True, ascending=False
)
print(df_64)
df_64.to_csv("test.csv")
result_base_path = "./results/data/accuracy/multi_layer/"
for i in range(20, 27):
layer_dict: Dict[str, list[int]] = dict({})
for k in range(i):
layer_dict |= {
df_64.iloc[k]["layer_name"].split(" ")[0]: [
C,
int(df_64.iloc[k]["rows"]),
]
}
print(layer_dict)
res = halut_analysis_helper(
cuda_id,
batch_size_store=256,
halut_modules=layer_dict,
halut_data_path=halut_data_path,
dataset_path=dataset_path,
learned_path=learned_path,
)
with open(
f'{result_base_path}{i}_{str(C)}_{res["top_1_accuracy"]:2.3f}.json',
"w",
) as fp:
json.dump(res, fp, sort_keys=True, indent=4)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Start analysis")
parser.add_argument("cuda_id", metavar="N", type=int, help="id of cuda_card")
parser.add_argument("-dataset", type=str, help="dataset path")
parser.add_argument("-halutdata", type=str, help="halut data path")
parser.add_argument("-learned", type=str, help="halut learned path")
parser.add_argument("-C", type=int, help="C")
parser.add_argument("-offset", type=int, help="start_layer_offset", default=0)
args = parser.parse_args()
run_test(
args.cuda_id, args.halutdata, args.dataset, args.learned, args.C, args.offset
)
# run_test(
# 0,
# "/scratch2/janniss/resnet_input_data",
# "/scratch2/janniss/imagenet",
# "/scratch2/janniss/learned",
# C
# )
# multilayer_analysis(
# args.cuda_id, args.halutdata, args.dataset, args.learned, args.C
# )
```
#### File: halutmatmul/cuda/functions.py
```python
import math
from typing import Optional, Union
import cupy as cp # type: ignore[import]
import torch
import numpy as np
from torch.nn.common_types import _size_any_t
from halutmatmul.decision_tree_and_pq import halut_encode_pq_tensor
MAX_THREADS = 1024
SHARED_MEM_PER_BLOCK = 49152
READ_ACC_LUT_KERNEL_SPLIT_FACTOR = 8
def run_encode_kernel(
kernel: cp.RawKernel,
N: int,
D: int,
A: torch.Tensor,
hash_info: torch.Tensor,
C: int,
) -> torch.Tensor:
rows_per_block = 64 // (C // 16)
blocks = N // rows_per_block + (1 if N % rows_per_block else 0)
block_dim = (rows_per_block, C)
encoded = cp.zeros((N, C), dtype=cp.int32)
cupy_A = cp.ascontiguousarray(cp.from_dlpack(A.detach()))
cupy_hash_info = cp.ascontiguousarray(cp.from_dlpack(hash_info.detach()))
kernel(
(blocks,),
block_dim,
(cupy_A, cupy_hash_info, encoded, N, D, N * D),
# shared_mem=4 * (8 + 3) * C * 4,
)
torch_A = torch.from_dlpack(encoded)
return torch_A
def calc_rows_per_block_read_acc_lut_kernel(
split_factor: int, C: int, K: int
) -> tuple[int, int]:
rows_per_block = MAX_THREADS // split_factor
used_shared_mem = rows_per_block * C * 4 + C * K * split_factor * 4
while used_shared_mem > SHARED_MEM_PER_BLOCK:
rows_per_block //= 2
used_shared_mem = rows_per_block * C * 4 + C * K * split_factor * 4
if rows_per_block == 2:
split_factor //= 2
rows_per_block = MAX_THREADS // split_factor
return (rows_per_block, split_factor)
def run_read_acc_lut_kernel(
kernel: cp.RawKernel,
N: int,
M: int,
lut: torch.Tensor,
A_enc: torch.Tensor,
C: int,
K: int,
) -> torch.Tensor:
split_factor = READ_ACC_LUT_KERNEL_SPLIT_FACTOR
rows_per_block, split_factor = calc_rows_per_block_read_acc_lut_kernel(
split_factor, C, K
)
block_dim = (rows_per_block, split_factor)
blocks_x = N // rows_per_block + (1 if N % rows_per_block else 0)
blocks_y = M // split_factor + (1 if M % split_factor else 0)
grid_dim = (blocks_x, blocks_y)
result = cp.zeros((N, M), dtype=cp.float32)
cupy_A_enc = cp.ascontiguousarray(cp.from_dlpack(A_enc.detach()))
cupy_lut = cp.ascontiguousarray(cp.from_dlpack(lut.detach()))
used_shared_mem = rows_per_block * C * 4 + C * K * split_factor * 4
assert used_shared_mem <= SHARED_MEM_PER_BLOCK
kernel(
grid_dim,
block_dim,
(cupy_lut, cupy_A_enc, result, N, M),
# shared_mem=4 * (8 + 3) * C * 4,
)
torch_res = torch.from_dlpack(result)
return torch_res
def halutmatmul_gpu(
encode_kernel: cp.RawKernel,
read_acc_lut_kernel: cp.RawKernel,
A: torch.Tensor,
L: torch.Tensor,
H: torch.Tensor,
) -> torch.Tensor:
# encode
cupy_A = cp.ascontiguousarray(cp.from_dlpack(A.detach()))
    cupy_H = cp.ascontiguousarray(cp.from_dlpack(H.detach()))
# read accumulate LUTs
cupy_L = cp.ascontiguousarray(cp.from_dlpack(L.detach()))
result = halutmatmul_gpu_cupy(
encode_kernel=encode_kernel,
read_acc_lut_kernel=read_acc_lut_kernel,
A=cupy_A,
L=cupy_L,
        H=cupy_H,
)
torch_res = torch.from_dlpack(result)
return torch_res
def halut_encode_pq_tensor_interface(
_blocks: tuple,
_block_dim_encode: tuple,
args: tuple[cp.ndarray, cp.ndarray, cp.ndarray, int, int, int],
) -> None:
A, H, encoded, N, D, _ = args
C = encoded.shape[1]
encoded_result = halut_encode_pq_tensor(
torch.reshape(torch.from_dlpack(A), (N, -1)),
torch.reshape(torch.from_dlpack(H), (C, -1, D)),
)
cp.copyto(encoded, cp.from_dlpack(encoded_result.detach()))
def halutmatmul_gpu_cupy(
encode_kernel: cp.RawKernel,
read_acc_lut_kernel: cp.RawKernel,
A: cp.ndarray,
L: cp.ndarray,
H: cp.ndarray,
) -> cp.ndarray:
N = A.shape[0]
D = A.shape[1]
M = L.shape[0]
C = L.shape[1]
K = L.shape[2]
# encode
rows_per_block_encode = 64 // ((C // 16) if C >= 16 else 1)
blocks = N // rows_per_block_encode + (1 if N % rows_per_block_encode else 0)
block_dim_encode = (rows_per_block_encode, C)
encoded = cp.zeros((N, C), dtype=cp.int32)
encode_kernel(
(blocks,),
block_dim_encode,
(A, H, encoded, N, D, N * D),
)
# read accumulate LUTs
split_factor = READ_ACC_LUT_KERNEL_SPLIT_FACTOR
rows_per_block_ral, split_factor = calc_rows_per_block_read_acc_lut_kernel(
split_factor, C, K
)
block_dim_ral = (rows_per_block_ral, split_factor)
blocks_x = N // rows_per_block_ral + (1 if N % rows_per_block_ral else 0)
blocks_y = M // split_factor + (1 if M % split_factor else 0)
grid_dim = (blocks_x, blocks_y)
result = cp.zeros((N, M), dtype=cp.float32)
used_shared_mem = rows_per_block_ral * C * 4 + C * K * split_factor * 4
assert used_shared_mem <= SHARED_MEM_PER_BLOCK
read_acc_lut_kernel(
grid_dim,
block_dim_ral,
(L, encoded, result, N, M),
)
return result
def halut_conv2d_gpu(
_input: torch.Tensor,
weights: torch.Tensor,
encode_kernel: cp.RawKernel,
read_acc_lut_kernel: cp.RawKernel,
L: torch.Tensor,
H: torch.Tensor,
kernel_size: _size_any_t = (1, 1),
stride: _size_any_t = (1, 1),
padding: _size_any_t = 0,
dilation: _size_any_t = 1,
bias: Optional[torch.Tensor] = None,
return_reshaped_inputs: bool = False, # needed for storage
) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
unfold_ops = torch.nn.Unfold(
kernel_size=kernel_size, dilation=dilation, padding=padding, stride=stride
)
unfolded = unfold_ops(_input).transpose(1, 2)
unfolded = torch.reshape(unfolded, (-1, unfolded.size(2)))
if return_reshaped_inputs:
weights_prepared = weights.view(weights.size(0), -1).t()
return (unfolded, weights_prepared)
unfolded_cupy = cp.asarray(cp.from_dlpack(unfolded.detach()))
H_cupy = cp.asarray(cp.from_dlpack(H.detach()))
L_cupy = cp.asarray(cp.from_dlpack(L.detach()))
ret = halutmatmul_gpu_cupy(
encode_kernel=encode_kernel,
read_acc_lut_kernel=read_acc_lut_kernel,
A=unfolded_cupy,
L=L_cupy,
H=H_cupy,
)
batch_size = _input.size(0)
result_tensor = torch.from_dlpack(ret)
result_tensor = torch.reshape(
result_tensor, (batch_size, -1, result_tensor.size(1))
).transpose(1, 2)
stride = (stride, stride) if isinstance(stride, int) else (stride[0], stride[1])
dilation = (
(dilation, dilation)
if isinstance(dilation, int)
else (dilation[0], dilation[1])
)
padding = (
(padding,) * 4
if isinstance(padding, int)
else (padding[0], padding[0], padding[1], padding[1])
)
cout, _, kernel_height, kernel_width = weights.shape
stride_y, stride_x = stride
# reference https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
out_y, out_x = (
math.floor(
(
(_input.shape[2] + padding[3] + padding[2])
- dilation[0] * (kernel_height - 1)
- 1
)
/ stride_y
+ 1
),
math.floor(
(
(_input.shape[3] + padding[0] + padding[1])
- dilation[1] * (kernel_width - 1)
- 1
)
/ stride_x
+ 1
),
)
ret = torch.reshape(result_tensor, (batch_size, cout, out_y, out_x))
if bias is not None:
bias = torch.broadcast_to(
bias.repeat(out_y * out_x).reshape((cout, out_y, out_x)),
(batch_size, cout, out_y, out_x),
)
ret = ret + bias
return ret
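# Sketch of the data flow above: halut_conv2d_gpu is im2col (torch.nn.Unfold)
# followed by the approximate GEMM and a reshape back to (N, C_out, H_out, W_out);
# only the GEMM is replaced by LUT lookups, the unfold/reshape bookkeeping is exact.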
def error_cupy(
actual: torch.Tensor,
desired: torch.Tensor,
) -> np.ndarray:
actual_cupy = cp.asarray(cp.from_dlpack(actual.detach()))
desired_cupy = cp.asarray(cp.from_dlpack(desired.detach()))
_min = cp.min(desired_cupy)
_max = cp.max(desired_cupy)
actual_cupy_std = (actual_cupy - _min) / (_max - _min)
desired_cupy_std = (desired_cupy - _min) / (_max - _min)
_range = (-1, 1)
actual_cupy_scaled = actual_cupy_std * (_range[1] - _range[0]) + _range[0]
desired_cupy_scaled = desired_cupy_std * (_range[1] - _range[0]) + _range[0]
mae = cp.asnumpy(cp.mean(cp.abs((actual_cupy - desired_cupy))))
mse = cp.asnumpy(cp.mean((actual_cupy - desired_cupy) ** 2))
mape = cp.asnumpy(
cp.mean(cp.abs(actual_cupy - desired_cupy) / (1 + cp.abs(desired_cupy)))
)
scaled_absolut_error = cp.asnumpy(
cp.mean(cp.abs(actual_cupy_scaled - desired_cupy_scaled))
)
scaled_shift = cp.asnumpy(cp.mean(actual_cupy_scaled - desired_cupy_scaled))
return np.array((mae, mse, mape, scaled_absolut_error, scaled_shift))
```
#### File: python/halutmatmul/decision_tree_and_pq.py
```python
from typing import Literal
import resource
import sys
from pathlib import Path
import multiprocessing
import warnings
from joblib import Parallel, delayed
import numpy as np
import numba
from numba import prange
import torch
from scipy.cluster.vq import kmeans2
from sklearn.cluster import KMeans, BisectingKMeans
from sklearn import tree
from sklearn.tree import _tree
from sklearn.model_selection import cross_val_score
from halutmatmul.functions import create_codebook_start_end_idxs
sys.path.append(
str(Path(__file__).parent) + "/../../../maddness/python/"
) # for maddness import
from maddness.util.least_squares import ( # type: ignore[attr-defined]
encoded_lstsq,
_XW_encoded,
)
class DecisionTreeOffset:
DIMS = 0
THRESHOLDS = 1
CLASSES = 2
TOTAL = 3
DEFAULT_NEG_VALUE = -4419
@numba.jit(nopython=True, parallel=False)
def apply_hash_function_decision_tree(
X: np.ndarray, decision_tree: np.ndarray
) -> np.ndarray:
N, _ = X.shape
group_ids = np.zeros(N, dtype=np.int64) # needs to be int64 because of index :-)
B = decision_tree.shape[0] // 3
n_decisions = int(np.log2(B))
for depth in range(n_decisions):
        index_offset = 2**depth - 1
        split_thresholds = decision_tree[group_ids + B + index_offset]
        dims = decision_tree[group_ids + index_offset].astype(np.int64)
# x = X[np.arange(N), dims]
# make it numba compatible
x = np.zeros(group_ids.shape[0], np.float32)
for i in range(x.shape[0]):
x[i] = X[i, dims[i]]
indicators = x > split_thresholds
group_ids = (group_ids * 2) + indicators
group_ids = decision_tree[group_ids + 2 * B].astype(np.int32)
return group_ids
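# Layout note (see DecisionTreeOffset): `decision_tree` is a flat float32 array
# of length 3 * B with B = 2**depth. Entries [0, B) hold the split dimensions,
# [B, 2B) the thresholds (both indexed by breadth-first node id, root at 0),
# and [2B, 3B) map every leaf to its k-means class id.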
@numba.jit(nopython=True, parallel=True)
def halut_encode_decision_tree(X: np.ndarray, numpy_array: np.ndarray) -> np.ndarray:
N, _ = X.shape
C = numpy_array.shape[0]
A_enc = np.empty((C, N), dtype=np.int32) # column-major
for c in prange(C):
A_enc[c] = apply_hash_function_decision_tree(X, numpy_array[c])
return np.ascontiguousarray(A_enc.T)
def apply_hash_function_pq(X: np.ndarray, prototypes: np.ndarray) -> np.ndarray:
group_ids = np.argsort(
np.array([np.linalg.norm(X - x, axis=1) for x in prototypes]).T, axis=1
)[:, :1].flatten()
return group_ids
def apply_hash_function_pq_tensor(
X: torch.Tensor, prototypes: torch.Tensor
) -> torch.Tensor:
group_ids = torch.argsort(
torch.stack([torch.linalg.norm(X - x, axis=1) for x in prototypes]).T, dim=1
)[:, :1].flatten()
return group_ids
def halut_encode_pq(X: np.ndarray, prototypes: np.ndarray) -> np.ndarray:
N, _ = X.shape
C = prototypes.shape[0]
A_enc = np.empty((C, N), dtype=np.int32) # column-major
pq_idxs = create_codebook_start_end_idxs(X.shape[1], C, algo="start")
for c in prange(C):
start_idx, end_idx = pq_idxs[c]
idxs = np.arange(start_idx, end_idx)
X_cut = X[:, idxs]
A_enc[c] = apply_hash_function_pq(X_cut, prototypes[c][:, idxs])
return np.ascontiguousarray(A_enc.T)
def halut_encode_pq_tensor(X: torch.Tensor, prototypes: torch.Tensor) -> torch.Tensor:
N, _ = X.shape
C = prototypes.shape[0]
K = prototypes.shape[1]
A_enc = torch.empty((C, N), dtype=torch.int32, device=str(X.device)) # column-major
pq_idxs = create_codebook_start_end_idxs(X.shape[1], C, algo="start")
for c in prange(C):
start_idx, end_idx = pq_idxs[c]
idxs = torch.arange(start_idx, end_idx, device=str(X.device))
X_cut = X[:, idxs]
A_enc[c] = apply_hash_function_pq_tensor(X_cut, prototypes[c][:, idxs])
offsets = torch.arange(C, dtype=torch.int32, device=str(X.device)) * K
return torch.Tensor.contiguous(A_enc.T) + offsets
def tree_to_numpy(
decision_tree: tree.DecisionTreeClassifier, depth: int = 4
) -> np.ndarray:
tree_ = decision_tree.tree_
class_names = decision_tree.classes_
B = 2**depth
total_length = B * DecisionTreeOffset.TOTAL
numpy_array = np.ones(total_length, np.float32) * DEFAULT_NEG_VALUE
def _add_leaf(value: int, class_name: int, depth: int, tree_id: int) -> None:
if tree_id >= B:
numpy_array[tree_id - B + DecisionTreeOffset.CLASSES * B] = class_name
else:
_add_leaf(value, class_name, depth + 1, 2 * tree_id)
_add_leaf(value, class_name, depth + 1, 2 * tree_id + 1)
def recurse_tree(node: int, depth: int, tree_id: int) -> None:
value = None
if tree_.n_outputs == 1:
value = tree_.value[node][0]
else:
value = tree_.value[node].T[0]
class_name = np.argmax(value)
if tree_.n_classes[0] != 1 and tree_.n_outputs == 1:
class_name = class_names[class_name]
# pylint: disable=c-extension-no-member
if tree_.feature[node] != _tree.TREE_UNDEFINED:
dim = tree_.feature[node]
threshold = tree_.threshold[node]
numpy_array[tree_id - 1] = dim
numpy_array[tree_id - 1 + DecisionTreeOffset.THRESHOLDS * B] = threshold
recurse_tree(tree_.children_left[node], depth + 1, 2 * tree_id)
recurse_tree(tree_.children_right[node], depth + 1, 2 * tree_id + 1)
else:
_add_leaf(value, class_name, depth, tree_id) # type: ignore[arg-type]
recurse_tree(0, 1, 1)
for i in range(B):
assert numpy_array[DecisionTreeOffset.CLASSES * B + i] != DEFAULT_NEG_VALUE
if numpy_array[i] == DEFAULT_NEG_VALUE:
numpy_array[i] = 0 # adding default dimension TODO: optimize
return numpy_array
def learn_decision_tree(
X: np.ndarray, K: int = 16, depth: int = 4, iterations: int = 25
) -> tuple[np.ndarray, np.ndarray]:
X = X.copy().astype(np.float32)
decision_tree_args = {
"min_samples_split": 2,
"max_depth": depth,
"min_samples_leaf": 20,
"max_leaf_nodes": 2**depth,
"splitter": "best",
# "criterion": "log_loss",
# "class_weight": "balanced",
}
centroids_list = []
assignments_list = []
scores = []
warnings.filterwarnings(
"ignore", category=UserWarning
) # ignores empty cluster warning for kmeans
# pylint: disable=import-outside-toplevel
from timeit import default_timer as timer
for _ in range(iterations):
start = timer()
centroids_, assignments_ = kmeans2(X, K, minit="points", iter=5)
end = timer()
print(f"kmeans time {end - start}")
# kmeans = KMeans(n_clusters=K, n_init=1).fit(X)
# kmeans = BisectingKMeans(n_clusters=K, n_init=1).fit(X)
# centroids_, assignments_ = kmeans.cluster_centers_, kmeans.labels_
clf_ = tree.DecisionTreeClassifier(**decision_tree_args)
start = timer()
score_ = cross_val_score(clf_, X, assignments_, cv=2, n_jobs=2)
end = timer()
print(f"cross_val_score time {end - start}", score_)
centroids_list.append(centroids_)
assignments_list.append(assignments_)
scores.append(np.mean(score_))
best_score = np.argsort(scores)[::-1]
centroids = centroids_list[best_score[0]]
assignments = assignments_list[best_score[0]]
clf = tree.DecisionTreeClassifier(**decision_tree_args)
clf = clf.fit(X, assignments)
# additional Infos
PRINT_DEBUG = False
numpy_array = tree_to_numpy(clf, depth=depth)
prediction = clf.predict(X)
bincount_pred = np.bincount(prediction)
if PRINT_DEBUG:
r = tree.export_text(clf)
print(r)
hist = np.bincount(assignments)
print(hist)
print(bincount_pred)
l2_error = np.mean(np.sqrt((centroids[prediction] - X) ** 2))
l1_error = np.mean((centroids[prediction] - X))
score = cross_val_score(clf, X, assignments, cv=5)
print("L2 error: ", l2_error)
print("L1 error: ", l1_error)
# Rebase
for i in range(bincount_pred.shape[0]):
if bincount_pred[i] > 0:
prediction_where = prediction == i
select_rows = X[prediction_where]
new_centroid = np.mean(select_rows, axis=0)
centroids[i] = new_centroid
if PRINT_DEBUG:
l2_error = np.mean(np.sqrt((centroids[prediction] - X) ** 2))
l1_error = np.mean((centroids[prediction] - X))
score = cross_val_score(clf, X, assignments, cv=5)
scores_2 = clf.score(X, assignments)
print("L2 error after: ", l2_error)
print("L1 error after: ", l1_error)
print("Prediction score: ", scores_2, score)
return centroids, numpy_array
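# Summary of the routine above: k-means proposes K target clusters, a depth-`depth`
# decision tree is fit to imitate those assignments (so encoding needs only
# `depth` comparisons per codebook), and the centroids are then re-estimated
# ("rebased") from the tree's own predictions so the prototypes match what the
# tree will actually emit at inference time.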
def decision_tree_per_codebook(
c: int, pq_idxs: np.ndarray, X: np.ndarray, K: int, depth: int, C: int, D: int
) -> tuple[np.ndarray, np.ndarray]:
start_idx, end_idx = pq_idxs[c]
idxs = np.arange(start_idx, end_idx)
X_cut = X[:, idxs]
centroids, tree = learn_decision_tree(X_cut, K=K, depth=depth, iterations=5)
for i in range(K):
tree[i] = idxs[int(tree[i])]
centroids_extended = np.zeros((K, D), np.float32)
centroids_extended[:, idxs] = centroids
ram_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print(
f"Learning progress {X.shape}-{C}-{K}: {c + 1}/{C} "
f"({(ram_usage / (1024 * 1024)):.3f} GB)"
)
return tree, centroids_extended
def init_and_learn_hash_function_decision_tree(
X: np.ndarray,
C: int,
pq_perm_algo: Literal["start", "end"] = "start",
K: int = 16,
depth: int = 4,
) -> tuple[np.ndarray, np.ndarray]:
D = X.shape[1]
depth = int(np.ceil(np.log2(K)))
B = 2**depth
X = X.astype(np.float32)
all_prototypes = np.zeros((C, K, D), dtype=np.float32)
pq_idxs = create_codebook_start_end_idxs(X.shape[1], C, algo=pq_perm_algo)
decision_trees = np.zeros((C, B * 3), dtype=np.float32)
num_cores = np.min((4, multiprocessing.cpu_count()))
results = Parallel(n_jobs=num_cores, max_nbytes=None)(
delayed(decision_tree_per_codebook)(i, pq_idxs, X, K, depth, C, D)
for i in range(C)
)
# results = []
# for i in range(C):
# results.append(decision_tree_per_codebook(i, pq_idxs, X, K, depth, C, D))
for c in range(C):
decision_trees[c] = results[c][0]
all_prototypes[c] = results[c][1]
return decision_trees, all_prototypes
def learn_proto_and_hash_function_decision_tree(
X: np.ndarray,
C: int,
K: int = 16,
# pylint: disable=unused-argument
lut_work_const: int = -1, # same interface as other learning functions
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
D = X.shape[1]
used_perm_algo: Literal["start", "end"] = "start" # or end
X_orig = X.astype(np.float32)
# X_error = X_orig - centroid shape: [N, D]
decision_trees, all_prototypes = init_and_learn_hash_function_decision_tree(
X, C, K=K, pq_perm_algo=used_perm_algo
)
A_enc = halut_encode_decision_tree(X, decision_trees)
offsets = np.arange(C, dtype=np.int32) * K
prototypes_reshape = all_prototypes.reshape((-1, all_prototypes.shape[2]))
A_enc_offset = A_enc + offsets
offset = prototypes_reshape[A_enc_offset]
offset = np.sum(offset, axis=1)
X_error = X_orig - offset
msv_orig = (X_orig * X_orig).mean()
mse_error = (X_error * X_error).mean()
# mse_error_pq = (X_error_pq * X_error_pq).mean()
print(
"X_error mse / X mean squared value: ",
mse_error / msv_orig,
mse_error,
msv_orig,
np.mean(X_orig),
)
squared_diff = np.square(X_orig - X_error).mean()
print("Error to Original squared diff", squared_diff)
# optimize prototypes discriminatively conditioned on assignments
# applying g(A) [N, C] with values from 0-K (50000, 16)
# optimizing prototypes
W = encoded_lstsq(A_enc=A_enc, Y=X_error, K=K)
all_prototypes_delta = W.reshape(C, K, D)
all_prototypes += all_prototypes_delta
# check how much improvement we got
X_error -= _XW_encoded(A_enc, W, K=K) # if we fit to X_error
mse_res = (X_error * X_error).mean()
print("X_error mse / X mse after lstsq: ", mse_res / msv_orig)
ram_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print(
f"After Ridge regression {X.shape}-{C}-{K}"
f"({(ram_usage / (1024 * 1024)):.3f} GB)"
)
report_array = np.array(
[
mse_error,
msv_orig,
mse_error / msv_orig,
np.mean(X_orig),
mse_res,
mse_res / msv_orig,
ram_usage / (1024 * 1024),
]
)
return decision_trees, all_prototypes, report_array
def centroids_per_codebook(
c: int, pq_idxs: np.ndarray, X: np.ndarray, K: int, C: int, D: int
) -> np.ndarray:
start_idx, end_idx = pq_idxs[c]
idxs = np.arange(start_idx, end_idx)
X_cut = X[:, idxs]
centroids, _ = kmeans2(X_cut, K, minit="points", iter=25)
centroids_extended = np.zeros((K, D), np.float32)
centroids_extended[:, idxs] = centroids
ram_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print(
f"Learning progress {X.shape}-{C}-{K}: {c + 1}/{C} "
f"({(ram_usage / (1024 * 1024)):.3f} GB)"
)
return centroids_extended
def init_and_learn_hash_function_full_pq(
X: np.ndarray,
C: int,
pq_perm_algo: Literal["start", "end"] = "start",
K: int = 16,
) -> np.ndarray:
D = X.shape[1]
X = X.astype(np.float32)
all_prototypes = np.zeros((C, K, D), dtype=np.float32)
pq_idxs = create_codebook_start_end_idxs(X.shape[1], C, algo=pq_perm_algo)
num_cores = np.min((2, multiprocessing.cpu_count()))
print("NUM cores", num_cores)
results = Parallel(n_jobs=num_cores, max_nbytes=None)(
delayed(centroids_per_codebook)(i, pq_idxs, X, K, C, D) for i in range(C)
)
# results = []
# for i in range(C):
# results.append(centroids_per_codebook(i, pq_idxs, X, K, C, D))
for c in range(C):
all_prototypes[c] = results[c]
return all_prototypes
def learn_proto_and_hash_function_full_pq(
X: np.ndarray,
C: int,
K: int = 16,
# pylint: disable=unused-argument
lut_work_const: int = -1, # same interface as other learning functions
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
D = X.shape[1]
used_perm_algo: Literal["start", "end"] = "start" # or end
X_orig = X.astype(np.float32)
all_prototypes = init_and_learn_hash_function_full_pq(
X, C, K=K, pq_perm_algo=used_perm_algo
)
prototypes_reshape = all_prototypes.reshape((-1, all_prototypes.shape[2]))
A_enc = halut_encode_pq(X, all_prototypes)
offsets = np.arange(C, dtype=np.int32) * K
A_enc_offset_pq = A_enc + offsets
offset = prototypes_reshape[A_enc_offset_pq]
offset = np.sum(offset, axis=1)
X_error = X_orig - offset
msv_orig = (X_orig * X_orig).mean()
mse_error = (X_error * X_error).mean()
print(
"X_error mse / X mean squared value: ",
mse_error / msv_orig,
mse_error,
msv_orig,
np.mean(X_orig),
)
squared_diff = np.square(X_orig - X_error).mean()
print("Error to Original squared diff", squared_diff)
# optimizing prototypes
W = encoded_lstsq(A_enc=A_enc, Y=X_error, K=K)
all_prototypes_delta = W.reshape(C, K, D)
all_prototypes += all_prototypes_delta
# check how much improvement we got
X_error -= _XW_encoded(A_enc, W, K=K) # if we fit to X_error
mse_res = (X_error * X_error).mean()
print("X_error mse / X mse after lstsq: ", mse_res / msv_orig)
ram_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print(
f"After Ridge regression {X.shape}-{C}-{K}"
f"({(ram_usage / (1024 * 1024)):.3f} GB)"
)
report_array = np.array(
[
mse_error,
msv_orig,
mse_error / msv_orig,
np.mean(X_orig),
mse_res,
mse_res / msv_orig,
ram_usage / (1024 * 1024),
]
)
return np.ndarray([]), all_prototypes, report_array
```
#### File: python/halutmatmul/halutmatmul.py
```python
from __future__ import annotations
from functools import reduce
from typing import Any, Dict
import numpy as np
import numba
from halutmatmul.decision_tree_and_pq import (
halut_encode_decision_tree,
halut_encode_pq,
learn_proto_and_hash_function_decision_tree,
learn_proto_and_hash_function_full_pq,
)
from halutmatmul.functions import (
get_str_hash_buckets,
halut_encode_opt,
read_luts_opt,
read_luts_quantized_opt,
)
from halutmatmul.maddness_legacy import (
learn_proto_and_hash_function,
maddness_lut,
maddness_quantize_luts,
)
class HalutOfflineStorage:
HASH_TABLES = 0
LUT = 1
CONFIG = 2
PROTOTYPES = 3
MAX = 4
class HalutConfig:
LUT_OFFSET = 0
LUT_SCALE = 1
RUN_OPTIMIZED = 2
QUANTIZE_LUT = 3
UPCAST_EVERY = 4
ENCODING_ALGORITHM = 5
MAX = 6
class ProtoHashReport:
MSE_ERROR = 0
MSV_ORIG = 1
MSE_ERROR_DIV_MSV_ORIG = 2
MEAN_X = 3
MSE_RES = 4
MSE_RES_DIV_MSV_ORIG = 5
RAM_USAGE = 6
class EncodingAlgorithm:
FOUR_DIM_HASH = 0
DECISION_TREE = 1
FULL_PQ = 2
class HalutModuleConfig:
C = 0
ROWS = 1
K = 2
ENCODING_ALGORITHM = 3
MAX = 4
def learn_halut_offline_report(
A: np.ndarray,
B: np.ndarray,
C: int = 16,
K: int = 16,
lut_work_const: int = -1,
quantize_lut: bool = False,
run_optimized: bool = True,
encoding_algorithm: int = EncodingAlgorithm.FOUR_DIM_HASH,
) -> tuple[np.ndarray, Dict[str, Any]]:
mn = HalutMatmul(
C,
K=K,
lut_work_const=lut_work_const,
quantize_lut=quantize_lut,
run_optimized=run_optimized,
encoding_algorithm=encoding_algorithm,
)
mn.learn_offline(A, B)
# print(mn.get_params())
# print(mn.get_stats())
return mn.to_numpy(), mn.get_stats()
def learn_halut_offline(
A: np.ndarray,
B: np.ndarray,
C: int = 16,
K: int = 16,
lut_work_const: int = -1,
quantize_lut: bool = False,
run_optimized: bool = True,
encoding_algorithm: int = EncodingAlgorithm.FOUR_DIM_HASH,
) -> np.ndarray:
mn = HalutMatmul(
C,
K=K,
lut_work_const=lut_work_const,
quantize_lut=quantize_lut,
run_optimized=run_optimized,
encoding_algorithm=encoding_algorithm,
)
mn.learn_offline(A, B)
return mn.to_numpy()
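# Offline/online usage sketch (illustrative, shapes and names not from the benchmarks):
#   store = learn_halut_offline(A_train, B, C=32, K=16)   # numpy object array
#   hm = HalutMatmul().from_numpy(store)
#   out = hm.matmul_online(A_new)   # approximates A_new @ B using only LUT reads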
ENCODING_FUNCTIONS = [
halut_encode_opt, # FOUR_DIM_HASH
halut_encode_decision_tree, # DECISION_TREE
halut_encode_pq, # FULL_PQ
]
LEARNING_FUNCTIONS = [
learn_proto_and_hash_function, # FOUR_DIM_HASH
learn_proto_and_hash_function_decision_tree, # DECISION_TREE
learn_proto_and_hash_function_full_pq, # FULL_PQ
]
class HalutMatmul:
def __init__(
self,
C: int = 16,
K: int = 16,
lut_work_const: int = -1,
quantize_lut: bool = False,
run_optimized: bool = True,
encoding_algorithm: int = EncodingAlgorithm.FOUR_DIM_HASH,
) -> None:
self.C = C
self.K = K
self.encoding_algorithm = encoding_algorithm
self.prototypes: np.ndarray = np.array([])
self.luts: np.ndarray = np.array([])
self.optimized = run_optimized
self.encoding_function = ENCODING_FUNCTIONS[self.encoding_algorithm]
self.learning_function = LEARNING_FUNCTIONS[self.encoding_algorithm]
self.lut_work_const = lut_work_const
self.A_enc: np.ndarray = np.array([])
# EncodingAlgorithm.FOUR_DIM_HASH
self.splits_lists: np.ndarray = np.array([])
# EncodingAlgorithm.DECISION_TREE
self.decision_trees: np.ndarray = np.array([])
self.quantize_lut = quantize_lut
self.upcast_every = 16
self.upcast_every = min(self.C, self.upcast_every)
self.offset: float = 0.0
self.scale: float = 1.0
# important otherwise wrong summation
assert self.upcast_every in (1, 2, 4, 8, 16, 32, 64, 128, 256)
self.accumulate_how = "mean" # sum
self.stats_dict: Dict[str, Any] = dict([])
def __repr__(self) -> str:
return f"<HalutMatmul {self.get_params()}>"
def __str__(self) -> str:
return self.get_params()
def get_stats(self) -> Dict[str, Any]:
return self.stats_dict
def get_params(self) -> str:
params = "=============== \nHalutmatmul parameters\n"
params += f"C: {self.C}, K: {self.K}, lut_work_const: {self.lut_work_const} \n"
params += f"is_learned: {self.is_learned()} \n"
hash_bucket_strings = ""
if self.splits_lists is not None:
if self.prototypes.size > 0:
D = self.prototypes.shape[2]
i = 0
for c in self.splits_lists:
if self.prototypes.size > 0:
hash_bucket_strings += (
f"Bucket {i} dims: "
f"{int(i * D / self.C)} - {int((i + 1) * D / self.C - 1)} \n"
)
hash_bucket_strings += get_str_hash_buckets(c) + "\n"
i += 1
params += (
f"split_lists: {len(self.splits_lists)}, "
f"hash_buckets for prototypes: \n"
f"{hash_bucket_strings} \n"
)
if self.prototypes.size > 0:
params += (
f"prototypes: {self.prototypes.shape}, " f"{self.prototypes.dtype} \n"
)
params += (
f"luts: {self.luts.shape}, "
f"{self.luts.dtype if self.luts is not None else ''} \n"
)
params += f"lut_offset: {self.offset}, lut_scale: {self.scale} \n"
params += (
f"quantize_lut: {self.quantize_lut}, upcast_every: {self.upcast_every} \n"
)
params += "===============\n"
return params
def is_learned(self) -> bool:
return (
self.splits_lists is not None
# and self.prototypes is not None
and self.luts is not None
and self.offset is not None
and self.scale is not None
)
def learn_hash_buckets_and_prototypes(self, A: np.ndarray) -> None:
D = A.shape[1]
if D < self.C:
raise Exception("D < C: {} < {}".format(D, self.C))
(
return_split_list_or_decison_trees,
self.prototypes,
report_array,
) = self.learning_function(
A, self.C, self.K, lut_work_const=self.lut_work_const
) # type: ignore[operator]
if self.encoding_algorithm == EncodingAlgorithm.FOUR_DIM_HASH:
self.splits_lists = return_split_list_or_decison_trees
elif self.encoding_algorithm in [
EncodingAlgorithm.DECISION_TREE,
EncodingAlgorithm.FULL_PQ,
]:
self.decision_trees = return_split_list_or_decison_trees
self.stats_dict["MSE_ERROR"] = report_array[ProtoHashReport.MSE_ERROR]
self.stats_dict["MSV_ORIG"] = report_array[ProtoHashReport.MSV_ORIG]
self.stats_dict["MSE_ERROR_DIV_MSV_ORIG"] = report_array[
ProtoHashReport.MSE_ERROR_DIV_MSV_ORIG
]
self.stats_dict["MSE_RES"] = report_array[ProtoHashReport.MSE_RES]
self.stats_dict["MEAN_X"] = report_array[ProtoHashReport.MEAN_X]
self.stats_dict["MSE_RES_DIV_MSV_ORIG"] = report_array[
ProtoHashReport.MSE_RES_DIV_MSV_ORIG
]
self.stats_dict["RAM_USAGE"] = report_array[ProtoHashReport.RAM_USAGE]
def _check_if_learned(self) -> None:
if not self.is_learned():
raise Exception("Halut online tried but not learned!")
def to_numpy(self) -> np.ndarray:
self._check_if_learned()
if self.encoding_algorithm == EncodingAlgorithm.FOUR_DIM_HASH:
splits = self.splits_lists
elif self.encoding_algorithm in [
EncodingAlgorithm.DECISION_TREE,
EncodingAlgorithm.FULL_PQ,
]:
splits = self.decision_trees.astype(np.float32)
store_array = np.array(
[
splits.astype(np.float32),
self.luts.astype(np.float32),
np.array(
[
self.offset,
self.scale,
self.optimized,
self.quantize_lut,
self.upcast_every,
self.encoding_algorithm,
],
dtype=np.float32,
),
self.prototypes.astype(np.float32),
],
dtype=object,
)
return store_array
def from_numpy(self, numpy_array: np.ndarray) -> HalutMatmul:
config = numpy_array[HalutOfflineStorage.CONFIG]
self.encoding_algorithm = int(config[HalutConfig.ENCODING_ALGORITHM])
self.encoding_function = ENCODING_FUNCTIONS[self.encoding_algorithm]
self.learning_function = LEARNING_FUNCTIONS[self.encoding_algorithm]
if self.encoding_algorithm == EncodingAlgorithm.FOUR_DIM_HASH:
splits_numpy = numpy_array[HalutOfflineStorage.HASH_TABLES]
self.splits_lists = splits_numpy
elif self.encoding_algorithm in [
EncodingAlgorithm.DECISION_TREE,
EncodingAlgorithm.FULL_PQ,
]:
self.decision_trees = numpy_array[HalutOfflineStorage.HASH_TABLES]
self.luts = numpy_array[HalutOfflineStorage.LUT]
self.offset = config[HalutConfig.LUT_OFFSET]
self.scale = config[HalutConfig.LUT_SCALE]
upcast_every = int(config[HalutConfig.UPCAST_EVERY])
self.optimized = bool(config[HalutConfig.RUN_OPTIMIZED])
self.quantize_lut = bool(config[HalutConfig.QUANTIZE_LUT])
if self.encoding_algorithm == EncodingAlgorithm.FULL_PQ:
self.prototypes = numpy_array[HalutOfflineStorage.PROTOTYPES]
# assert self.splits_lists and self.luts.shape[1]
_, C, K = self.luts.shape
self.C = C
self.K = K
self.upcast_every = min(self.C, upcast_every)
assert self.upcast_every in (1, 2, 4, 8, 16, 32, 64, 128, 256)
return self
# redefinition for convenience public function
def learn_A(self, A: np.ndarray) -> None:
self.learn_hash_buckets_and_prototypes(A)
def learn_offline(self, A: np.ndarray, B: np.ndarray) -> None:
self.learn_hash_buckets_and_prototypes(A)
self._set_B(B)
self._check_if_learned()
def apply_matmul_e2e(
self, A: np.ndarray, B: np.ndarray, A_learn: np.ndarray = None
) -> np.ndarray:
if A_learn is None:
self.learn_hash_buckets_and_prototypes(A)
else:
self.learn_hash_buckets_and_prototypes(A_learn)
self._set_A(A)
self._set_B(B)
return self._calc_matmul(
self.A_enc,
self.luts,
offset=self.offset,
scale=self.scale,
)
def encode(self, A: np.ndarray) -> np.ndarray:
idxs = np.zeros((A.shape[0], self.C), np.int32)
if self.encoding_algorithm == EncodingAlgorithm.FOUR_DIM_HASH:
idxs = halut_encode_opt(A, self.splits_lists)
elif self.encoding_algorithm == EncodingAlgorithm.DECISION_TREE:
idxs = halut_encode_decision_tree(A, self.decision_trees)
elif self.encoding_algorithm == EncodingAlgorithm.FULL_PQ:
idxs = halut_encode_pq(A, self.prototypes)
# offsets = [ 0 16 32 48 64 80 96 112 128 144 160 176 192 208 224 240]
offsets = np.arange(self.C, dtype=np.int32) * self.K
return idxs + offsets
def _set_A(self, A: np.ndarray) -> None:
self.A_enc = self.encode(A)
def _set_B(self, B: np.ndarray) -> None:
self.luts, self.offset, self.scale = self._create_lut(B.T)
def _create_lut(self, B: np.ndarray) -> tuple[np.ndarray, float, float]:
B = np.atleast_2d(B)
luts = np.zeros((B.shape[0], self.C, self.K))
print("SHAPES", self.prototypes.shape, luts.shape, B.shape)
for i, q in enumerate(B):
luts[i] = maddness_lut(q, self.prototypes)
if self.quantize_lut:
luts, offset, scale = maddness_quantize_luts(luts)
return luts, offset, scale
return luts, 0, 1
def _calc_matmul(
self,
A_enc: np.ndarray,
B_luts: np.ndarray,
offset: float,
scale: float,
) -> np.ndarray:
A_enc = np.ascontiguousarray(A_enc)
total_result = np.empty((len(B_luts), len(A_enc)), dtype=np.float32)
A_raveled = A_enc.ravel()
if self.optimized:
if self.upcast_every < 2 or not self.quantize_lut:
total_result = read_luts_opt(
A_raveled, A_enc.shape, B_luts, total_result
)
else:
total_result = read_luts_quantized_opt(
A_raveled,
A_enc.shape,
B_luts,
total_result,
self.upcast_every,
self.C,
scale,
offset,
)
else:
for i, lut in enumerate(B_luts):
read_lut = lut.ravel()[A_raveled].reshape(A_enc.shape)
if self.upcast_every < 2 or not self.quantize_lut:
read_lut = read_lut.sum(axis=-1)
else:
# TODO: there is probably room for improvement here
read_lut = read_lut.reshape(
read_lut.shape[0], -1, self.upcast_every
)
if self.accumulate_how == "sum":
# sum upcast_every vals, then clip to mirror saturating
# unsigned addition, then sum without saturation (like u16)
read_lut = read_lut.sum(2)
read_lut = np.clip(read_lut, 0, 255).sum(axis=-1)
elif self.accumulate_how == "mean":
# mirror hierarchical avg_epu8
while read_lut.shape[-1] > 2:
read_lut = (
read_lut[:, :, ::2] + read_lut[:, :, 1::2] + 1
) // 2
read_lut = (read_lut[:, :, 0] + read_lut[:, :, 1] + 1) // 2
read_lut = read_lut.sum(axis=-1) # clipping not needed
# undo biasing; if low bits are {0,0} or {1,1}, no bias
# from the averaging; but if {0,1}, then rounds up by
# .5; happens with prob ~=~ .5, so each avg op adds .25;
# the other tricky thing here is that rounding up when
# you're averaging averages biases it even farther
read_lut *= self.upcast_every # convert mean to sum
# I honestly don't know why this is the formula, but wow
# does it work well
bias = self.C / 4 * np.log2(self.upcast_every)
read_lut -= int(bias)
else:
raise ValueError("accumulate_how must be 'sum' or 'mean'")
if self.quantize_lut:
read_lut = (read_lut / scale) + offset
total_result[i] = read_lut
return total_result.T
def matmul_online(self, A: np.ndarray) -> np.ndarray:
self._check_if_learned()
numba.set_num_threads(min(32, numba.get_num_threads()))
self._set_A(A)
return self._calc_matmul(
self.A_enc, self.luts, offset=self.offset, scale=self.scale
)
def stats(self) -> str:
if self.is_learned():
ret_str = f"Shape LUT: {self.luts.shape}, "
ret_str += f"elements: {reduce(lambda x, y: x * y, self.luts.shape)} \n"
ret_str += f"Actual storage LUT: {self.luts.nbytes / 1024} KB ({self.luts.dtype}) \n"
if self.encoding_algorithm == EncodingAlgorithm.FOUR_DIM_HASH:
numpy_array = self.splits_lists
ret_str += f"Shaple splits_list: {numpy_array.shape}, "
ret_str += (
f"elements: {reduce(lambda x, y: x * y, numpy_array.shape)} \n"
)
ret_str += (
f"Actual storage splits_list: {numpy_array.nbytes / 1024} KB "
f"({numpy_array.dtype}) \n"
)
            elif self.encoding_algorithm in [
                EncodingAlgorithm.DECISION_TREE,
                EncodingAlgorithm.FULL_PQ,
            ]:
pass # TODO: add print function here
return ret_str
else:
return "not learned"
```
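The flattened `lut.ravel()[A_enc]` gather in `_calc_matmul` only works because `encode` adds per-codebook offsets to the indices. A small standalone numpy sketch (toy shapes, not the library API) of that equivalence:
```python
import numpy as np

C, K, N = 4, 16, 8                       # codebooks, prototypes per codebook, rows of A
rng = np.random.default_rng(0)

idxs = rng.integers(0, K, size=(N, C))   # local prototype index per codebook
A_enc = idxs + np.arange(C) * K          # global indices, exactly what encode() returns
lut = rng.random((C, K)).astype(np.float32)  # the LUT for a single output column

# Flattened gather, as in the un-optimized branch of _calc_matmul:
gathered = lut.ravel()[A_enc.ravel()].reshape(A_enc.shape).sum(axis=-1)

# The same thing written as an explicit per-codebook table lookup:
explicit = np.array([sum(lut[c, idxs[n, c]] for c in range(C)) for n in range(N)])

assert np.allclose(gathered, explicit)
print(gathered)
```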
#### File: models/dscnn/main.py
```python
from copy import deepcopy
import time
import torch
import nemo
from torchinfo import summary
from models.dscnn.dataset import AudioProcessor
from models.dscnn.model import DSCNN
from models.dscnn.utils import remove_txt, parameter_generation
# from pthflops import count_ops
from models.dscnn.train import Train
def run_kws_main() -> None:
# Device setup
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
print(torch.version.__version__)
print(device)
# Parameter generation
(
training_parameters,
data_processing_parameters,
) = parameter_generation() # To be parametrized
# Dataset generation
audio_processor = AudioProcessor(training_parameters, data_processing_parameters)
train_size = audio_processor.get_size("training")
valid_size = audio_processor.get_size("validation")
test_size = audio_processor.get_size("testing")
    print(f"Dataset split (Train/valid/test): {train_size}/{valid_size}/{test_size}")
# Model generation and analysis
model = DSCNN(use_bias=True)
model.to(device)
summary(
model,
input_size=(1, 1, 49, data_processing_parameters["feature_bin_count"]),
verbose=2,
col_width=16,
col_names=["kernel_size", "output_size", "num_params", "mult_adds"],
device=device,
row_settings=["var_names"],
)
# Training initialization
trainining_environment = Train(audio_processor, training_parameters, model, device)
# Removing stored inputs and activations
remove_txt()
start = time.clock_gettime(0)
# trainining_environment.train(model)
print(
"Finished Training on GPU in {:.2f} seconds".format(
time.clock_gettime(0) - start
)
)
# Ignoring training, load pretrained model
    # Load onto whichever device was selected above (also works on CPU-only machines).
    model.load_state_dict(torch.load("./model.pth", map_location=device))
# model_int8 = torch.quantization.quantize_dynamic(
# model, # the original model
# {torch.nn.Linear, torch.nn.Conv2d}, # a set of layers to dynamically quantize
# dtype=torch.qint8,
# )
trainining_environment.validate(model)
# acc_int8 = trainining_environment.validate(model_int8)
# print("accuracies", acc_fp32, acc_int8)
# Accuracy on the training set.
# # print ("Training acc")
# acc = trainining_environment.validate(model, mode='training', batch_size=-1, statistics=False)
# # Accuracy on the validation set.
# print ("Validation acc")
# acc = trainining_environment.validate(
# model, mode='validation', batch_size=-1, statistics=False)
# # Accuracy on the testing set.
# print ("Testing acc")
# acc = trainining_environment.validate(model, mode='testing', batch_size=-1, statistics=False)
# Initiating quantization process: making the model quantization aware
# quantized_model = nemo.transform.quantize_pact(
# deepcopy(model), dummy_input=torch.randn((1, 1, 49, 10)).to(device)
# )
# precision_8 = {
# "conv1": {"W_bits": 7},
# "relu1": {"x_bits": 8},
# "conv2": {"W_bits": 7},
# "relu2": {"x_bits": 8},
# "conv3": {"W_bits": 7},
# "relu3": {"x_bits": 8},
# "conv4": {"W_bits": 7},
# "relu4": {"x_bits": 8},
# "conv5": {"W_bits": 7},
# "relu5": {"x_bits": 8},
# "conv6": {"W_bits": 7},
# "relu6": {"x_bits": 8},
# "conv7": {"W_bits": 7},
# "relu7": {"x_bits": 8},
# "conv8": {"W_bits": 7},
# "relu8": {"x_bits": 8},
# "conv9": {"W_bits": 7},
# "relu9": {"x_bits": 8},
# "fc1": {"W_bits": 7},
# }
# quantized_model.change_precision(
# bits=1, min_prec_dict=precision_8, scale_weights=True, scale_activations=True
# )
# Calibrating model's scaling by collecting largest activations
# with quantized_model.statistics_act():
# trainining_environment.validate(
# model=quantized_model, mode="validation", batch_size=128
# )
# quantized_model.reset_alpha_act()
# Remove biases after FQ stage
# quantized_model.remove_bias()
# print("\nFakeQuantized @ 8b accuracy (calibrated):")
# acc = trainining_environment.validate(
# model=quantized_model, mode="testing", batch_size=-1
# )
# quantized_model.qd_stage(eps_in=255.0 / 255) # The activations are already in 0-255
# print("\nQuantizedDeployable @ mixed-precision accuracy:")
# acc = trainining_environment.validate(
# model=quantized_model, mode="testing", batch_size=-1
# )
# quantized_model.id_stage()
# print("\nIntegerDeployable @ mixed-precision accuracy:")
# acc = trainining_environment.validate(
# model=quantized_model, mode="testing", batch_size=-1, integer=True
# )
# Saving the model
# nemo.utils.export_onnx("model.onnx", quantized_model, quantized_model, (1, 49, 10))
# Saving the activations for comparison within Dory
# acc = trainining_environment.validate(
# model=quantized_model, mode="testing", batch_size=1, integer=True, save=True
# )
```
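For reference, a minimal sketch of the dynamic-quantization path that is commented out above, assuming the repo's `models` package is importable; PyTorch's dynamic quantization only rewrites supported layer types such as `Linear`, so the convolutions would be left untouched:
```python
import torch
from models.dscnn.model import DSCNN

model = DSCNN(use_bias=True).eval()
# Only the fully connected layer is replaced; Conv2d is not covered by dynamic quantization.
model_int8 = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
print(model_int8.fc1)  # fc1 is now a dynamically quantized Linear module
```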
#### File: models/dscnn/model.py
```python
import torch
from torch import nn
from models.dscnn.utils import npy_to_txt
class DSCNN(torch.nn.Module):
def __init__(self, use_bias: bool=False) -> None:
super().__init__()
self.pad1 = nn.ConstantPad2d((1, 1, 5, 5), value=0.0)
self.conv1 = torch.nn.Conv2d(
in_channels=1,
out_channels=64,
kernel_size=(10, 4),
stride=(2, 2),
bias=use_bias,
)
self.bn1 = torch.nn.BatchNorm2d(64)
self.relu1 = torch.nn.ReLU()
self.pad2 = nn.ConstantPad2d((1, 1, 1, 1), value=0.0)
self.conv2 = torch.nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=(3, 3),
stride=(1, 1),
groups=64,
bias=use_bias,
)
self.bn2 = torch.nn.BatchNorm2d(64)
self.relu2 = torch.nn.ReLU()
self.conv3 = torch.nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=(1, 1),
stride=(1, 1),
bias=use_bias,
)
self.bn3 = torch.nn.BatchNorm2d(64)
self.relu3 = torch.nn.ReLU()
self.pad4 = nn.ConstantPad2d((1, 1, 1, 1), value=0.0)
self.conv4 = torch.nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=(3, 3),
stride=(1, 1),
groups=64,
bias=use_bias,
)
self.bn4 = torch.nn.BatchNorm2d(64)
self.relu4 = torch.nn.ReLU()
self.conv5 = torch.nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=(1, 1),
stride=(1, 1),
bias=use_bias,
)
self.bn5 = torch.nn.BatchNorm2d(64)
self.relu5 = torch.nn.ReLU()
self.pad6 = nn.ConstantPad2d((1, 1, 1, 1), value=0.0)
self.conv6 = torch.nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=(3, 3),
stride=(1, 1),
groups=64,
bias=use_bias,
)
self.bn6 = torch.nn.BatchNorm2d(64)
self.relu6 = torch.nn.ReLU()
self.conv7 = torch.nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=(1, 1),
stride=(1, 1),
bias=use_bias,
)
self.bn7 = torch.nn.BatchNorm2d(64)
self.relu7 = torch.nn.ReLU()
self.pad8 = nn.ConstantPad2d((1, 1, 1, 1), value=0.0)
self.conv8 = torch.nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=(3, 3),
stride=(1, 1),
groups=64,
bias=use_bias,
)
self.bn8 = torch.nn.BatchNorm2d(64)
self.relu8 = torch.nn.ReLU()
self.conv9 = torch.nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=(1, 1),
stride=(1, 1),
bias=use_bias,
)
self.bn9 = torch.nn.BatchNorm2d(64)
self.relu9 = torch.nn.ReLU()
self.avg = torch.nn.AvgPool2d(kernel_size=(25, 5), stride=1)
self.fc1 = torch.nn.Linear(64, 12, bias=use_bias)
# self.soft = torch.nn.Softmax(dim=1)
# self.soft = F.log_softmax(x, dim=1)
# CONV2D replacing Block1 for evaluation purposes
# self.pad2 = nn.ConstantPad2d((1, 1, 1, 1), value=0.)
# self.conv2 = torch.nn.Conv2d(in_channels = 64,
# out_channels = 64, kernel_size = (3, 3), stride = (1, 1), groups = 1, bias = use_bias)
# self.bn2 = torch.nn.BatchNorm2d(64)
# self.relu2 = torch.nn.ReLU()
def forward(self, x: torch.Tensor, save: bool=False) -> torch.Tensor:
if save:
x = self.pad1(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
npy_to_txt(0, x.int().cpu().detach().numpy())
print("Sum: ", str(torch.sum(x.int())))
x = self.pad2(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
npy_to_txt(1, x.int().cpu().detach().numpy())
print("Sum: ", str(torch.sum(x.int())))
x = self.conv3(x)
x = self.bn3(x)
x = self.relu3(x)
npy_to_txt(2, x.int().cpu().detach().numpy())
print("Sum: ", str(torch.sum(x.int())))
x = self.pad4(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu4(x)
npy_to_txt(3, x.int().cpu().detach().numpy())
print("Sum: ", str(torch.sum(x.int())))
x = self.conv5(x)
x = self.bn5(x)
x = self.relu5(x)
npy_to_txt(4, x.int().cpu().detach().numpy())
print("Sum: ", str(torch.sum(x.int())))
x = self.pad6(x)
x = self.conv6(x)
x = self.bn6(x)
x = self.relu6(x)
npy_to_txt(5, x.int().cpu().detach().numpy())
print("Sum: ", str(torch.sum(x.int())))
x = self.conv7(x)
x = self.bn7(x)
x = self.relu7(x)
npy_to_txt(6, x.int().cpu().detach().numpy())
print("Sum: ", str(torch.sum(x.int())))
x = self.pad8(x)
x = self.conv8(x)
x = self.bn8(x)
x = self.relu8(x)
npy_to_txt(7, x.int().cpu().detach().numpy())
print("Sum: ", str(torch.sum(x.int())))
x = self.conv9(x)
x = self.bn9(x)
x = self.relu9(x)
npy_to_txt(8, x.int().cpu().detach().numpy())
print("Sum: ", str(torch.sum(x.int())))
x = self.avg(x)
npy_to_txt(9, x.int().cpu().detach().numpy())
x = torch.flatten(x, 1)
x = self.fc1(x)
npy_to_txt(10, x.int().cpu().detach().numpy())
else:
x = self.pad1(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.pad2(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu3(x)
x = self.pad4(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu4(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu5(x)
x = self.pad6(x)
x = self.conv6(x)
x = self.bn6(x)
x = self.relu6(x)
x = self.conv7(x)
x = self.bn7(x)
x = self.relu7(x)
x = self.pad8(x)
x = self.conv8(x)
x = self.bn8(x)
x = self.relu8(x)
x = self.conv9(x)
x = self.bn9(x)
x = self.relu9(x)
x = self.avg(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
return x # To be compatible with Dory
# return F.log_softmax(x, dim=1)
# return F.softmax(x, dim=1)
```
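A quick shape sanity check for the network above (assuming the repo's `models` package is on the path); the dummy input matches the `summary(...)` call in `main.py`:
```python
import torch
from models.dscnn.model import DSCNN

model = DSCNN(use_bias=True).eval()
dummy = torch.randn(1, 1, 49, 10)   # (batch, channel, time frames, feature bins)
with torch.no_grad():
    logits = model(dummy)
print(logits.shape)                 # torch.Size([1, 12]) -- one score per keyword class
```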
#### File: models/dscnn/train.py
```python
from typing import Any
import torch
import torch.nn.functional as F
from models.dscnn.dataset import AudioProcessor, AudioGenerator
from models.dscnn.utils import conf_matrix, npy_to_txt
class Train:
def __init__(
self,
audio_processor: AudioProcessor,
training_parameters: dict[str, Any],
model: torch.nn.Module,
device: torch.device,
) -> None:
self.audio_processor = audio_processor
self.training_parameters = training_parameters
self.model = model
self.device = device
# Training hyperparameters
self.criterion = torch.nn.CrossEntropyLoss()
        initial_lr = 0.001
        self.optimizer = torch.optim.Adam(model.parameters(), lr=initial_lr)
# pylint: disable=unnecessary-lambda-assignment
lambda_lr = (
lambda epoch: 1
if epoch < 15
else 1 / 5
if epoch < 25
else 1 / 10
if epoch < 35
else 1 / 20
)
self.scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lambda_lr)
def validate(
self,
model: torch.nn.Module,
mode: str = "validation",
batch_size: int = -1,
statistics: bool = False,
integer: bool = False,
save: bool = False,
) -> float:
# Validate model
training_parameters = self.training_parameters
training_parameters["batch_size"] = batch_size
data = AudioGenerator(mode, self.audio_processor, training_parameters)
model.eval()
correct = 0
total = 0
with torch.no_grad():
inputs_, labels_ = data[0]
inputs = torch.Tensor(inputs_[:, None, :, :]).to(self.device)
labels = torch.Tensor(labels_).long().to(self.device)
model = model.to(self.device)
if integer:
model = model.cpu()
inputs = inputs * 255.0 / 255
inputs = inputs.type(torch.uint8).type(torch.float).cpu()
if save:
model = model.cpu()
inputs = inputs.type(torch.uint8).type(torch.float).cpu()
outputs = F.softmax(model(inputs, save), dim=1)
outputs = outputs.to(self.device)
npy_to_txt(-1, inputs.int().cpu().detach().numpy())
else:
outputs = F.softmax(model(inputs), dim=1)
outputs = outputs.to(self.device)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
if statistics is True:
conf_matrix(labels, predicted, self.training_parameters)
print(
"Accuracy of the network on the %s set: %.2f %%"
% (mode, 100 * correct / total)
)
return 100 * correct / total
def train(self, model: torch.nn.Module) -> None:
# Train model
best_acc = 0.0
for epoch in range(0, self.training_parameters["epochs"]):
print(
"Epoch: "
+ str(epoch + 1)
+ "/"
+ str(self.training_parameters["epochs"])
)
data = AudioGenerator(
"training", self.audio_processor, self.training_parameters
)
model.train()
self.scheduler.step()
running_loss = 0.0
total = 0
correct = 0
for minibatch in range(len(data)):
inputs_, labels_ = data[0]
inputs = torch.Tensor(inputs_[:, None, :, :]).to(self.device)
labels = torch.Tensor(labels_).to(self.device).long()
# Zero out the parameter gradients after each mini-batch
self.optimizer.zero_grad()
# Train, compute loss, update optimizer
model = model.to(self.device)
                # Note: CrossEntropyLoss applies log-softmax internally, so feeding it
                # softmax probabilities effectively double-normalizes the logits.
                outputs = F.softmax(model(inputs), dim=1)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
# Compute training statistics
running_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
# Print information every 20 minibatches
if minibatch % 20 == 0:
print(
"[%3d / %3d] loss: %.3f accuracy: %.3f"
% (
minibatch + 1,
len(data),
running_loss / 10,
100 * correct / total,
)
)
running_loss = 0.0
tmp_acc = self.validate(model, "validation", 128)
# Save best performing network
if tmp_acc > best_acc:
best_acc = tmp_acc
PATH = "./model_acc_" + str(best_acc) + ".pth"
torch.save(model.state_dict(), PATH)
PATH = "./model.pth"
torch.save(model.state_dict(), PATH)
```
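The nested-conditional lambda in `__init__` defines a step-wise decay at epochs 15, 25 and 35. A standalone sketch with a dummy parameter (not the KWS loop) that just prints the resulting learning rates:
```python
import torch

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.Adam([param], lr=0.001)
lambda_lr = (
    lambda epoch: 1 if epoch < 15 else 1 / 5 if epoch < 25 else 1 / 10 if epoch < 35 else 1 / 20
)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda_lr)

for epoch in range(40):
    optimizer.step()        # no gradients here; this is only to satisfy the step order
    scheduler.step()
    if epoch in (14, 24, 34):
        print(epoch + 1, optimizer.param_groups[0]["lr"])  # 2e-4, 1e-4, 5e-5
```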
#### File: python/test/test_halut.py
```python
import functools
import timeit
from test.utils.utils import check_if_error_normal_dist_around_zero, error_hist_numpy
import numpy as np
import pytest
import halutmatmul.halutmatmul as hm
def helper_halut(
N: int = 128,
D: int = 64,
M: int = 16,
C: int = 16,
lut_work_const: int = -1,
a: float = 1.0,
b: float = 0.0,
K: int = 16,
quantize_lut: bool = False,
run_optimized: bool = True,
encoding_algorithm: int = hm.EncodingAlgorithm.FOUR_DIM_HASH,
) -> None:
print("=====TEST=====")
print(
f"params: ({N}, {D}, {M}), C: {C}, a: {a}, b: {b}, quantize_lut: {quantize_lut}, "
f"run_optimized: {run_optimized}, K: {K}, encoding_algorithm: {encoding_algorithm}"
)
A = (np.random.random((N, D)) + b) * a
B = (np.random.random((D, M)) + b) * a
store_array = hm.learn_halut_offline(
A,
B,
C=C,
K=K,
lut_work_const=lut_work_const,
quantize_lut=quantize_lut,
run_optimized=run_optimized,
encoding_algorithm=encoding_algorithm,
)
new_halut = hm.HalutMatmul()
new_halut.from_numpy(store_array)
# time_learning = (
# timeit.Timer(functools.partial(hm.learn_halut_offline, *[A, B, C])).timeit(5)
# * 1000
# / 5
# )
# import cProfile
# from pstats import SortKey
# with cProfile.Profile() as pr:
# hm.learn_halut_offline(A, B, C)
# pr.disable()
# pr.print_stats(sort=SortKey.CUMULATIVE)
# print("Time learning: %.2f ms" % (time_learning))
print(new_halut.stats())
# print(new_halut.get_params())
# accuracy test
A_2 = (np.random.random((N // 4, D)) + b) * a
res_halut = new_halut.matmul_online(A_2)
res_numpy = np.matmul(A_2, B)
error_hist_numpy(res_halut, res_numpy)
check_if_error_normal_dist_around_zero(res_halut, res_numpy)
time_halut = (
timeit.Timer(functools.partial(new_halut.matmul_online, *[A_2])).timeit(5)
* 1000
/ 5
)
time_numpy = (
timeit.Timer(functools.partial(np.matmul, *[A_2, B])).timeit(5) * 1000 / 5
)
print(
"time calculation numpy/halutmatmul fp: %.2f / %.2f ms"
% (time_numpy, time_halut)
)
mse = np.square(res_halut - res_numpy).mean()
mae = np.abs(res_halut - res_numpy).mean()
# pylint: disable=E1307
print("mse: %.4f / mae: %.4f" % (mse, mae))
@pytest.mark.parametrize(
"N, D, M, K, C, a, b, encoding_algorithm",
[
(N, D, M, K, C, a, b, e)
for N in [2048]
for D in [512]
for M in [64, 128]
for C in [16, 32, 64]
for a in [1.0] # 5.0
for b in [0.0]
for e in [
hm.EncodingAlgorithm.FOUR_DIM_HASH,
hm.EncodingAlgorithm.DECISION_TREE,
hm.EncodingAlgorithm.FULL_PQ,
]
for K in (
[8, 16, 32, 64]
if e == hm.EncodingAlgorithm.FOUR_DIM_HASH
else [4, 8, 12, 16, 24, 32, 64]
)
# for q in [True, False]
# for r in [True, False]
],
)
def test_learn_offline(
N: int, D: int, M: int, K: int, C: int, a: float, b: float, encoding_algorithm: int
) -> None:
np.random.seed(4419)
quantize_lut = False
run_optimized = True
helper_halut(
N,
D,
M,
C,
K=K,
a=a,
b=b,
quantize_lut=quantize_lut,
run_optimized=run_optimized,
encoding_algorithm=encoding_algorithm,
)
```
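Outside of pytest, the helper can also be driven directly for a quick smoke test; the sizes below are arbitrary and the import path assumes the repo's `python` directory layout:
```python
import numpy as np
import halutmatmul.halutmatmul as hm
from test_halut import helper_halut  # assumes python/test is the working directory

np.random.seed(4419)
helper_halut(N=256, D=64, M=16, C=8, K=16,
             encoding_algorithm=hm.EncodingAlgorithm.FOUR_DIM_HASH)
```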
#### File: python/test/test_linear.py
```python
from collections import OrderedDict
from test.utils.utils import helper_test_module
import torch
import pytest
from halutmatmul.modules import HalutLinear
import halutmatmul.halutmatmul as hm
def linear_helper(
in_features: int,
out_features: int,
bias: bool,
n_row_learn: int,
n_row_test: int,
C: int,
a: float = 1.0,
b: float = 0.0,
) -> None:
torch.manual_seed(4419)
weights = torch.rand((out_features, in_features))
bias_weights = torch.rand((out_features))
input_learn = (torch.rand((n_row_learn, in_features)) + b) * a
input_test = (torch.rand((n_row_test, in_features)) + b) * a
learn_numpy = input_learn.detach().cpu().numpy()
weights_numpy = weights.detach().cpu().numpy().transpose(1, 0)
store_array = hm.learn_halut_offline(
learn_numpy, weights_numpy, C=C, lut_work_const=-1
)
torch_module = torch.nn.Linear(
in_features=in_features, out_features=out_features, bias=bias
)
halutmatmul_module = HalutLinear(
in_features=in_features, out_features=out_features, bias=bias
)
state_dict = OrderedDict({"weight": weights})
if bias:
state_dict = OrderedDict(state_dict | OrderedDict({"bias": bias_weights}))
torch_module.load_state_dict(state_dict, strict=False)
state_dict = OrderedDict(
state_dict
| OrderedDict(
{
"store_input": torch.zeros(1, dtype=torch.bool),
"halut_active": torch.ones(1, dtype=torch.bool),
"hash_buckets_or_prototypes": torch.from_numpy(
store_array[hm.HalutOfflineStorage.HASH_TABLES]
),
"lut": torch.from_numpy(store_array[hm.HalutOfflineStorage.LUT]),
"halut_config": torch.from_numpy(
store_array[hm.HalutOfflineStorage.CONFIG]
),
}
)
)
halutmatmul_module.load_state_dict(state_dict, strict=False)
print("======== TEST =========")
print(
f"params: C: {C}, in: {in_features}, out: {out_features}, bias: {bias}, "
f"n_row_learn: {n_row_learn}, n_row_test: {n_row_test}, a: {a}, b: {b}"
)
helper_test_module(input_test, torch_module, halutmatmul_module)
@pytest.mark.parametrize(
"in_features, out_features, C, a, b, bias",
[
(in_features, out_features, C, a, b, bias)
for in_features in [512, 2048]
for out_features in [10, 1000]
for C in [4, 16, 64]
for a in [1.0, 10.0]
for b in [0.0, 10.0]
for bias in [True, False]
],
)
def test_linear_module(
in_features: int, out_features: int, C: int, a: float, b: float, bias: bool
) -> None:
n_row_learn = 10000
n_row_test = 2000
linear_helper(
in_features,
out_features,
bias,
n_row_learn,
n_row_test,
C,
a,
b,
)
``` |
{
"source": "JoenyBui/django-folder-tree",
"score": 2
} |
#### File: django-folder-tree/folder_tree/models.py
```python
import os
import shutil
import json
import logging
from django.conf import settings
from django.core.mail import send_mail, EmailMessage
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from mptt.models import MPTTModel, TreeForeignKey
from mptt.utils import tree_item_iterator
from . import toolkit as utk
from . import global_setting as gs
__author__ = 'jbui'
log = logging.getLogger(__name__)
def user_file_path(instance, filename):
"""
User File Path
:param instance:
:param filename:
:return:
"""
d = timezone.now()
return '{0}/{1}/{2}/{3}/{4}'.format(
instance.user.id,
d.year,
d.month,
d.day,
filename)
class TreeFolder(MPTTModel):
"""
Tree folder is used to link a file tree structure that is used to replicate what will be stored in
the servers. The user will create a new folder (or remove) and then progress afterwards.
:type name: folder name
:type parent: parent key
:type user: user model
:type is_locked: If folder/files locked from changes.
:type created: created date
:type modified: modified date
"""
name = models.CharField(max_length=255)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', default=0)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
is_locked = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True, null=False, blank=True)
modified = models.DateTimeField(auto_now_add=True, null=False, blank=True)
def __str__(self):
"""
:return:
"""
return 'Folder: %s' % self.name
def save(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return:
"""
self.modified = timezone.now()
super(TreeFolder, self).save(*args, **kwargs)
class MPTTMeta:
"""
That MPTTMeta class adds some tweaks to django-mptt - in this case, just order_insertion_by.
This indicates the natural ordering of the data in the tree.
"""
order_insertion_by = ['name']
def get_file_type(self):
"""
Return the folder file type.
:return:
"""
if hasattr(self, 'projectfolder'):
return self.projectfolder.get_file_type()
else:
return 'folder'
def is_valid(self, error, **kwargs):
"""
Is valid for the user.
:param error:
:param kwargs:
:return:
"""
valid = True
# Need to make sure that the parent folder key is the same user as the current folder key.
if self.parent:
if self.parent.user != self.user:
valid = False
error['user'] = 'Folder does not belong to user.'
if kwargs.get('path'):
parent = TreeProfile.get_tree_folder(self.user, kwargs.get('path'))
if not parent:
valid = False
error['path'] = '%s is not valid' % kwargs.get('path')
else:
self.parent = parent
name = kwargs.get('name')
if parent and name:
# Path already exists.
for folder in parent.get_children():
if folder.name == name:
                        error['name'] = 'Path already exists: %s%s%s' % (parent.virtual_folder, os.sep, name)
valid = False
return valid
def get_path(self):
"""
Get the path of the folder including the home folder.
:return:
"""
path = self.name
new_folder = self.parent
while new_folder:
path = os.path.join(new_folder.name, path)
new_folder = new_folder.parent
return path
@property
def virtual_folder(self):
"""
Return the virtual folder of the path.
:return:
"""
folders = [self.name]
new_folder = self.parent
while new_folder:
folders.append(new_folder.name)
new_folder = new_folder.parent
path = ""
for name in folders[:-1]:
path = os.path.join(name, path)
return path
def create_folder(self):
"""
Create the folder of the path.
"""
path = os.path.join(gs.LOCATION_USER_STORAGE, self.get_path())
if not os.path.isdir(path):
os.mkdir(path)
def delete_folder(self):
"""
Get the path with the delete folder.
"""
path = os.path.join(gs.LOCATION_USER_STORAGE, self.get_path())
if os.path.isdir(path):
shutil.rmtree(path)
self.delete()
class TreeProfile(models.Model):
"""
Tree Profile is used to link with django user. This gives the user the ability to create a MPTT file structure
in the database quickly.
The User Profile model inherits from Django's Model class and linked to the base User class through a one-to-one
relationship.
:type user: user model
:type root_folder: folder root
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL)
    root_folder = models.ForeignKey(TreeFolder, null=True, blank=True, default=None)
def __str__(self):
return self.user.username
def get_children(self):
"""
Get children
:return:
"""
root = self.root_folder
return utk.tree_item_to_dict(root)
def get_jstree(self):
"""
:return:
"""
# { id : 'ajson1', parent : '#', text : 'Simple root node', state: { opened: true} },
# { id : 'ajson2', parent : '#', text : 'Root node 2', state: { opened: true} },
# { id : 'ajson3', parent : 'ajson2', text : 'Child 1', state: { opened: true} },
# { id : 'ajson4', parent : 'ajson2', text : 'Child 2' , state: { opened: true}}
root = self.root_folder
jstree = [dict(
id=root.id,
parent='#',
text=root.name,
state=dict(opened=True)
)]
utk.jstree_item_to_dict(root, jstree)
return jstree
@staticmethod
def get_tree_folder(user, path):
"""
Get the tree folder given the path.
:param user:
:param path:
:return:
"""
folder = None
uprof = TreeProfile.objects.get(user=user)
root_folder = uprof.root_folder
if root_folder:
folder = root_folder
paths = utk.split_path(path)
for folder_name in paths[:]:
if folder_name == '' or folder_name == user.username:
continue
else:
                    for cur_folder in folder.get_children():
                        if cur_folder.name == folder_name:
                            folder = cur_folder
                            # Found the folder, so we leave the loop.
                            break
                    else:
                        # No child matched this path segment, so the path is invalid.
                        return None
return folder
@property
def root_path(self):
"""
Root path.
:return:
"""
return self.root_folder.name
@property
def root_virtual_path(self):
"""
Root virtual path.
:return:
"""
return os.path.join(self.root_folder.name)
def create_root(self):
"""
Create a root node in the database, and the folder in the storage disk.
"""
self.root_folder = TreeFolder.objects.create(user=self.user, name='root', parent=None)
def delete_root(self):
"""
Delete the root folder with everything underneath.
"""
pass
def create_tree_folder(self, name, parent):
"""
Create tree folder.
:param name: Name of folder
:param parent: Parent tree folder.
:return:
"""
folder = TreeFolder.objects.create(name=name, user=self.user, parent=parent)
folder.save()
return folder
def create_folder(self, path, force_path=True):
"""
Given a path, create a TreeFolder.
:param path: path of the folder to create.
:param force_path: if the intermediary folder does not exists, create it
"""
texts = utk.split_path(path)
new_folder = self.root_folder
folder_path = self.root_path
for folder in texts[1:]:
# Look inside the storage to see if the system has the folder.
folder_found = False
# Get the folders item.
for folder_item in new_folder.get_children():
if folder_item.name == folder:
new_folder = folder_item
if utk.is_dir(folder_path, folder):
folder_path = os.path.join(folder_path, folder)
folder_found = True
else:
if force_path:
folder_path = utk.make_dir(folder_path, folder)
folder_found = True
else:
return False
# Exit loop
break
# If there is no children folder - force the folder create.
if not folder_found:
if force_path:
# Create a new folder.
new_folder = TreeFolder.objects.create(name=folder, parent=new_folder, is_locked=False)
folder_path = utk.make_dir(folder_path, folder)
else:
return False
return True
def delete_folder(self, folder):
"""
Delete a folder given a path.
:param folder: path of the folder to delete.
"""
if isinstance(folder, TreeFolder):
            # The Trash model only defines `profile` and `prev` (the original parent),
            # so record the parent the folder is being detached from.
            trash = Trash.objects.create(profile=self, prev=folder.parent)
            trash.save()
folder.parent = None
folder.save()
else:
#TODO: Check if it's a primary key
#TODO: Otherwise check if it's a path.
pass
return True
def get_folder(self, path):
"""
Return the tree folder given the path.
:param path:
:return:
"""
folder_names = utk.split_path(path)
folder = self.root_folder
for name in folder_names[1:]:
for folder_child in folder.get_children():
if folder_child.name == name:
folder = folder_child
                    break
return folder
def get_path(self, path):
"""
Pass a path and then we parse it to the real path.
:param path:
:return:
"""
texts = utk.split_path(path)
texts[0] = self.root_folder.name
return os.sep.join(texts)
def get_folder_json(self, show_files):
"""
Get the json folder structure.
:param show_files:
:return:
"""
data = {
'data': utk.tree_item_to_dict(self.root_folder, show_files)
}
# Change the first root node label to the current user name.
data['data']['text'] = self.user.username
return json.dumps(data)
class ProjectFolder(TreeFolder):
"""
Project folder.
:type app_type: application type
"""
app_type = models.IntegerField(choices=gs.JOB_TYPE, default=1)
def get_file_type(self):
"""
Return the folder file type.
:return:
"""
return 'project_folder'
def get_path(self):
"""
Get the path of the folder including the home folder.
:return:
"""
path = self.name
new_folder = self.parent
while new_folder:
path = os.path.join(new_folder.name, path)
new_folder = new_folder.parent
return path
class TreeFile(models.Model):
"""
Parent tree file for application type file.
File will only exists within project folders, ensuring that there is no subdirectory outside
of the the project folder app.
:type name:
:type user:
:type folder: project folder model
:type is_executable: check if the files is executable.
:type is_locked: folder/files locked from changes.
:type created:
:type modified:
"""
name = models.CharField(max_length=255, null=True, blank=True)
user = models.ForeignKey(User)
folder = models.ForeignKey(ProjectFolder, null=True, blank=True)
is_executable = models.BooleanField(default=False, blank=True)
is_locked = models.BooleanField(default=False)
created = models.DateTimeField(null=False, blank=True, auto_now_add=True)
modified = models.DateTimeField(null=False, blank=True, auto_now_add=True)
def __str__(self):
return 'File: %s' % self.name
class Meta:
abstract = True
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
# Update modified date.
self.modified = timezone.now()
super(TreeFile, self).save(force_insert=force_insert, force_update=force_update, using=using,
update_fields=update_fields)
def is_valid(self, error, **kwargs):
return True
@property
def real_path(self):
"""
Find the real path of the code.
:return:
"""
return os.path.join(gs.LOCATION_USER_STORAGE, self.folder.get_path(), self.get_file_name())
@property
def virtual_path(self):
"""
Virtual path.
:return:
"""
return os.path.join(self.folder.get_path(), self.get_file_name())
def create_file(self):
"""
Create a new file.
"""
root_folder = self.folder
def get_file_name(self):
"""
Base class needs to override this method.
OVERRIDE THIS METHOD
:return:
"""
return self.name
def delete_file(self):
pass
class Trash(models.Model):
"""
Trash folder.
:type profile: one-to-many relationship
:type prev: original parent folder
"""
profile = models.ForeignKey(TreeProfile, on_delete=models.CASCADE)
prev = models.ForeignKey(TreeFolder, null=True, blank=True)
class InputFile(TreeFile):
"""
Input File.
:type name:
:type user:
:type folder: project folder model - one input file equals to one project folder
:type is_executable: check if the files is executable.
:type is_locked: folder/files locked from changes.
:type created:
:type modified:
"""
name = models.CharField(max_length=255, null=True, blank=True)
user = models.ForeignKey(User)
folder = models.OneToOneField(ProjectFolder, on_delete=models.CASCADE)
is_executable = models.BooleanField(default=False, blank=True)
is_locked = models.BooleanField(default=False)
created = models.DateTimeField(null=False, blank=True, auto_now_add=True)
modified = models.DateTimeField(null=False, blank=True, auto_now_add=True)
def header(self):
return "#!^%s^!#"
def folder_name(self):
return "%s_%d_%s" % (self.header(), self.id, self.name)
class Meta:
abstract = True
@property
def real_folder(self):
"""
Read folder.
:return:
"""
return os.path.join(gs.LOCATION_USER_STORAGE, self.folder.get_path())
@property
def virtual_folder(self):
"""
Virtual folder
:return:
"""
return os.path.join(self.folder.get_path())
@property
def real_path(self):
"""
Find the real path of the code.
:return:
"""
return os.path.join(self.real_folder, self.get_file_name())
# @property
# def virtual_path(self):
# """
# Virtual path of the input path.
# :return:
# """
# return os.path.join(self.virtual_folder, self.get_file_name())
def create_input_folder(self):
"""
Create input folder.
"""
path = self.real_folder
if not os.path.isdir(path):
os.mkdir(path)
class ImageFile(TreeFile):
"""
Create an image file.
:type file_type:
:type photo:
"""
file_type = models.IntegerField(choices=gs.IMAGE_TYPE, default=-1)
photo = models.ImageField(upload_to=user_file_path)
class GeneralFile(TreeFile):
"""
Create results field for the files that exist in the storage bin.
:type file_type:
:type file:
"""
file_type = models.IntegerField(choices=gs.FILE_TYPE, default=-1)
file = models.FileField(upload_to=user_file_path, default='default.txt')
def set_ext(self, ext_name):
"""
        Determine the file type from the file extension.
:param ext_name:
"""
for id, name in gs.FILE_TYPE:
if name == ext_name.lower()[1:]:
self.file_type = id
break
def get_file_name(self):
"""
Return the filename with extension.
:return:
"""
return self.name + '.' + gs.FILE_TYPE[self.file_type][1]
def get_file_type(self):
"""
Return file type.
:return:
"""
return gs.FILE_TYPE[self.file_type][1]
def get_mime(self):
"""
Return the mime type for the file.
:return:
"""
return gs.get_mime(self.file_type)
def send_message(self, email):
"""
Send message of the file.
:param email: email address
:return:
"""
subject = 'Subject here'
message = 'Here is the message'
        try:
            mail = EmailMessage(subject, message, settings.DEFAULT_FROM_EMAIL, [email])
            # Attach the stored file itself; the original code only assigned
            # `self.folder` to an unused local variable.
            mail.attach_file(self.real_path)
            mail.send()
        except Exception:
            log.exception('Failed to email file %s.', self.name)
``` |
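Illustrative usage of the profile/folder models, assuming a configured Django project with these apps migrated (the module path `folder_tree.models` is taken from the file header):
```python
from django.contrib.auth.models import User
from folder_tree.models import TreeProfile

user = User.objects.create_user("alice", password="secret")
profile = TreeProfile.objects.create(user=user)
profile.create_root()
profile.save()

docs = profile.create_tree_folder("docs", parent=profile.root_folder)
print(docs.get_path())       # "root/docs"
print(profile.get_jstree())  # list of dicts in the format jsTree expects
```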
{
"source": "joenye/cfn-python-lint",
"score": 2
} |
#### File: rules/mappings/ApproachingLimitAttributes.py
```python
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
from cfnlint.helpers import LIMITS
class LimitAttributes(CloudFormationLintRule):
"""Check maximum Mapping attribute limit"""
id = 'I7012'
shortdesc = 'Mapping attribute limit'
description = 'Check if the amount of Mapping attributes in the template is approaching the upper limit'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html'
tags = ['mappings', 'limits']
def match(self, cfn):
"""Check CloudFormation Mappings"""
matches = []
mappings = cfn.template.get('Mappings', {})
for mapping_name, mapping in mappings.items():
for mapping_attribute_name, mapping_attribute in mapping.items():
path = ['Mappings', mapping_name, mapping_attribute_name]
if LIMITS['threshold'] * LIMITS['mappings']['attributes'] < len(mapping_attribute) <= LIMITS['mappings']['attributes']:
message = 'The amount of mapping attributes ({0}) is approaching the limit ({1})'
matches.append(RuleMatch(path, message.format(
len(mapping_attribute), LIMITS['mappings']['attributes'])))
return matches
```
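The `LIMITS['threshold'] * limit < len(...) <= limit` pattern, shared with the parameter-count rule below, only fires in the band just under the hard limit. A tiny sketch with assumed numbers (the real values live in `cfnlint.helpers.LIMITS`):
```python
# Assumed values for illustration only.
threshold, attribute_limit = 0.7, 200

def approaching(count: int) -> bool:
    return threshold * attribute_limit < count <= attribute_limit

print([approaching(n) for n in (140, 141, 200, 201)])  # [False, True, True, False]
```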
#### File: rules/parameters/ApproachingLimitNumber.py
```python
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
from cfnlint.helpers import LIMITS
class LimitNumber(CloudFormationLintRule):
"""Check maximum Parameter limit"""
id = 'I2010'
shortdesc = 'Parameter limit'
description = 'Check the number of Parameters in the template is approaching the upper limit'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html'
tags = ['parameters', 'limits']
def match(self, cfn):
"""Check CloudFormation Parameters"""
matches = []
# Check number of parameters against the defined limit
parameters = cfn.template.get('Parameters', {})
if LIMITS['threshold'] * LIMITS['parameters']['number'] < len(parameters) <= LIMITS['parameters']['number']:
message = 'The number of parameters ({0}) is approaching the limit ({1})'
matches.append(RuleMatch(['Parameters'], message.format(
len(parameters), LIMITS['parameters']['number'])))
return matches
```
#### File: rules/resources/DependsOn.py
```python
import six
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class DependsOn(CloudFormationLintRule):
"""Check Base Resource Configuration"""
id = 'E3005'
shortdesc = 'Check DependsOn values for Resources'
description = 'Check that the DependsOn values are valid'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html'
tags = ['resources', 'dependson']
def check_value(self, key, path, resources):
"""Check resource names for DependsOn"""
matches = []
if not isinstance(key, (six.text_type, six.string_types)):
message = 'DependsOn values should be of string at {0}'
matches.append(RuleMatch(path, message.format('/'.join(map(str, path)))))
return matches
if key not in resources:
message = 'DependsOn should reference other resources at {0}'
matches.append(RuleMatch(path, message.format('/'.join(map(str, path)))))
return matches
def match(self, cfn):
"""Check CloudFormation Resources"""
matches = []
resources = cfn.get_resources()
for resource_name, resource_values in resources.items():
depends_ons = resource_values.get('DependsOn')
if depends_ons:
path = ['Resources', resource_name, 'DependsOn']
self.logger.debug('Validating DependsOn for %s base configuration', resource_name)
if isinstance(depends_ons, list):
for index, depends_on in enumerate(depends_ons):
matches.extend(self.check_value(depends_on, path[:] + [index], resources))
else:
matches.extend(self.check_value(depends_ons, path, resources))
return matches
``` |
{
"source": "Joeoc2001/chess-ai",
"score": 4
} |
#### File: Joeoc2001/chess-ai/ai.py
```python
import random
import chess
def make_move(board: chess.Board, time_remaining: float) -> str:
"""
`board` gives the current board state from which you should make your move.
See https://python-chess.readthedocs.io/en/v1.4.0/ for full documentation,
or below for some example usage.
`time_remaining` gives the number of seconds left on your chess clock.
You should return a uci-formatted move representing the move that your AI
wishes to make.
For example, to move your pawn from e2 to e4, you should return the string 'e2e4'.
If you make an invalid move, or if your chess clock times out, you forfeit the game.
Note that you are passed a copy of the board object, so methods such as `board.reset()`
will not change the master copy, only your local version.
"""
# Get some interesting information
opponent_color = chess.BLACK if board.turn == chess.WHITE else chess.WHITE
legal_moves = list(board.legal_moves)
best_move = None
best_cost = -1000000
for move in legal_moves:
taken = board.piece_type_at(move.to_square)
taken_cost = [0, 1, 3, 3, 5, 10, 100][taken] if taken is not None else 0
lost_cost = [0, 1, 3, 3, 5, 10, 100][board.piece_type_at(move.from_square)]
cost = taken_cost
if board.is_attacked_by(opponent_color, move.to_square):
cost -= lost_cost
if taken is not None and cost > best_cost:
best_move = move
best_cost = cost
if best_move is None:
best_move = random.choice(legal_moves)
# Return the code of the move
return best_move.uci()
``` |
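A minimal driver, assuming python-chess is installed and that the file above is importable as `ai`:
```python
import chess
from ai import make_move  # module name assumed from the file path above

board = chess.Board()
uci = make_move(board.copy(), time_remaining=60.0)
print(uci)            # a random legal move, e.g. "g1f3" -- nothing can be captured yet
board.push_uci(uci)
print(board.fen())
```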
{
"source": "joeogl/deploymentmanager-samples",
"score": 2
} |
#### File: community/sql_fci/sql_network.py
```python
import common
import default
import utils
def FirewallRule(name, net_name, protocol, deployment, sources, ports=None):
"""Creates a Firewall Rule definition.
Returns a firewall definition based on arguments that is compatible with
the gcloud.
Args:
name: string name of the firewall rule
net_name: string name of the network that this rule will apply to.
protocol: The network protocol, e.g. 'ICMP', 'TCP', 'UDP'
deployment: name of this deployment.
sources: list of strings cidrs of traffic to be allowed.
ports: the TCP or UDP ports that this firewall rule will apply to.
Returns:
Firewall Rule definition compatible with gcloud deployment launcher.
"""
allowed = {
default.IP_PROTO: protocol
}
if ports:
allowed.update({default.PORTS: [ports]})
properties = {
default.NETWORK: common.Ref(net_name).format(net_name),
default.ALLOWED: [allowed],
default.SRC_RANGES: sources
}
firewall_rule_name = "{deployment}-{name}".format(
deployment=deployment,
name=name)
return {
default.NAME: firewall_rule_name,
default.TYPE: default.FIREWALL,
default.PROPERTIES: properties
}
def GenerateConfig(context):
"""Generates the network configuration for the gcloud deployment.
Args:
context: context of the deployment.
Returns:
List of resources that the deployment manager will create.
"""
region = context.properties["region"]
sql_cidr = context.properties.get("sql_cidr", utils.DEFAULT_DEPLOYMENT_CIDR)
deployment = context.env["deployment"]
net_name = utils.NetworkName(deployment)
sub_name = utils.SubnetName(deployment)
    # dev_mode may arrive as a string ("true"/"false") or a boolean, so normalize it.
    is_test = str(context.properties.get("dev_mode", "false")).lower() == "true"
resources = [
{
default.NAME: net_name,
default.TYPE: default.NETWORK_TYPE,
default.PROPERTIES: {
default.AUTO_CREATE_SUBNETWORKS: False,
}
},
{
default.NAME: sub_name,
default.TYPE: default.SUBNETWORK_TYPE,
default.PROPERTIES: {
default.NETWORK: common.Ref(net_name),
default.REGION: region,
default.IP_CIDR_RANGE: sql_cidr
}
},
# Allow ICMP for debugging
FirewallRule(
"allow-all-icmp", net_name, "ICMP", deployment, sources=[sql_cidr]),
# Allow RDP, SQL, and Load Balancer Health Check from anywhere
FirewallRule(
"allow-rdp-port",
net_name,
"TCP",
deployment,
sources=["0.0.0.0/0"],
ports="3389"),
FirewallRule(
"allow-health-check-port",
net_name,
"TCP",
deployment,
# The Google ILB health check service IP ranges.
sources=["172.16.58.3/22", "172.16.17.32/16"],
ports=utils.HEALTH_CHECK_PORT),
# Allow ALL TCP and UDP traffic from within the same network. We should
# only have cluster and AD nodes on this network so the traffic is
# trusted.
FirewallRule(
"allow-all-udp",
net_name,
"UDP",
deployment,
sources=[sql_cidr],
ports="0-65535"),
FirewallRule(
"allow-all-tcp",
net_name,
"TCP",
deployment,
sources=[sql_cidr],
ports="0-65535"),
]
if is_test:
resources.append(
FirewallRule(
"allow-sql-port",
net_name,
"TCP",
deployment,
sources=["0.0.0.0/0"],
ports=utils.APPLICATION_PORT))
return {"resources": resources}
``` |
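Calling the helper directly shows the shape of the resource it emits. This is only a sketch: it needs the sample's `common` and `default` modules importable, and all values are placeholders:
```python
from sql_network import FirewallRule  # needs the sample's common/default helpers on the path

rule = FirewallRule(
    name="allow-rdp-port",
    net_name="my-net",
    protocol="TCP",
    deployment="sqlfci",
    sources=["0.0.0.0/0"],
    ports="3389",
)
print(rule)  # a resource dict named "sqlfci-allow-rdp-port" with its type and properties
```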
{
"source": "JoeOIVOV/ArnePilot",
"score": 2
} |
#### File: controls/lib/latcontrol_indi.py
```python
import math
import numpy as np
from cereal import log
from common.realtime import DT_CTRL
from common.numpy_fast import clip, interp
from common.op_params import opParams
from selfdrive.car.toyota.values import SteerLimitParams
from selfdrive.car import apply_toyota_steer_torque_limits
from selfdrive.controls.lib.drive_helpers import get_steer_max
class LatControlINDI():
def __init__(self, CP, OP=None):
self.angle_steers_des = 0.
A = np.array([[1.0, DT_CTRL, 0.0],
[0.0, 1.0, DT_CTRL],
[0.0, 0.0, 1.0]])
C = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
# Q = np.matrix([[1e-2, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 10.0]])
# R = np.matrix([[1e-2, 0.0], [0.0, 1e3]])
# (x, l, K) = control.dare(np.transpose(A), np.transpose(C), Q, R)
# K = np.transpose(K)
K = np.array([[7.30262179e-01, 2.07003658e-04],
[7.29394177e+00, 1.39159419e-02],
[1.71022442e+01, 3.38495381e-02]])
self.K = K
self.A_K = A - np.dot(K, C)
self.x = np.array([[0.], [0.], [0.]])
self.enforce_rate_limit = CP.carName == "toyota"
if OP is None:
OP = opParams()
self.op_params = OP
self.sat_count_rate = 1.0 * DT_CTRL
self.reset()
def reset(self):
self.delayed_output = 0.
self.output_steer = 0.
self.sat_count = 0.0
def _check_saturation(self, control, check_saturation, limit):
saturated = abs(control) == limit
if saturated and check_saturation:
self.sat_count += self.sat_count_rate
else:
self.sat_count -= self.sat_count_rate
self.sat_count = clip(self.sat_count, 0.0, 1.0)
return self.sat_count > self.sat_limit
def update(self, active, CS, CP, path_plan):
if self.op_params.get('enable_indi_live'):
self.sat_limit = self.op_params.get('steer_limit_timer')
act_bp = self.op_params.get('indi_actuator_effectiveness_bp')
act_v = self.op_params.get('indi_actuator_effectiveness_v')
outer_bp = self.op_params.get('indi_outer_gain_bp')
outer_v = self.op_params.get('indi_outer_gain_v')
inner_bp = self.op_params.get('indi_inner_gain_bp')
inner_v = self.op_params.get('indi_inner_gain_v')
time_bp = self.op_params.get('indi_time_constant_bp')
time_v = self.op_params.get('indi_time_constant_v')
elif CP.lateralTuning.which() == 'indi':
act_bp = CP.lateralTuning.indi.actuatorEffectivenessBP
act_v = CP.lateralTuning.indi.actuatorEffectivenessV
outer_bp = CP.lateralTuning.indi.outerLoopGainBP
outer_v = CP.lateralTuning.indi.outerLoopGainV
inner_bp = CP.lateralTuning.indi.innerLoopGainBP
inner_v = CP.lateralTuning.indi.innerLoopGainV
time_bp = CP.lateralTuning.indi.timeConstantBP
time_v = CP.lateralTuning.indi.timeConstantV
self.sat_limit = CP.steerLimitTimer
self.G = interp(CS.vEgo, act_bp, act_v)
self.outer_loop_gain = interp(CS.vEgo, outer_bp, outer_v)
self.inner_loop_gain = interp(CS.vEgo, inner_bp, inner_v)
self.RC = interp(CS.vEgo, time_bp, time_v)
self.alpha = 1. - DT_CTRL / (self.RC + DT_CTRL)
# Update Kalman filter
y = np.array([[math.radians(CS.steeringAngle)], [math.radians(CS.steeringRate)]])
self.x = np.dot(self.A_K, self.x) + np.dot(self.K, y)
indi_log = log.ControlsState.LateralINDIState.new_message()
indi_log.steerAngle = math.degrees(self.x[0])
indi_log.steerRate = math.degrees(self.x[1])
indi_log.steerAccel = math.degrees(self.x[2])
if CS.vEgo < 0.3 or not active:
indi_log.active = False
self.output_steer = 0.0
self.delayed_output = 0.0
else:
self.angle_steers_des = path_plan.angleSteers
self.rate_steers_des = path_plan.rateSteers
steers_des = math.radians(self.angle_steers_des)
rate_des = math.radians(self.rate_steers_des)
# Expected actuator value
self.delayed_output = self.delayed_output * self.alpha + self.output_steer * (1. - self.alpha)
# Compute acceleration error
rate_sp = self.outer_loop_gain * (steers_des - self.x[0]) + rate_des
accel_sp = self.inner_loop_gain * (rate_sp - self.x[1])
accel_error = accel_sp - self.x[2]
# Compute change in actuator
g_inv = 1. / self.G
delta_u = g_inv * accel_error
# If steering pressed, only allow wind down
if CS.steeringPressed and (delta_u * self.output_steer > 0):
delta_u = 0
# Enforce rate limit
if self.enforce_rate_limit:
steer_max = float(SteerLimitParams.STEER_MAX)
new_output_steer_cmd = steer_max * (self.delayed_output + delta_u)
prev_output_steer_cmd = steer_max * self.output_steer
new_output_steer_cmd = apply_toyota_steer_torque_limits(new_output_steer_cmd, prev_output_steer_cmd, prev_output_steer_cmd, SteerLimitParams)
self.output_steer = new_output_steer_cmd / steer_max
else:
self.output_steer = self.delayed_output + delta_u
steers_max = get_steer_max(CP, CS.vEgo)
self.output_steer = clip(self.output_steer, -steers_max, steers_max)
indi_log.active = True
indi_log.rateSetPoint = float(rate_sp)
indi_log.accelSetPoint = float(accel_sp)
indi_log.accelError = float(accel_error)
indi_log.delayedOutput = float(self.delayed_output)
indi_log.delta = float(delta_u)
indi_log.output = float(self.output_steer)
check_saturation = (CS.vEgo > 10.) and not CS.steeringRateLimited and not CS.steeringPressed
indi_log.saturated = self._check_saturation(self.output_steer, check_saturation, steers_max)
return float(self.output_steer), float(self.angle_steers_des), indi_log
```
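The hard-coded `K` is the steady-state Kalman gain for the constant-acceleration steering model, so `x = (A - KC)x + Ky` acts as a fixed-gain observer. A standalone sketch with synthetic measurements (not real CAN data):
```python
import numpy as np

DT = 0.01
A = np.array([[1.0, DT, 0.0], [0.0, 1.0, DT], [0.0, 0.0, 1.0]])
C = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
K = np.array([[7.30262179e-01, 2.07003658e-04],
              [7.29394177e+00, 1.39159419e-02],
              [1.71022442e+01, 3.38495381e-02]])
A_K = A - K @ C

x = np.zeros((3, 1))
angle, rate = 0.0, 0.1                 # radians, radians/second
for _ in range(1000):
    angle += rate * DT                 # simulated steering ramp
    y = np.array([[angle], [rate]])    # measured angle and rate
    x = A_K @ x + K @ y
print(x.ravel())                       # approaches [angle, 0.1, ~0]
```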
#### File: selfdrive/golden/can_bridge.py
```python
import os
import time
import math
import atexit
import numpy as np
import threading
import random
import cereal.messaging as messaging
import argparse
from common.params import Params
from common.realtime import Ratekeeper
from selfdrive.golden.can import can_function, sendcan_function
import queue
import subprocess
import sys
import signal
def main():
os.system('echo 1 > /tmp/op_simulation')
os.system('echo 1 > /tmp/force_calibration')
os.system('service call audio 3 i32 3 i32 0 i32 1')
global pm
pm = messaging.PubMaster(['can', 'health'])
gps = messaging.sub_sock('gpsLocation')
live_params = messaging.sub_sock('liveParameters')
#live_calibartion = messaging.sub_sock('liveCalibration')
# can loop
sendcan = messaging.sub_sock('sendcan')
rk = Ratekeeper(100, print_delay_threshold=None)
steer_angle = 0.0
gps_speed = 30.0 / 3.6
cal_status = 0
posenet_speed = 0.0
btn_list = []
while 1:
gps_data = messaging.recv_sock(gps)
params = messaging.recv_sock(live_params)
calibration = None
#calibration = messaging.recv_sock(live_calibartion)
if gps_data:
gps_speed = gps_data.gpsLocation.speed
if params:
posenet_speed = params.liveParameters.posenetSpeed
#print ('posenet_speed=' + str(posenet_speed*3.6) + ' kph')
if calibration:
cal_status = calibration.liveCalibration.calStatus
engage = False
if rk.frame != 0 and rk.frame % 500 == 0:
engage = cal_status == 1
speed = gps_speed
if speed < 0.00001:
speed = posenet_speed
#can_function(pm, speed, steer_angle, rk.frame, rk.frame%500 == 499)
if os.path.exists('/tmp/op_start'):
if len(btn_list) == 0:
for x in range(10):
btn_list.append(3)
os.system('rm /tmp/op_start')
if os.path.exists('/tmp/op_stop'):
if len(btn_list) == 0:
for x in range(10):
btn_list.append(2)
os.system('rm /tmp/op_stop')
btn = 0
if len(btn_list) > 0:
btn = btn_list[0]
btn_list.pop(0)
can_function(pm, speed * 3.6, steer_angle, rk.frame, cruise_button=btn, is_engaged=1)
#if rk.frame%5 == 0:
# throttle, brake, steer = sendcan_function(sendcan)
# steer_angle += steer/10000.0 # torque
# # print(speed * 3.6, steer, throttle, brake)
dat = messaging.new_message('health')
dat.valid = True
dat.health = {
'ignitionLine': True,
'hwType': "blackPanda",
'controlsAllowed': True
}
pm.send('health', dat)
rk.keep_time()
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
global pm
dat = messaging.new_message('health')
dat.valid = True
dat.health = {
'ignitionLine': False,
'hwType': "greyPanda",
'controlsAllowed': True
}
for seq in range(10):
pm.send('health', dat)
time.sleep(0.1)
print ("exiting")
sys.exit(0)
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
main()
``` |
{
"source": "joepaolicelli/cabi-prediction-api",
"score": 3
} |
#### File: cabi/prepare_data/complete.py
```python
from cabi.prepare_data.utils import bal, get_and_adjust_data
import datetime
import numpy as np
import pandas as pd
from pandas.tseries.offsets import Hour
def complete(
db_engine, station_id, start, end, sample_size=int(1.0e5),
balance=None):
"""
sample_size will be ignored if balance is not None.
"""
data = get_and_adjust_data(
db_engine, station_id, start, end)
# Balance or set to sample_size
if balance is None:
if data.size > sample_size:
data = data.sample(n=sample_size)
else:
data = bal(data, balance)
# Ensure shuffling.
data = data.iloc[np.random.permutation(len(data))]
X = []
yempty = []
yfull = []
weather_isd = pd.read_sql_query(
"SELECT * FROM weather_isd", db_engine, index_col="ts")
weather = pd.read_sql_query(
"SELECT * FROM weather", db_engine, index_col="ts")
weather = pd.concat([weather_isd, weather])
weather.index = weather.index.tz_localize(None)
# Get rid of duplicates
weather = weather.groupby(level=0).first()
weather = weather.asfreq(Hour(), method="pad")
no_weather_count = 0
for row in data.iteritems():
hour = row[0].replace(
minute=0, second=0, microsecond=0, tzinfo=None)
try:
temp_hour = hour
temp = float(weather.loc[temp_hour].temp)
while pd.isnull(temp):
temp_hour = temp_hour - datetime.timedelta(hours=1)
temp = float(weather.loc[temp_hour].temp)
precip_hour = hour
precip = float(weather.loc[hour].precip)
while pd.isnull(precip):
precip_hour = precip_hour - datetime.timedelta(hours=1)
precip = float(weather.loc[precip_hour].precip)
features = [
(1 if row[0].dayofweek == 0 else 0),
(1 if row[0].dayofweek == 1 else 0),
(1 if row[0].dayofweek == 2 else 0),
(1 if row[0].dayofweek == 3 else 0),
(1 if row[0].dayofweek == 4 else 0),
(1 if row[0].dayofweek == 5 else 0),
(1 if row[0].dayofweek == 6 else 0),
float(((row[0].hour * 60) + row[0].minute)) / 1440.0,
float(row[0].month) / 12.0,
temp / 50.0,
precip / 15.0
]
X.append(features)
yempty.append(1 if row[1] == "empty" else 0)
yfull.append(1 if row[1] == "full" else 0)
except KeyError as ex:
no_weather_count += 1
print("Weather not found for", no_weather_count, "rows.")
return {'X': X, 'yempty': yempty, 'yfull': yfull}
```
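A sketch of invoking the feature builder above; the connection string and station id are placeholders, and the database is assumed to already contain the `bike_count`, `outage` and weather tables:
```python
from datetime import datetime
from sqlalchemy import create_engine
from cabi.prepare_data.complete import complete

engine = create_engine("postgresql://user:password@localhost/cabi")  # placeholder DSN
data = complete(engine, station_id="31000",                          # placeholder station id
                start=datetime(2016, 1, 1), end=datetime(2016, 6, 30),
                balance="empty")
print(len(data["X"]), sum(data["yempty"]), sum(data["yfull"]))
```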
#### File: cabi/prepare_data/utils.py
```python
import datetime
import numpy as np
import pandas as pd
from sqlalchemy import sql
def get_and_adjust_data(db_engine, station_id, start, end):
"""
Get data from the database in both the bike count format and the outage
format, between the passed dates. If bike count data and outage data is
available for the same time, bike count data takes precedence.
If no data is available for a subset of the passed period of time, it will
be left out of the returned dataset.
"""
data_list = []
# Create empty DateTimeIndex with frequency of five minutes, and assign it
# to an empty series.
# "5T" is five minutes.
dti = pd.date_range(0, -1, freq="5T")
data = pd.Series(None, index=dti)
# Add data in the bike count format.
bike_counts = pd.read_sql_query(
"SELECT ts, bikes, spaces FROM bike_count "
+ "WHERE station_id = %(station_id)s AND "
+ "ts >= %(start)s AND ts <= %(end)s;",
db_engine, params={
"station_id": station_id, "start": start, "end": end})
# bike_count[0] is the index, [1..3] are the columns in the order
# selected in the above query
for bike_count in bike_counts.itertuples():
# Do not insert counts with no bikes or spaces (inactive stations).
if not (bike_count[2] == 0 and bike_count[3] == 0):
ts = pd.to_datetime(bike_count[1], infer_datetime_format=True)
# Round the timestamp to the nearest five minute mark.
ts += datetime.timedelta(seconds=150)
ts = ts.replace(
minute=(ts.minute - (ts.minute % 5)), second=0, microsecond=0)
# A status of np.nan means the station is neither full nor empty.
status = np.nan
if bike_count[2] == 0:
status = "empty"
elif bike_count[3] == 0:
status = "full"
# Create index with only one entry, ts.
index = pd.date_range(ts, ts, freq="5T")
data_list.append(pd.Series(status, index=index))
if len(data_list) > 0:
data = pd.concat(data_list)
try:
data_list = []
# Add data in the outage format.
outages = pd.read_sql_query(
"SELECT outage_type, outage_start, outage_end FROM outage "
+ "WHERE station_id = %(station_id)s AND "
+ "outage_start >= %(start)s AND outage_end <= %(end)s;",
db_engine, params={
"station_id": station_id, "start": start, "end": end})
# Merge each outage into dataframe.
for outage in outages.itertuples():
ostart = pd.to_datetime(outage[2], infer_datetime_format=True)
ostart += datetime.timedelta(seconds=150)
ostart = ostart.replace(
minute=(ostart.minute - (ostart.minute % 5)),
second=0, microsecond=0)
oend = pd.to_datetime(outage[3], infer_datetime_format=True)
oend += datetime.timedelta(seconds=150)
oend = oend.replace(
minute=(oend.minute - (oend.minute % 5)),
second=0, microsecond=0)
index = pd.date_range(ostart, oend, freq="5T")
data_list.append(pd.Series(outage[1], index=index))
outage_data = pd.concat(data_list)
outage_data = outage_data.groupby(outage_data.index).first()
conn = db_engine.connect()
# Determine timeframe where outages were recorded for this station.
query = sql.text(
"SELECT MIN(outage_start) FROM outage "
"WHERE station_id = :station_id")
outage_data_start = conn.execute(
query, station_id=station_id).fetchone()[0]
query = sql.text(
"SELECT MAX(outage_end) FROM outage "
"WHERE station_id = :station_id")
outage_data_end = conn.execute(
query, station_id=station_id).fetchone()[0]
outage_data_start = pd.to_datetime(
outage_data_start, infer_datetime_format=True)
outage_data_end = pd.to_datetime(
outage_data_end, infer_datetime_format=True)
range_start = outage_data_start if outage_data_start > start else start
range_end = outage_data_end if outage_data_end < end else end
range_start = range_start.replace(
minute=(range_start.minute - (range_start.minute % 5)),
second=0, microsecond=0)
range_end = range_end.replace(
minute=(range_end.minute - (range_end.minute % 5)),
second=0, microsecond=0)
# Add NaN for those times when the station is not full or empty.
outage_data = outage_data.reindex(pd.date_range(
range_start, range_end, freq="5T"))
# Remove any timestamps from outage_data that are in the bike_count
# data.
unique = outage_data.index.difference(data.index)
outage_data = outage_data.reindex(unique)
except ValueError as ex:
outage_data = None
print(ex)
# Merge the two series together.
if outage_data is not None:
data = pd.concat([data, outage_data])
# Remove any remaining stray duplicates.
data = data.groupby(data.index).first()
data.sort_index(inplace=True)
return data
def bal(s, balance):
"""
s: The series to balance.
balance: The status to balance on. "empty" or "full"
"""
if(balance == "empty"):
df_empty = s[s == "empty"]
df_not_empty = s[s != "empty"]
return pd.concat([
df_not_empty.sample(len(df_empty)),
df_empty])
elif(balance == "full"):
df_full = s[s == "full"]
df_not_full = s[s != "full"]
return pd.concat([
df_not_full.sample(len(df_full)),
df_full])
``` |
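`bal` simply downsamples the majority class to match the chosen status; a toy run on a synthetic series:
```python
import pandas as pd
from cabi.prepare_data.utils import bal

idx = pd.date_range("2016-01-01", periods=10, freq="5T")
s = pd.Series(["empty", "empty"] + [None] * 8, index=idx)
balanced = bal(s, "empty")
print(balanced.value_counts(dropna=False))  # 2 "empty" rows plus 2 sampled non-"empty" rows
```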
{
"source": "joeparis/CS161-Demos-Winter-19",
"score": 4
} |
#### File: 19.01.09/8am/chaos-08.py
```python
def main():
"""
Calculate the values of the chaotic function.
"""
x = float(input("Enter a number between 1 and 0: "))
# We will avoid using eval() because it is a security concern.
# x = eval(input("Enter a number between 1 and 0: "))
for _ in range(10):
x = 3.9 * x * (1 - x)
print(x)
# identifiers that start and end with double underscores (sometimes called
# "dunders") are special in Python.
if __name__ == "__main__":
main()
```
#### File: 19.01.16/8am/abort_demo.py
```python
def validate_integer_input():
"""
Demonstrates validating that user input is an integer (does not validate
that it is within any given range).
"""
# isnumeric() simply checks if there are any non-digit characters in the
# string and returns false if any are found.
user_int = input("Please enter an integer: ")
while not user_int.isnumeric():
print("Invalid input. Please enter a valid integer value: ")
user_int = input("Please enter an integer: ")
return user_int
def validate_real_input():
"""
Demonstrates validating that user input is a real (does not validate
that it is within any given range).
"""
# Because isnumeric() checks for any non-digit characters it fails on the
# decimal place. We try to convert the user's input using float() which
# raises an exception (which we then catch) on bad input.
while True:
user_real = input("Please enter a real number: ")
try:
user_real = float(user_real)
break
except ValueError:
print("Invalid input. Please enter a valid real number: ")
return user_real
def check_integer_range(lower=0, upper=100):
"""
Demonstrates validating that user input is a number type within the
specified range.
Keyword Arguments:
lower {int} -- The lower bound of the range (default: {0})
upper {int} -- The bound of the range (default: {100})
"""
# We'll re-use the work we've already done to ensure what the user entered
# was an integer by leveraging our integer validation function above.
value = validate_integer_input()
# If we get here we know value is an integer. Now, we check to make sure it
# is in the required range.
while True:
if value >= lower and value < upper:
break
print(
f"Please enter a value greater than or equal to {lower} and less than {upper}."
)
value = validate_integer_input()
def main():
# demo validating that input is numeric (but not within a given range)
print(validate_integer_input())
print(validate_real_input())
if __name__ == "__main__":
main()
```
#### File: demos/19.01.23/input_validation.py
```python
def get_color_value(color, lower_limit=0.0, upper_limit=1.0):
"""
Get a value for the color.
Arguments:
color {string} -- The name of the color value to get.
lower_limit {float} -- The lower bound of the legal color value.
upper_limit {float} -- The upper bound of the legal color value.
Returns:
float -- The color value.
"""
# while True:
# value = float(input(f"Please enter the {color} value: "))
# if value >= 0 and value <= 1:
# break
# print(f"{value} is not valid, please enter a value betwen 0 and 1.")
while True:
try:
value = float(input(f"Please enter the {color} value: "))
if not (value >= lower_limit and value <= upper_limit):
raise ValueError
break
except ValueError:
print(
f"Invalid input, please enter a number betwen {lower_limit} and {upper_limit}."
)
return value
def main():
"""
Driver for demoing getting and validating user input for color values
for turtle. Color values must be between >=0 and <=1.
"""
red_value = get_color_value("red")
green_value = get_color_value("green", 0.5, 1.0)
blue_value = get_color_value("blue", 0.0, 0.6)
print(red_value, green_value, blue_value)
if __name__ == "__main__":
main()
```
#### File: 19.02.06/8am/problem_3.py
```python
def method_1(n):
print(n * 1 + n * 11 + n * 111)
def method_2(n):
value_1 = int(f"{n}")
value_2 = int(f"{n}{n}")
value_3 = int(f"{n}{n}{n}")
print(f"{value_1} + {value_2} + {value_3} = {value_1 + value_2 + value_3}")
def method_3(n):
nums = []
for i in range(1, 4):
num = n * i
nums.append(int(num))
print(f"{nums[0]} + {nums[1]} + {nums[2]} = {sum(nums)}")
def method_4(n):
print(sum([int(n * i) for i in range(1, 4)]))
if __name__ == "__main__":
n = input("Please enter a number 0 <= n < 10: ")
# method_1(n)
# method_2(n)
# method_3(n)
method_4(n)
```
#### File: demos/assignment_4/problem_2.py
```python
def main():
for n in range(1, 11):
print(f"{n:2} {n**2:3} {n**3:4}")
if __name__ == "__main__":
main()
```
#### File: demos/assignment_4/problem_3.py
```python
def method_1(n):
value_1 = int(f"{n}")
value_2 = int(f"{n}{n}")
value_3 = int(f"{n}{n}{n}")
print(f"{value_1} + {value_2} + {value_3} = {value_1 + value_2 + value_3}")
def method_2(n):
nums = []
for i in range(1, 4):
num = n * i
nums.append(int(num))
print(f"{nums[0]} + {nums[1]} + {nums[2]} = {sum(nums)}")
def method_3(n):
print(sum([int(n * idx) for idx in range(1, 4)]))
if __name__ == "__main__":
# n = input("Please enter a value, x, and I will computer x + xx + xxx: ")
n = "5"
method_1(n)
method_2(n)
method_3(n)
``` |
{
"source": "JoeParmigiano/advent-of-code-2021",
"score": 4
} |
#### File: advent-of-code-2021/day-1/main.py
```python
input_filename = "day-1/input.txt"
# Load data into an array
file = open(input_filename, "r")
data = [int(x) for x in file.read().split('\n')]
def count_depth_increments(depths, window_width):
increments = 0
for i in range(len(depths) - window_width):
window_depth_current = sum(depths[i:i+window_width])
window_depth_next = sum(depths[i+1:window_width+i+1])
increments += int(window_depth_next > window_depth_current)
return increments
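# Tiny worked example: count_depth_increments([199, 200, 208], 1) -> 2,
# since 200 > 199 and 208 > 200.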
print(f"Answer part 1: {count_depth_increments(data, 1)}")
print(f"Answer part 2: {count_depth_increments(data, 3)}")
```
#### File: advent-of-code-2021/day-6/main.py
```python
input_filename = "day-6/input.txt"
# Load data into an array
file = open(input_filename, "r")
data = [int (x) for x in file.read().split(',')]
# Part 2 requires a more optimal way of handling the data
# New list format: each entry counts the fish whose remaining days until
# spawning equal that entry's index
schedule = [0] * 9
for fish in data:
schedule[fish] += 1
def evolve(schedule):
next_step = [0] * 9
# New fishes
next_step[8] = schedule[0]
# Fishes that gave birth have now 6 days left
next_step[6] += schedule[0]
# All fishes have a day less to give birth
for i in range(8):
next_step[i] += schedule[i+1]
return next_step
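# Worked example (hypothetical input): fish timers [3, 4, 3, 1] become
# schedule [0, 1, 0, 2, 1, 0, 0, 0, 0]; one call to evolve() shifts this to
# [1, 0, 2, 1, 0, 0, 0, 0, 0] (no fish had timer 0, so none spawn this step).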
days = 256
for day in range(days):
schedule = evolve(schedule)
fishes = sum(schedule)
print(f"Fishes after {days} days: {fishes}.")
``` |
{
"source": "JoePatchett/stock_scrape",
"score": 3
} |
#### File: JoePatchett/stock_scrape/stock_data.py
```python
from bs4 import BeautifulSoup
import requests
import csv
def stock_get(nyse_codes, csv_file):
"""
This function takes New York stock exchange tickers (i.e. 'AAPL' for Apple) and the csv file location where to store this data.
The result is a csv file with company code, price, previous open, market volume, and market value. All of this data is from bloomberg.com
At the moment it pauses for 3 seconds after each request so that you don't get flagged. Change as necessary. Perhaps will move to Scrapy.
"""
stock_data_file = open(csv_file,'a', newline='')
stock_data_writer = csv.writer(stock_data_file)
for code in nyse_codes:
page = 'https://www.bloomberg.com/quote/' + code + ':US'
#query1 = urllib3.urlopen(page)
query1 = requests.get(page)
url_data = query1.text
parsed_text = BeautifulSoup(url_data, 'html.parser')
location_of_final_close = parsed_text.find('span', attrs={'class':'priceText__1853e8a5'})
final_close = location_of_final_close.text.split()
databox = []
databox.append(code)
databox.append(final_close)
for i in range(5):
databox.append(parsed_text.findAll('div',{'class':'value__b93f12ea'})[i].string)
        stock_data_writer.writerow(databox)
stock_data_file.close()
return 0
def calculate_daily_differences(csv_file_day1, csv_file_day2):
"""
This function takes the stock data from one day, subtracts each value and appends the day 1, day 2, and delta value
into a list. The structure is organized as follows: [[Company 1, [day 1 value, day 2 value, delta], [], []...], [Company 2,...]]
"""
day1_values = []
day2_values = []
final_delta_values = []
with open(csv_file_day1, newline = '') as day1:
day_1_reader = csv.reader(day1, delimiter = ',')
for row in day_1_reader:
day1_values.append(row[:4])
with open(csv_file_day2, newline = '') as day2:
day_2_reader = csv.reader(day2, delimiter = ',')
for row in day_2_reader:
day2_values.append(row[:4])
    for i in range(len(day1_values)):
        # first element is the company code; the remaining columns are expected to be numeric
        temp_list = [day1_values[i][0]]
        for j in range(1, len(day1_values[i])):
            # CSV values are read back as strings, so convert before subtracting
            temp_delta = float(day2_values[i][j]) - float(day1_values[i][j])
            temp_list.append([day1_values[i][j], day2_values[i][j], temp_delta])
        final_delta_values.append(temp_list)
return final_delta_values
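# Hypothetical call (the file names are placeholders):
#   deltas = calculate_daily_differences('stocks_day1.csv', 'stocks_day2.csv')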
def kNN_comparison(test_point, rest_of_data, k):
"""
This function takes a test date open value which you want to predict if the stock will increase or not. The 'test_point' is
the percent change in stock on a previous day. The aptly named 'rest_of_data' will include the percent change on a previous day,
and then a '0' or '1' if that stock decreased or increased on a given date. This given date should not be the same date as the 'test_point',
the goal is to see if changes in stocks on previous days can have lasting effects on stocks in the future.
"""
[training_set, test_set] = prep_data(rest_of_data)
    distances = []
    for x in range(len(training_set)):
        # store (distance, training point) pairs so sorting orders by distance
        distances.append((euclidean_distance(test_point, training_set[x]), training_set[x]))
    distances.sort(key=lambda pair: pair[0])
    neighbors = []
    for x in range(k):
        neighbors.append(distances[x])
    return neighbors
def prep_data(csv_file, split_value, seed_number):
"""
This function places the data into two lists, training set and test set.
"""
return 0
def euclidean_distance(test_point, data_set):
    # sum the squared coordinate differences, then take the square root
    total = 0.0
    for i in range(len(data_set)):
        total += (test_point[i] - data_set[i]) ** 2
    return total ** 0.5
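# Sanity check: euclidean_distance([0, 0], [3, 4]) == 5.0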
#I Slowly adding old versions.
``` |
{
"source": "joepatmckenna/fem",
"score": 2
} |
#### File: doc/scripts/langevin_2.py
```python
import numpy as np
import numpy.linalg as nplin
import scipy as sp
from scipy.special import erf as sperf
from scipy.linalg import pinv as spinv
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import multiprocessing
import os
import sys
dt, T, N_var = 0.1, 30000, 30 #float(sys.argv[1]),float(sys.argv[2]),int(sys.argv[3])
# seed = 23
# np.random.seed(seed)
if len(sys.argv) > 4:
seed = int(sys.argv[4])
np.random.seed(seed)
L = np.int(np.ceil(T / dt))
dts = np.sqrt(dt)
sqrt2 = np.sqrt(2)
def gen_W(nvar=N_var):
"""coupling matrix"""
# return scale*((np.array(np.random.rand(nvar*nvar)).reshape(nvar,nvar))-0.5)-np.eye(nvar)
scale = 1.0 / np.sqrt(float(nvar))
return scale * (np.random.rand(nvar, nvar) - 0.5 - 2 * np.eye(nvar))
def gen_X(w, nvar=N_var, ll=L):
"""time series"""
x = np.zeros((ll, nvar))
eta = np.random.randn(ll - 1, nvar)
x[0] = 2 * np.array(np.random.rand(nvar) - 0.5).reshape(1, nvar)
for t in range(1, ll):
x[t] = x[t - 1] + dt * np.dot(x[t - 1], w) + dts * eta[t - 1]
return x
def gen_X_trial(w, x, nvar=N_var, ll=L):
"""time series trial"""
    X = np.zeros((ll, nvar))
    eta = np.random.randn(ll - 1, nvar)
X[0] = x[0]
for t in range(1, ll):
X[t] = x[t - 1] + dt * np.dot(x[t - 1], w) + dts * eta[t - 1]
return X
W = gen_W()
X = gen_X(W)
Y = np.sign(X[1:] - X[:-1]) #X(t+1)-X(t)
C_j = np.mean(X, axis=0)
XC = (X - C_j)[:-1]
C_jk = np.cov(X, rowvar=False)
def iteration(index, w_in, x, xc, y, num_iter=10):
sperf_init = sperf(np.dot(x, w_in)[:-1, index] * dts / sqrt2)
for iterate in range(num_iter):
h = np.dot(x, w_in[:, index])[:-1]
h_ratio = h * y[:, index] / sperf(h * dts / sqrt2)
w_in[:, index] = sp.linalg.solve(C_jk,
np.mean(
h_ratio[:, np.newaxis] * xc,
axis=0))
sperf_next = sperf(np.dot(x, w_in)[:-1, index] * dts / sqrt2)
if (nplin.norm(sperf_next - sperf_init)**2 < 1e-4): break
sperf_init = np.copy(sperf_next)
# print(iterate,nplin.norm((x[1:,index]-x[:-1,index])-sperf_init)**2/float(L-1))
# return w_in
cov_plus = np.cov(X[:-1], X[1:], rowvar=False)
#w_try = sp.linalg.solve(cov_plus[:N_var,:N_var]*dt,cov_plus[:N_var,N_var:]-cov_plus[:N_var,:N_var])#gen_W()
w_try = gen_W()
print('initial MSE', nplin.norm(W - w_try)**2 / float(N_var**2))
for index in range(N_var):
iteration(index, w_try, X, XC, Y)
print('final MSE for ',index,nplin.norm(W[:,index]-w_try[:,index])**2/float(N_var),\
nplin.norm(Y[:,index]-sperf(np.dot(X,w_try)[:-1,index])*dts/sqrt2)**2/float(L-1))
plt.scatter(W.flatten(), w_try.flatten(), c='k', s=0.1)
plt.show()
# with PdfPages('langevin-' + str(dt) + '-' + str(T) + '-' + str(N_var) + '-' +
# str(seed) + '.pdf') as pdf:
# fig = plt.figure()
# plt.imshow(w_try)
# plt.colorbar()
# pdf.savefig(fig)
# plt.close()
# fig = plt.figure()
# print('initial MSE', nplin.norm(W - w_try)**2 / float(N_var**2))
# for index in range(N_var):
# iteration(index, w_try, X, XC, Y)
# print('final MSE for ',index,nplin.norm(W[:,index]-w_try[:,index])**2/float(N_var),\
# nplin.norm(Y[:,index]-sperf(np.dot(X,w_try)[:-1,index])*dts/sqrt2)**2/float(L-1))
# ## X_try = gen_X(w_try2)
# ## fig=plt.figure()
# ## plt.imshow(np.cov(X[:-1],X[1:],rowvar=False)-np.cov(X_try[:-1],X_try[1:],rowvar=False))
# ## plt.colorbar()
# ## pdf.savefig(fig)
# ## plt.close()
# ## X_try = gen_X(w_try)
# ## fig=plt.figure()
# ## plt.imshow(np.cov(X[:-1],X[1:],rowvar=False)-np.cov(X_try[:-1],X_try[1:],rowvar=False))
# ## plt.colorbar()
# ## pdf.savefig(fig)
# ## plt.close()
# fig = plt.figure()
# plt.imshow(W)
# plt.colorbar()
# pdf.savefig(fig)
# plt.close()
# fig = plt.figure()
# plt.imshow(w_try)
# plt.colorbar()
# pdf.savefig(fig)
# plt.close()
# fig = plt.figure()
# plt.imshow(W - w_try)
# plt.colorbar()
# pdf.savefig(fig)
# plt.close()
# fig = plt.figure()
# plt.imshow(C_jk)
# plt.colorbar()
# pdf.savefig(fig)
# plt.close()
# fig = plt.figure()
# plt.imshow(np.cov(gen_X(w_try), rowvar=False))
# plt.colorbar()
# pdf.savefig(fig)
# plt.close()
# fig = plt.figure()
# plt.imshow(C_jk - np.cov(gen_X(w_try), rowvar=False))
# plt.colorbar()
# pdf.savefig(fig)
# plt.close()
# for index in range(3): #not N_var, too many graphs!
# fig = plt.figure()
# plt.plot(np.arange(100), Y[:100, index], 'b-')
# # plt.plot(np.arange(100),X[1:101,1],'r-')
# plt.plot(np.arange(100), (X[1:] - X[:-1])[:100, index], 'g-')
# plt.plot(np.arange(100), sperf(np.dot(X, w_try)[:100, index]), 'k-')
# pdf.savefig(fig)
# plt.close()
```
#### File: fem/discrete/simulate.py
```python
import numpy as np
import combinatorics
from .. import fortran_module
def model_parameters(n, m, degs=[1], dist=None, dist_par=None):
"""Draw random model parameters
Args:
n (int):
m (int):
degs (list):
dist (callable)
dist_par (tuple):
Returns:
dict: keys `degs`
"""
try:
len(m)
except:
m = np.repeat(m, n)
m_sum = m.sum()
m_cumsum = np.insert(m.cumsum(), 0, 0)
degs = np.array(degs)
max_deg = degs.max()
if (dist is None) or (dist_par is None):
dist = np.random.normal
dist_par = (0.0, 1.0 / np.sqrt(m.sum()))
idx_by_deg = [combinatorics.multiindices(n, deg) for deg in degs]
mi = [np.array([np.prod(m[i]) for i in idx]) for idx in idx_by_deg]
mi_sum = [mii.sum() for mii in mi]
mi_cumsum = [np.insert(mii.cumsum(), 0, 0) for mii in mi]
w = {
deg: dist(*dist_par, size=(m_sum, mi_sum[i]))
for i, deg in enumerate(degs)
}
for (i, deg) in enumerate(degs):
for (m1, m2) in zip(m_cumsum[:-1], m_cumsum[1:]):
w[deg][m1:m2] -= w[deg][m1:m2].mean(0)
for (m1, m2) in zip(mi_cumsum[i][:-1], mi_cumsum[i][1:]):
w[deg][:, m1:m2] -= w[deg][:, m1:m2].mean(1)[:, np.newaxis]
return w
def time_series(w, n, m, l=None, o=1.0):
"""Simulate discrete time series data
Args:
w (dict):
n (int):
m (int):
l (int):
o (float)
Returns:
ndarray: time series data
"""
try:
len(m)
except:
m = np.repeat(m, n)
degs = np.sort(w.keys())
w = np.hstack([w[deg] for deg in degs])
if l is None:
l = int(o * np.prod(w.shape))
return fortran_module.fortran_module.simulate_time_series(w, m, l, degs)
def mutations(w, n, m, l=None, o=1.0):
try:
len(m)
except:
m = np.repeat(m, n)
degs = np.sort(w.keys())
w = np.hstack([w[deg] for deg in degs])
if l is None:
l = int(o * np.prod(w.shape))
return fortran_module.fortran_module.simulate_mutations(w, m, l, degs)
``` |
{
"source": "joepatmckenna/joepatmckenna.github.io",
"score": 3
} |
#### File: joepatmckenna.github.io/_includes/sigmoid-derivs.py
```python
import numpy as np
import matplotlib.pyplot as plt
def f(x):
"""sigmoid function"""
return 1.0 / (1.0 + np.exp(-x))
def df(n, x):
"""nth derivative of sigmoid function"""
# compute coeffs
c = np.zeros(n + 1, dtype=int)
c[0] = 1
for i in range(1, n + 1):
for j in range(i, -1, -1):
c[j] = -j * c[j - 1] + (j + 1) * c[j]
# compute derivative as series
res = 0.0
for i in range(n, -1, -1):
res = f(x) * (c[i] + res)
return res
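# Sanity check: for n = 1 the coefficient recurrence gives c = [1, -1], so
# df(1, x) == f(x) * (1 - f(x)), the familiar sigmoid derivative.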
x = np.linspace(-5, 5, 1000)[:, np.newaxis].repeat(16, axis=1).T
y = np.array([df(n, x[n]) for n in range(16)])
fig, ax = plt.subplots(4, 4, figsize=(8, 6))
for xi, yi, i in zip(x, y, range(16)):
ax[i / 4, i % 4].plot(xi, yi, 'k-', label="n = %i" % (i))
ax[i / 4, i % 4].legend()
plt.savefig('../assets/img/sigmoid-derivs.png')
``` |
{
"source": "joepatmckenna/normal_forms",
"score": 2
} |
#### File: normal_forms/examples/bif.py
```python
# # ppp and pp3
# def f(x, y, z, p=[0, .25, .5, 4, 3, 5]):
# f1 = x * (1 - x) - p[3] * x * y
# f2 = -p[1] * y + p[3] * x * y - p[4] * y * z - p[0] * (1 - exp(-p[5] * y))
# f3 = -p[2] * z + p[4] * y * z
# return f1, f2, f3
# J = nf.jet(f, (0, 0, 0), 3)
# print J.series[0]
# print J.series[1]
# print J.series[2]
```
#### File: examples/normal_form/07.py
```python
from normal_forms import normal_form
import sympy
# Murdock, Normal Forms and Unfoldings of Local Dynamical Systems, Example 4.5.24
def f(x, y, z):
f1 = 6 * x + x**2 + x * y + x * z + y**2 + y * z + z**2
f2 = 2 * y + x**2 + x * y + x * z + y**2 + y * z + z**2
f3 = 3 * z + x**2 + x * y + x * z + y**2 + y * z + z**2
return f1, f2, f3
h = normal_form(f, (0, 0, 0), 2)
# coeff of z**2
print h.fun[0].coeff(h.jet.var[2]**2)
```
#### File: examples/normal_form/09.py
```python
from normal_forms import normal_form
import sympy
# ppp and pp3
def f(x, y, z, p=[0, .25, .5, 4, 3, 5]):
f1 = x * (1 - x) - p[3] * x * y
f2 = -p[1] * y + p[3] * x * y - p[4] * y * z - p[0] * (
1 - sympy.exp(-p[5] * y))
f3 = -p[2] * z + p[4] * y * z
return f1, f2, f3
h = normal_form(f, (0, 0, 0), 4)
```
#### File: normal_forms/normal_forms/jet.py
```python
import numpy as np
import sympy
import combinatorics
from multiindex import multiindex
import bases
class jet(object):
"""Truncated Taylor's series.
The jet is represented in both a closed and expanded form. The closed form is ``fun``:math:`=\\sum_{0\\leq deg \\leq k}` ``fun_deg[deg]`` where ``fun_deg[deg]=coeff[deg]*pb[deg]`` is a symbolic representation of the degree ``deg`` term. The expanded form is the list ``fun_deg`` of ``sympy.Matrix(m,1)`` objects where ``coeff`` is a list of ``k+1 numpy.array`` objects with shapes :math:`(m,{n+j-1 \\choose j})` for :math:`0\leq j\leq k`. ``pb`` is a dictionary indexed by degree of ``sympy.Matrix`` objects with ``pb[j]`` representing a basis for homogenous :math:`j^{th}` degree polynomials in the variables :math:`x_0,\ldots,x_{n-1}` of the form :math:`\\begin{pmatrix}x_0^j & x_0^{j-1}x_1 & \\cdots & x_{n-1}^j \\end{pmatrix}^T`. ``coeff[deg][coord,term]`` is the coefficient of the monomial ``pb[deg][term]`` in coordinate ``coord`` of the partial derivative of :math:`f` indexed by the ``term`` th ``normal_forms.multiindex.multiindex(deg,n)``.
Parameters
----------
f : callable
function that accepts ``n`` arguments and returns tuple of length ``m``, corresponding to mathematical function :math:`f:\\mathbb{R}^n\\rightarrow\\mathbb{R}^m`
x : number if ``n==1`` or tuple of length ``n`` if ``n>=1``
center about which jet is expanded
k : int
maximum degree of jet
Attributes
----------
n : int
dimension of domain of :math:`f`
m : int
dimension of codomain of :math:`f`
var : list of ``n sympy.symbol`` objects
``x_0``, ``x_1``, ..., ``x_{n-1}`` representing arguments of :math:`f`
pb : ``normal_forms.bases.poly_basis``
a basis for polynomials in the variables ``var``
coeff : list of ``k+1 numpy.array`` objects of shape :math:`(m,{n+j-1\\choose j})` for :math:`0\leq j\leq k`
jet coefficients indexed as ``coeff[deg][coord,term]`` where :math:`0\leq` ``deg`` :math:`\leq k`, :math:`0\leq` ``coord`` :math:`\leq m`, and :math:`0\leq` ``term`` :math:`<{m-1+deg \\choose deg}`.
fun_deg : list of ``k+1 sympy.Matrix(m,1)`` objects
symbolic representation of each term in the jet indexed as ``fun_deg[deg]`` for ``deg=0,...,k``
fun : ``sympy.Matrix(m,1)``
symbolic representation of jet
fun_lambdified : callable
lambdified version of fun
"""
def __init__(self, f, x, k, f_args=None, var=None, pb=None):
"""initialize the jet"""
self.f = f
self.x = x
self.k = k
if np.array(x).shape == ():
n, x = 1, [x]
else:
n = len(x)
# call to f
if f_args is None:
f_eval = f(*x)
else:
f_eval = f(*(list(x) + list(f_args)))
if np.array(f_eval).shape == ():
m = 1
else:
# call to f
m = len(f_eval)
self.m = m
self.n = n
if var is None:
var = sympy.symarray('x', (n, ))
if pb is None:
pb = bases.poly_basis(var)
self.var = var
self.pb = pb
# number of terms per degree of expanded form
n_terms = combinatorics.simplicial_list(n, k)
coeff = [np.empty([m, n_terms[deg]]) for deg in range(k + 1)]
basis = [sympy.ones(n_terms[deg], 1) for deg in range(k + 1)]
# call to f
if f_args is None:
f_eval = f(*var)
else:
f_eval = f(*(list(var) + list(f_args)))
coeff[0][:, 0] = list(sympy.Matrix([f_eval]).subs(zip(var, x)))
for deg in range(1, k + 1):
m_idx = multiindex(deg, n)
for term in range(n_terms[deg]):
# call to f
if f_args is None:
f_eval = f(*var)
else:
f_eval = f(*(list(var) + list(f_args)))
coeff[deg][:, term] = list(
sympy.diff(sympy.Matrix([f_eval]), *m_idx.to_var(var))
.subs(zip(var, x)) / m_idx.factorial())
basis[deg][term] = m_idx.to_polynomial(var, x)
m_idx.increment()
for deg in range(k + 1):
poly = list(sympy.Matrix(coeff[deg]) * basis[deg])
m_idx = multiindex(deg, n)
for term in range(n_terms[deg]):
for coord in range(m):
coeff[deg][coord, term] = poly[coord].coeff(pb[deg][term])
m_idx.increment()
self.coeff = coeff
self.update_fun()
def update_fun(self):
"""Compute symbolic and lambdified versions of the jet from the coefficients."""
# symbolic representation by degree
fun_deg = [
sympy.Matrix(self.coeff[deg]) * self.pb[deg]
for deg in range(self.k + 1)
]
self.fun_deg = fun_deg
for deg in range(self.k + 1):
self.fun_deg[deg] = sympy.Matrix(self.coeff[deg]) * self.pb[deg]
# symbolic representation, sum of elements in fun_deg
self.fun = sympy.zeros(self.m, 1)
for deg in range(self.k + 1):
self.fun += self.fun_deg[deg]
self.fun = list(self.fun)
if len(self.fun) == 1:
self.fun = self.fun[0]
# lambdified fun
self.fun_lambdified = sympy.lambdify(self.var, self.fun)
def __call__(self, *args):
"""Evaluate the jet."""
return self.fun_lambdified(*args)
def __getitem__(self, deg):
"""Return symbolic representation of ``deg``th degree jet term."""
res = list(self.fun_deg[deg])
if len(res) == 1:
return res[0]
else:
return res
```
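A minimal usage sketch of the `jet` class defined above. The import path, the target function, and the expansion point are assumptions for illustration; the call signature `jet(f, x, k)` follows the constructor and the repo's own examples.
```python
from normal_forms.jet import jet  # assumed import path

def f(x, y):
    # illustrative 2-variable function with a 2-dimensional output
    return (x * y + x ** 2, x + y ** 3)

J = jet(f, (0.0, 0.0), 3)  # 3-jet of f centered at the origin
print(J.fun)               # symbolic truncated Taylor series
print(J(0.1, 0.2))         # evaluate the lambdified jet
```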
#### File: normal_forms/normal_forms/normal_form.py
```python
import sympy
import numpy as np
import lie_operator
import jet
import bases
import combinatorics
class normal_form(object):
"""A normal form of an autonomous vector field :math:`f:\\mathbb{R}^n\\rightarrow\\mathbb{R}^m`.
Arguments
---------
f : callable
function that accepts ``n`` arguments and returns tuple of length ``m`` numbers, corresponding to mathematical function :math:`f:\\mathbb{R}^n\\rightarrow\\mathbb{R}^m`
x : number if ``n==1`` or tuple of length ``n`` if ``n>=1``
center about which normal form is computed
k : int
maximum degree of normal form
Attributes
----------
n : int
dimension of domain of :math:`f`
m : int
dimension of codomain of :math:`f`
jet : ``normal_forms.jet.jet``
series representation of normal form
L1 : ``normal_forms.lie_operator.lie_operator``
fundamental operator of the normal form, Lie bracket with the linear term :math:`f_1(x)=f'(x)x`, that is :math:`L_{f_1}(\cdot) = [f_1,\cdot]`, see ``normal_forms.lie_operator.lie_operator``
g : list of ``k-1`` ``sympy.Matrix(m,1)`` objects
generators, i.e. homogenous :math:`j^{th}` degree :math:`m`-dimensional polynomial vector fields :math:`g_j` for :math:`j\geq2` used to carry out sequence of near-identity transformations :math:`e^{L_{g_j}}` of :math:`f`
L : ``normal_forms.lie_operator.lie_operator``
Lie operators :math:`L_{g_j}` of the generators in ``g``, see ``normal_forms.lie_operator.lie_operator``
eqv : list of shape ``(k-1,2,.,.)``
coefficients and ``sympy.Matrix(m,1)`` object representation of normal form equivariant vector fields
fun : sympy.Matrix(m,1) object
symbolic representation of normal form
"""
def __init__(self, f, x, k, f_args=None):
self.f = f
self.x = x
self.k = k
if np.array(x).shape == ():
n, x = 1, [x]
else:
n = len(x)
# call to f
if f_args is None:
f_eval = f(*x)
else:
f_eval = f(*(list(x) + list(f_args)))
if np.array(f_eval).shape == ():
m = 1
else:
# call to f
m = len(f_eval)
self.m = m
self.n = n
# list of symbolic variables
var = sympy.symarray('x', (n, ))
# polynomial basis
pb = bases.poly_basis(var)
# vector basis
vb = bases.vf_basis(pb, m)
# k-jet of f centered at x
# call to f
self.jet = jet.jet(f, x, k, f_args, var, pb)
# fundamental operator of normal form theory, Lie bracket with f'
self.L1 = lie_operator.lie_operator(self.jet.fun_deg[1], var, 1, pb, vb)
# work space of coefficients
n_terms = combinatorics.simplicial_list(n, k)
wrk = [[np.zeros(m * n_terms[i + j + 1]) for j in range(k - i)]
for i in range(k)]
# initialize first row of workspace as k-jet
for j in range(k):
wrk[0][j] = np.concatenate(self.jet.coeff[j + 1])
# generators
g = []
# Lie brackets with generators
L = []
# equivariant vector fields
eqv = []
# list of factorials
fac = combinatorics.factorial_list(k)
# algorithm based on Murdock
for deg in range(2, k + 1):
# update workspace and solve for generator
for j, l in enumerate(L):
wrk[1][deg - 2] += l[deg - 1 - j].dot(wrk[0][deg - 2 - j])
f_coeff = np.zeros(m * n_terms[deg])
for i in range(deg):
f_coeff += wrk[i][deg - 1 - i] / fac[i]
g_coeff = np.linalg.lstsq(self.L1[deg], f_coeff)[0]
# normal form coefficients
h_coeff = f_coeff - self.L1[deg].dot(g_coeff)
# represent normal form term in L1.T nullspace basis
u, s, v = np.linalg.svd(self.L1[deg])
rank = min(self.L1[deg].shape) - np.isclose(s, 0).sum()
perp_basis = u[:, rank:]
e_coeff = perp_basis.T.conj().dot(h_coeff)
e = [
sympy.Matrix(perp_basis[:, i].reshape(
m, perp_basis[:, i].shape[0] / m)) * pb[deg]
for i in range(perp_basis.shape[1])
]
# truncate roundoff error
for coeff in [e_coeff, f_coeff, g_coeff, h_coeff]:
coeff[np.isclose(coeff, 0)] = 0
# store generator
g.append(
sympy.Matrix(np.reshape(g_coeff, (m, len(g_coeff) / m))) *
pb[deg])
# update series coeff
self.jet.coeff[deg] = np.reshape(h_coeff, (m, len(h_coeff) / m))
# store equivariant vector fields
eqv.append((e_coeff, e))
# store Lie operator
L.append(lie_operator.lie_operator(g[-1], var, deg, pb, vb))
# update workspace
wrk[1][deg - 2] += L[-1][1].dot(wrk[0][0])
for i in range(2, k - deg + 2):
for j, l in enumerate(L):
wrk[i][deg -
2] += l[deg -
2 + i - j].dot(wrk[i - 1][deg - 2 - j])
self.L = L
self.g = g
self.eqv = eqv
# update series symbolic and lambdified representation
self.jet.update_fun()
# make jet.fun accessible from this class
self.fun = self.jet.fun
def __call__(self, *args):
"""Evaluate the normal form."""
return self.jet.fun_lambdified(*args)
def __getitem__(self, deg):
"""Return symbolic representation of ``deg``th degree normal form term."""
return self.jet[deg]
``` |
{
"source": "joepatmckenna/ohmlr",
"score": 2
} |
#### File: ohmlr/ohmlr/ohmlr.py
```python
from numpy import matlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse.linalg import svds
from scipy.sparse import csc_matrix
class ohmlr(object):
def __init__(self, x_classes=None, y_classes=None, random_coeff=False):
self.x_classes = x_classes
self.y_classes = y_classes
self.random_coeff = random_coeff
if x_classes is not None and y_classes is not None:
n_y_classes = len(y_classes)
n_features = len(x_classes)
n_x_classes = np.asarray([len(x_class) for x_class in x_classes])
n_x_classes_sum = np.sum(n_x_classes)
y_map = {s: i for i, s in enumerate(np.sort(y_classes))}
x_map = [{s: i
for i, s in enumerate(np.sort(x_class))}
for x_class in x_classes]
if random_coeff:
v = np.random.normal(size=n_y_classes)
w = np.array([
np.random.normal(size=(n, n_y_classes))
for n in n_x_classes
])
v -= v.mean()
for i in range(n_features):
w[i] -= w[i].mean(0)
w[i] -= w[i].mean(1)[:, np.newaxis]
else:
v = np.zeros(n_y_classes)
w = np.array(
[np.zeros(shape=(n, n_y_classes)) for n in n_x_classes])
self.n_y_classes = n_y_classes
self.n_features = n_features
self.n_x_classes = n_x_classes
self.n_x_classes_sum = n_x_classes_sum
self.x_map = x_map
self.y_map = y_map
self.v = v
self.w = w
# decision_function(X) Predict confidence scores for samples.
def categorize_(self, u, u_classes):
if u_classes is None:
u_classes = np.unique(u)
u_map = {s: i for i, s in enumerate(u_classes)}
u_int = np.asarray([u_map[ui] for ui in u])
return u_int, u_map
def predict_proba(self, x, return_weights=False):
n_features = self.n_features
v = self.v
w = self.w
x_map = self.x_map
x = np.asarray(x)
n_samples = x.shape[0]
x_int = np.asarray(
[[x_map[j][xij] for j, xij in enumerate(xi)] for xi in x])
h = v + np.asarray([
np.sum([w[j][x_int[i, j]] for j in range(n_features)], 0)
for i in range(n_samples)
])
h = np.asarray(h)
p = np.exp(h)
p /= p.sum(1)[:, np.newaxis]
if return_weights:
return p, h
return p
def predict_log_proba(self, x):
return np.log(self.predict_proba(x))
def predict(self, x):
y_map = self.y_map
p = self.predict_proba(x)
y_int = p.argmax(1)
y = np.asarray([y_map[yi] for yi in y_int])
return y
def score(self, x, y):
return (self.predict(x) == y).mean()
def fit(self,
x,
y,
atol=1e-4,
rtol=1e-3,
max_iter=500,
v_init=None,
w_init=None):
x = np.asarray(x)
y = np.asarray(y)
n_samples, n_features = x.shape
x_classes = self.x_classes
y_classes = self.y_classes
if x_classes is None:
x_classes = n_features * [None]
elif np.asarray(x_classes).ndim == 1:
x_classes = np.tile(
np.asarray(x_classes)[:, np.newaxis], n_features).T
tmp = [self.categorize_(xi, ci) for xi, ci in zip(x.T, x_classes)]
x_int = np.asarray([t[0] for t in tmp]).T
x_map = [t[1] for t in tmp]
n_x_classes = np.asarray([len(m) for m in x_map])
n_x_classes_sum = np.sum(n_x_classes)
n_x_classes_cumsum = np.insert(n_x_classes.cumsum(), 0, 0)
# one-hot encoding of x
x_oh = csc_matrix((np.ones(n_samples * n_features),
(np.repeat(np.arange(n_samples), n_features),
(x_int + n_x_classes_cumsum[:-1]).flatten())),
shape=(n_samples, n_x_classes_sum))
y_int, y_map = self.categorize_(y, y_classes)
n_y_classes = len(y_map)
# one-hot encoding of y
y_oh = csc_matrix((np.ones(n_samples), (np.arange(n_samples), y_int)))
# 'cold' classes
y_hot = (y_oh.toarray().astype(bool))
y_cold = ~(y_oh.toarray().astype(bool))
i1i2 = np.stack([n_x_classes_cumsum[:-1], n_x_classes_cumsum[1:]]).T
if v_init is None:
v = matlib.zeros(n_y_classes)
else:
v = np.asmatrix(v_init)
if w_init is None:
w = matlib.zeros((n_x_classes_sum, n_y_classes))
else:
w = np.asmatrix(np.vstack(w_init))
def solve1(u, pinv):
w = pinv[2].dot(u)
w = np.multiply(pinv[1], w)
w = pinv[0] * w
return w
def solve2(u, pinv):
return solve1(x_oh.T * u, pinv)
if x_oh.shape[0] < x_oh.shape[1]:
solve = solve1
z = x_oh
k = x_oh.shape[0] - 1
else:
solve = solve2
z = x_oh.T * x_oh
k = n_x_classes_sum - n_features + 1
# SVD-based solve of x_oh * w = h
svd = svds(z, k=k)
sv_pinv = svd[1].copy()
zero_sv = np.isclose(sv_pinv, 0)
sv_pinv[zero_sv] = 0.0
sv_pinv[~zero_sv] = 1.0 / sv_pinv[~zero_sv]
pinv = (svd[2].T, sv_pinv[:, np.newaxis], svd[0].T)
# discrepancy
disc = [1.0 / float(n_y_classes)**2 + 1]
err = [1.0 / float(n_y_classes)**2 + 1]
ll = []
for it in range(1, max_iter + 1):
h0 = v
h1 = x_oh * w
p = np.exp(h0 + h1)
p /= p.sum(1)
# additive update
dh = y_oh - p
v = (h0 + dh).mean(0)
w = solve(h1 + dh, pinv)
v -= v.mean()
w -= w.mean(1)
for i1, i2 in i1i2:
w[i1:i2] -= w[i1:i2].mean(0)
# discrepancy: avg 2-norm squared of cold entries
disc.append(np.power(p[y_cold], 2).mean())
err.append((np.asarray(dh)**2).sum(1).mean(0))
ll.append(-np.log(p[y_hot]).mean())
# if disc[-1] > disc[-2]:
# # print('DISC BREAK !!!!!!', it, '!!!!!!!!!!!!')
# break
# if np.abs(err[-1] - err[-2]) < atol:
# # print('AERR BREAK !!!!!!', it, '!!!!!!!!!!!!')
# break
# if np.abs(err[-1] - err[-2]) / err[-2] < rtol:
# # print('RERR BREAK !!!!!!', it, '!!!!!!!!!!!!')
# break
# if it == max_iter:
# # print('NO BREAKKKKKK', it)
v = np.asarray(v).squeeze()
w = np.array([np.asarray(w[i1:i2]) for i1, i2 in i1i2])
disc = disc[1:]
err = err[1:]
self.n_samples = n_samples
self.n_features = n_features
self.n_x_classes = n_x_classes
self.n_y_classes = n_y_classes
self.x = x
self.x_int = x_int
self.x_map = x_map
self.x_oh = x_oh
self.y = y
self.y_int = y_int
self.y_map = y_map
self.y_oh = y_oh
self.pinv = pinv
self.v = v
self.w = w
self.disc = disc
self.err = err
self.ll = ll
# def random(self, n_features=None, n_x_classes=None, n_y_classes=None):
# if self.x_classes is not None:
# n_features = len(self.x_classes)
# n_x_classes = [len(x_class) for x_class in self.x_classes]
# if self.y_classes is not None:
# n_y_classes = len(self.y_classes)
# v = np.random.normal(size=n_y_classes)
# w = np.array(
# [np.random.normal(size=(n, n_y_classes)) for n in n_x_classes])
# v -= v.mean()
# for i in range(n_features):
# w[i] -= w[i].mean(0)
# w[i] -= w[i].mean(1)[:, np.newaxis]
# self.n_features = n_features
# self.n_x_classes = n_x_classes
# self.n_y_classes = n_y_classes
# self.v = v
# self.w = w
# return self
def generate_data(self, n_samples):
n_features = self.n_features
n_x_classes = self.n_x_classes
n_y_classes = self.n_y_classes
v = self.v
w = self.w
x = np.hstack([
np.random.randint(n, size=(n_samples, 1), dtype=int)
for n in n_x_classes
])
h = v + np.array([
np.sum([w[j][x[i, j]] for j in range(n_features)], 0)
for i in range(n_samples)
])
p = np.exp(h)
p /= p.sum(1)[:, np.newaxis]
y = (p.cumsum(1) < np.random.uniform(size=(n_samples, 1))).sum(1)
return x, y
def get_params(self, deep=True):
return dict(
x_classes=self.x_classes,
y_classes=self.y_classes,
random_coeff=self.random_coeff)
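# Hedged usage sketch (the toy data below is an assumption: integer-coded
# categorical features and an integer target, which is what fit() expects):
#   import numpy as np
#   x = np.random.randint(4, size=(200, 3))   # 3 categorical features, 4 levels each
#   y = np.random.randint(2, size=200)        # binary target
#   model = ohmlr()
#   model.fit(x, y)
#   model.score(x, y)                         # training accuracy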
``` |
{
"source": "joepatten/AI_540_assignment_5_WW",
"score": 3
} |
#### File: AI_540_assignment_5_WW/submit/PyAgent.py
```python
from random import randint
import Action
import Orientation
from operator import mul
class WorldState:
def __init__(self):
self.agentLocation = [1,1]
self.agentOrientation = Orientation.RIGHT
self.agentHasArrow = True
self.agentHasGold = False
class Agent:
def __init__(self):
self.worldState = WorldState()
self.previousAction = Action.CLIMB
self.actionList = []
def Initialize(self):
self.worldState.agentLocation = [1,1]
self.worldState.agentOrientation = Orientation.RIGHT
self.worldState.agentHasArrow = True
self.worldState.agentHasGold = False
self.orientation_move = {0:[1,0], 1:[0,1], 2:[-1,0], 3:[0,-1]}
self.previousAction = Action.CLIMB
self.actionList = []
track.wumpus_location = track.next_location
self.old_path = track.path[1:]
track.path = []
if track.gold_location:
self.old_path = track.gold_path[1:]
def Process(self, percept):
self.UpdateState(percept)
# use logic to find wumpus (if possible)
self.check_stenches()
if (not self.actionList):
if (percept['Glitter']):
self.actionList.append(Action.GRAB)
track.gold_location = self.worldState.agentLocation
track.gold_path = track.path + [self.worldState.agentLocation]
elif (self.worldState.agentHasGold and (self.worldState.agentLocation == [1,1])): # Rule 3b
self.actionList.append(Action.CLIMB)
elif (percept['Bump']):
self.Move(percept['Bump'])
self.actionList.append(1)
track.path = track.path[:-1]
else:
if (percept['Stench']):
if self.worldState.agentLocation not in track.stenches:
track.stenches.append(self.worldState.agentLocation)
if len(self.old_path) > 0:
next_loc = self.old_path[0]
next_move = self.get_move(next_loc)
if self.orientation_move[self.worldState.agentOrientation] == next_move:
self.actionList.append(Action.GOFORWARD)
track.path.append(self.worldState.agentLocation)
self.old_path = self.old_path[1:]
else:
new_direction = self.get_direction(next_move)
turns = self.get_turns(new_direction) % 4
if turns <= 2:
for _ in range(turns):
self.actionList.append(2) #turn right
else:
for _ in range(4 - turns):
self.actionList.append(1)
#if have gold, follow path back to start
elif track.path and self.worldState.agentHasGold:
next_loc = track.path[-1]
next_move = self.get_move(next_loc)
if self.orientation_move[self.worldState.agentOrientation] == next_move:
self.actionList.append(Action.GOFORWARD)
track.path = track.path[:-1]
else:
self.minimal_turn(next_move)
else:
# uninformed search
track.next_location = self.add_vectors(self.worldState.agentLocation,self.orientation_move[self.worldState.agentOrientation])
#if we are about to run into a wumpus, then pick another route
if track.next_location == track.wumpus_location or percept['Bump']:
self.actionList.append(1)
elif track.next_location in (track.explored):
potential_moves = self.get_all_moves()
# all potential are explored go to previous path
if len(potential_moves) == 0:
#turn and then goforward to previous location in path
previous_location = track.path[-1]
next_move = self.add_vectors(previous_location, self.worldState.agentLocation, negative=True)
self.minimal_turn(next_move)
self.actionList.append(Action.GOFORWARD)
track.path = track.path[:-1]
else:
# turn left until heading toward unexplored
self.actionList.append(1)
else:
# if no wumpus and we are exploring in unexplored, go ahead!
self.actionList.append(Action.GOFORWARD)
track.path.append(self.worldState.agentLocation)
action = self.actionList.pop(0)
self.previousAction = action
return action
def minimal_turn(self, next_move):
new_direction = self.get_direction(next_move)
turns = self.get_turns(new_direction) % 4
if turns <= 2:
for _ in range(turns):
self.actionList.append(2) #turn right
else:
for _ in range(4 - turns):
self.actionList.append(1)
def get_turns(self, new_direction):
return self.worldState.agentOrientation - new_direction
def get_direction(self, new_move):
for k,v in self.orientation_move.items():
if v == new_move:
return k
def get_all_moves(self):
potential_moves = []
for k in self.orientation_move:
potential_move = self.add_vectors(self.orientation_move[k], self.worldState.agentLocation)
if potential_move not in track.explored and potential_move != track.wumpus_location:
potential_moves.append(self.orientation_move[k])
return potential_moves
def UpdateState(self, percept):
currentOrientation = self.worldState.agentOrientation
if (self.previousAction == Action.GOFORWARD):
if (not percept['Bump']):
self.Move(percept['Bump'])
if (self.previousAction == Action.TURNLEFT):
self.worldState.agentOrientation = (currentOrientation + 1) % 4
if (self.previousAction == Action.TURNRIGHT):
currentOrientation -= 1
if (currentOrientation < 0):
currentOrientation = 3
self.worldState.agentOrientation = currentOrientation
if (self.previousAction == Action.GRAB):
self.worldState.agentHasGold = True # Only GRAB when there's gold
if (self.previousAction == Action.SHOOT):
self.worldState.agentHasArrow = False
# Nothing to do for CLIMB
def negative_vector(self, vec):
return [-v for v in vec]
def add_vectors(self, vec1, vec2, negative=False):
if negative:
vec2 = self.negative_vector(vec2)
return [v1 + v2 for v1,v2 in zip(vec1, vec2)]
def get_move(self, new_loc):
# get next move from a PATH
neg_current_loc = self.negative_vector(self.worldState.agentLocation)
next_move = self.add_vectors(new_loc, neg_current_loc)
return next_move
def Move(self, bump):
X = self.worldState.agentLocation[0]
Y = self.worldState.agentLocation[1]
if (self.worldState.agentOrientation == Orientation.RIGHT):
X = X + 1
if (self.worldState.agentOrientation == Orientation.UP):
Y = Y + 1
if (self.worldState.agentOrientation == Orientation.LEFT):
X = X - 1
if (self.worldState.agentOrientation == Orientation.DOWN):
Y = Y - 1
if [X,Y] not in track.explored:
track.explored.append([X,Y])
if not bump:
self.worldState.agentLocation = [X,Y]
def check_stenches(self):
if not track.wumpus_location:
wumpus_location = [0,0]
# looking at when we know at least 3 stench areas
if len(track.stenches) >= 3:
while reduce(mul, wumpus_location, 1) == 0:
for i in range(len(track.stenches)):
for j in range(2):
if track.stenches[i][j] == track.stenches[(i+1)%len(track.stenches)][j]:
wumpus_location[j] = track.stenches[i][j]
wumpus_location[(j+1)%2] = max(track.stenches[(i+1)%len(track.stenches)][(j+1)%2],track.stenches[i][(j+1)%2])-1
elif len(track.stenches) == 2:
s1, s2 = track.stenches
if s1[0] < s2[0]:
s1, s2 = s2, s1
# if s1 and s2 share 1 of same coords, then wumpus is between them
for j in range(2):
if s1[j] == s2[j]:
wumpus_location[j] = s1[j]
wumpus_location[(j+1)%2] = max(s1[(j+1)%2], s2[(j+1)%2]) - 1
# else we need to check 2 spots that are kitty corner
if reduce(mul, wumpus_location, 1) == 0:
if s1[0] > s2[1] and s1[1] < s2[1]:
if [s1[0]-2, s1[1]] in track.explored or [s2[0], s2[1]-2] in track.explored or [s1[0]-1, s1[1]] in track.explored:
wumpus_location = [s1[0], s2[1]]
elif [s2[0]+2, s2[1]] in track.explored or [s1[0], s1[1]+2] in track.explored or [s2[0]+1, s2[1]] in track.explored:
wumpus_location = [s2[0], s1[1]]
else:
if [s1[0]-2, s1[1]] in track.explored or [s2[0], s2[1]+2] in track.explored or [s1[0]-1, s1[1]] in track.explored :
wumpus_location = [s1[0], s2[1]]
elif [s2[0]+2, s2[1]] in track.explored or [s1[0], s1[1]-2] in track.explored or [s2[0]+1, s2[1]] in track.explored:
wumpus_location = [s2[0], s1[1]]
elif len(track.stenches) == 1:
s1 = track.stenches[0]
#need to have three explored nodes
adjacents = [[s1[0], s1[1]+2],[s1[0]+2, s1[1]],[s1[0], s1[1]-2],[s1[0]-2, s1[1]]]
explored_adj = []
for adjacent in adjacents:
if adjacent in track.explored:
explored_adj.append(adjacent)
if len(explored_adj) == 3:
for adjacent in adjacents:
if adjacent not in explored_adj:
for j in range(2):
if adjacent[j] == s1[j]:
wumpus_location[j] = s1[j]
wumpus_location[(j+1)%2] = max(s1[(j+1)%2], adjacent[(j+1)%2]) - 1
if reduce(mul, wumpus_location, 1) != 0:
track.wumpus_location = wumpus_location
myAgent = 0
track = 0
class Tracking():
def __init__(self):
self.path = []
self.stenches = []
self.gold_location = None
self.wumpus_location = None
self.next_location = None
self.explored = [[1,1]]
self.gold_path = []
def PyAgent_Constructor ():
print "PyAgent_Constructor"
global myAgent
global track
track = Tracking()
myAgent = Agent()
def PyAgent_Destructor ():
global myAgent
global track
track = Tracking()
myAgent = Agent()
print "PyAgent_Destructor"
def PyAgent_Initialize ():
print "PyAgent_Initialize"
global myAgent
global track
myAgent.Initialize()
def PyAgent_Process (stench,breeze,glitter,bump,scream):
global myAgent
percept = {'Stench': bool(stench), 'Breeze': bool(breeze), 'Glitter': bool(glitter), 'Bump': bool(bump), 'Scream': bool(scream)}
return myAgent.Process(percept)
def PyAgent_GameOver (score):
print "PyAgent_GameOver: score = " + str(score)
``` |
{
"source": "joepatten/wumpus_world_ai",
"score": 3
} |
#### File: joepatten/wumpus_world_ai/PyAgent.py
```python
import random
import Action
import Orientation
class Py_Agent:
def __init__(self):
self.step = 0
self.has_gold = False
self.has_arrow = True
self.orient = 0
self.location = [1, 1]
self.orientations = ['RIGHT', 'UP', 'LEFT', 'DOWN']
self.go_forward = False
def get_location(self):
return self.location
def get_orientation(self):
return self.orientations[self.orient]
def shoot_arrow(self):
self.has_arrow = False
def turn_left(self):
self.orient = (self.orient + 1) % 4
def turn_right(self):
self.orient = (self.orient - 1) % 4
def grab_gold(self):
self.has_gold = True
def should_climb(self):
return pa.has_gold and pa.location == [1, 1]
def update_location(self, bump):
orientation = self.get_orientation()
if bump != 1:
if orientation == 'RIGHT':
self.location[0] += 1
if orientation == 'LEFT':
self.location[0] -= 1
if orientation == 'UP':
self.location[1] += 1
if orientation == 'DOWN':
self.location[1] -= 1
self.go_forward = False
pa = Py_Agent()
def PyAgent_Constructor():
print("PyAgent_Constructor")
def PyAgent_Destructor():
print("PyAgent_Destructor")
def PyAgent_Initialize():
print("PyAgent_Initialize")
def PyAgent_Process(stench, breeze, glitter, bump, scream):
# 4) Update location if you have gone forward in last turn
if pa.go_forward:
pa.update_location(bump)
# 3a) Grab gold if see glitter
if (glitter == 1):
pa.grab_gold()
return Action.GRAB
# 3b) Climb out if at location [1, 1] and have gold
if pa.should_climb:
Action.CLIMB
# 3c) Shoot arrow if smell stench and if have arrow
if (stench == 1):
if pa.has_arrow:
pa.shoot_arrow()
return Action.SHOOT
# 3d) If bump into wall, randomly turn right or left
if (bump == 1):
random_draw = random.random()
if random_draw > .5:
pa.turn_left()
return Action.TURNLEFT
else:
pa.turn_right()
return Action.TURNRIGHT
# 3e) Else, go forward
pa.go_forward = True
return Action.GOFORWARD
def PyAgent_GameOver(score):
print("PyAgent_GameOver: score = " + str(score))
``` |
{
"source": "joe-peak/blog-site",
"score": 3
} |
#### File: joe-peak/blog-site/manage.py
```python
from datetime import datetime
from flask import Flask, render_template, url_for
from flask_script import Manager
from flask_bootstrap import Bootstrap
from flask_moment import Moment
app = Flask(__name__)
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
@app.route('/', methods = ['GET'])
def index():
print(datetime.utcnow())
return render_template('index.html', name = 'Joe.C.Zhou', current_time = datetime.utcnow()), 200
@app.route('/index')
def user():
return 'Hello Flask'
@app.errorhandler(404)
def page_not_found(e):
    return render_template('404.html'), 404
@app.errorhandler(403)
def forbidden(e):
    return render_template('403.html'), 403
@app.errorhandler(500)
def internal_server_error(e):
    return render_template('500.html'), 500
if __name__ == '__main__':
manager.run()
``` |
{
"source": "Joeper214/barm",
"score": 2
} |
#### File: app/behaviors/eventbehavior.py
```python
from ferris.core.ndb import Behavior
from app.models.allocation import Allocation
from plugins import calendar
from datetime import timedelta
import json
import datetime
class EventBehavior(Behavior):
def __init__(self, Model):
self.Model = Allocation
def after_delete(self, key):
pass
@classmethod
def isWeekend(cls, myDate):
return True if myDate.weekday() == 5 or myDate.weekday() == 6 else False
def after_put(self, instance):
params = {}
total_hours = instance.total_hours
alloc_params = Allocation.find_allocation(instance.allocation_id)
reflect_hours = Allocation.minusRemaining(alloc_params, total_hours)
params['name'] = alloc_params.resource_name
params['email'] = alloc_params.email
params['project_name'] = alloc_params.project_name
params['color'] = alloc_params.color
params['event_id'] = instance.key
myDate = instance.start_date
while total_hours > 0:
if self.isWeekend(myDate) is False:
conv_date = myDate.strftime('%Y-%m-%d')
params['start_date'] = conv_date
params['end'] = conv_date
if total_hours < instance.frequency:
params['summary'] = alloc_params.project_name+" ("+str(total_hours)+")"
params['alloc_hours'] = total_hours
Allocation.test_call(params)
else:
params['summary'] = alloc_params.project_name+" ("+ str(instance.frequency)+")"
total_hours -= instance.frequency
params['alloc_hours'] = instance.frequency
Allocation.test_call(params)
myDate += datetime.timedelta(days=1)
```
#### File: app/controllers/projects.py
```python
from ferris import Controller, messages, route_with
from ferris.components.pagination import Pagination
from app.models.project import Project
from datetime import timedelta
import json
import datetime
class Projects(Controller):
class Meta:
components = (messages.Messaging, Pagination,)
Model = Project
pagination_limit = 25
prefixes = ('api',)
@route_with('/api/projects/list', methods=['GET'])
def api_list(self):
self.context['data'] = self.components.pagination.paginate(query=Project.list_all())
@route_with('/api/projects/create', methods=['POST'])
def api_create(self):
params = json.loads(self.request.body)
hours = int(params['billable_hours'])
params['billable_hours'] = hours
params['remaining_hours'] = hours
params['start_date'] = datetime.datetime.utcfromtimestamp(float(params['start_date']))
self.context['data'] = Project.create(params)
@route_with('/api/projects/:<key>', methods=['GET'])
def api_get(self, key):
project = self.util.decode_key(key).get()
self.context['data'] = project
@route_with('/api/projects/:<key>', methods=['POST'])
def api_update(self, key):
params = json.loads(self.request.body)
keyS = self.util.decode_key(params['key']['urlsafe'])
Project.updateHours(params,keyS)
@route_with('/api/projects/:<key>', methods=['DELETE'])
def api_delete(self, key):
project = self.util.decode_key(key).get()
project.delete()
return 200
``` |
{
"source": "Joeper214/blueoil",
"score": 2
} |
#### File: blueoil/blueoil/blocks.py
```python
import tensorflow as tf
from blueoil.layers import batch_norm, conv2d
# TODO(wakisaka): should be replace to conv_bn_act().
def darknet(name, inputs, filters, kernel_size, is_training=tf.constant(False), activation=None, data_format="NHWC"):
"""Darknet19 block.
Do convolution, batch_norm, bias, leaky_relu activation.
Ref: https://arxiv.org/pdf/1612.08242.pdf
https://github.com/pjreddie/darknet/blob/3bf2f342c03b0ad22efd799d5be9990c9d792354/cfg/darknet19.cfg
https://github.com/pjreddie/darknet/blob/8215a8864d4ad07e058acafd75b2c6ff6600b9e8/cfg/yolo.2.0.cfg
"""
if data_format == "NCHW":
channel_data_format = "channels_first"
elif data_format == "NHWC":
channel_data_format = "channels_last"
else:
raise ValueError("data format must be 'NCHW' or 'NHWC'. got {}.".format(data_format))
with tf.compat.v1.variable_scope(name):
if activation is None:
def activation(x): return tf.nn.leaky_relu(x, alpha=0.1, name="leaky_relu")
conv = conv2d("conv", inputs, filters=filters, kernel_size=kernel_size,
activation=None, use_bias=False, data_format=channel_data_format,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),) # he initializer
# TODO(wakisaka): Should be the same as darknet batch norm.
# https://github.com/tensorflow/tensorflow/blob/r1.1/tensorflow/contrib/layers/python/layers/layers.py
# https://github.com/pjreddie/darknet/blob/8215a8864d4ad07e058acafd75b2c6ff6600b9e8/src/batchnorm_layer.c#L135
batch_normed = batch_norm("bn", conv, is_training=is_training, decay=0.99, scale=True, center=True,
data_format=data_format)
tf.compat.v1.summary.histogram("batch_normed", batch_normed)
output = activation(batch_normed)
tf.compat.v1.summary.histogram("output", output)
return output
# TODO(wakisaka): should be replace to conv_bn_act().
def lmnet_block(
name,
inputs,
filters,
kernel_size,
custom_getter=None,
is_training=tf.constant(True),
activation=None,
use_bias=True,
use_batch_norm=True,
is_debug=False,
data_format='channels_last',
):
"""Block used in lmnet
Combine convolution, bias, weights quantization and activation quantization as one layer block.
Args:
name(str): Block name, as scope name.
inputs(tf.Tensor): Inputs.
filters(int): Number of filters for convolution.
kernel_size(int): Kernel size.
custom_getter(callable): Custom getter for `tf.compat.v1.variable_scope`.
is_training(tf.constant): Flag if training or not.
activation(callable): Activation function.
use_bias(bool): If use bias.
use_batch_norm(bool): If use batch norm.
is_debug(bool): If is debug.
data_format(string): channels_last for NHWC. channels_first for NCHW. Default is channels_last.
Returns:
tf.Tensor: Output of current layer block.
"""
with tf.compat.v1.variable_scope(name, custom_getter=custom_getter):
conv = tf.layers.conv2d(inputs, filters=filters, kernel_size=kernel_size, padding='SAME', use_bias=False,
data_format=data_format)
if use_batch_norm:
# TODO(wenhao) hw supports `tf.contrib.layers.batch_norm` currently. change it when supported.
# batch_normed = tf.layers.batch_normalization(conv,
# momentum=0.99,
# scale=True,
# center=True,
# training=is_training)
four_letter_data_format = 'NHWC' if data_format == 'channels_last' else 'NCHW'
batch_normed = tf.contrib.layers.batch_norm(conv,
decay=0.99,
scale=True,
center=True,
updates_collections=None,
is_training=is_training,
data_format=four_letter_data_format)
else:
batch_normed = conv
if use_bias:
bias = tf.get_variable('bias', shape=filters, initializer=tf.zeros_initializer)
biased = batch_normed + bias
else:
biased = batch_normed
if activation:
output = activation(biased)
else:
output = biased
if is_debug:
tf.compat.v1.summary.histogram('conv', conv)
tf.compat.v1.summary.histogram('batch_normed', batch_normed)
tf.compat.v1.summary.histogram('biased', biased)
tf.compat.v1.summary.histogram('output', output)
return output
def conv_bn_act(
name,
inputs,
filters,
kernel_size,
weight_decay_rate=0.0,
is_training=tf.constant(False),
activation=None,
batch_norm_decay=0.99,
data_format="NHWC",
enable_detail_summary=False,
):
"""Block of convolution -> batch norm -> activation.
Args:
name (str): Block name, as scope name.
inputs (tf.Tensor): Inputs.
filters (int): Number of filters (output channel) for convolution.
kernel_size (int): Kernel size.
weight_decay_rate (float): Number of L2 regularization be applied to convolution weights.
Need `tf.losses.get_regularization_loss()` in loss function to apply this parameter to loss.
is_training (tf.constant): Flag if training or not for batch norm.
activation (callable): Activation function.
batch_norm_decay (float): Batch norm decay rate.
data_format (string): Format for inputs data. NHWC or NCHW.
enable_detail_summary (bool): Flag for summarize feature maps for each operation on tensorboard.
Returns:
output (tf.Tensor): Output of this block.
"""
if data_format == "NCHW":
channel_data_format = "channels_first"
elif data_format == "NHWC":
channel_data_format = "channels_last"
else:
raise ValueError("data format must be 'NCHW' or 'NHWC'. got {}.".format(data_format))
with tf.compat.v1.variable_scope(name):
conved = tf.layers.conv2d(
inputs,
filters=filters,
kernel_size=kernel_size,
padding='SAME',
use_bias=False,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(), # he initializer
data_format=channel_data_format,
kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay_rate),
)
batch_normed = tf.contrib.layers.batch_norm(
conved,
decay=batch_norm_decay,
updates_collections=None,
is_training=is_training,
center=True,
scale=True,
data_format=data_format,
)
if activation:
output = activation(batch_normed)
else:
output = batch_normed
if enable_detail_summary:
tf.compat.v1.summary.histogram('conv_output', conved)
tf.compat.v1.summary.histogram('batch_norm_output', batch_normed)
tf.compat.v1.summary.histogram('output', output)
return output
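# Hedged usage sketch (tensor names and argument values are illustrative only):
#   x = conv_bn_act("block1", images, filters=32, kernel_size=3,
#                   weight_decay_rate=1e-4, is_training=is_training,
#                   activation=tf.nn.relu, data_format="NHWC")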
def _densenet_conv_bn_act(
name,
inputs,
growth_rate,
bottleneck_rate,
weight_decay_rate,
is_training,
activation,
batch_norm_decay,
data_format,
enable_detail_summary,
):
"""Densenet block.
In order to fast execute for quantization, use order of layers
convolution -> batch norm -> activation instead of paper original's batch norm -> activation -> convolution.
This is not `Dense block` called by original paper, this is the part of `Dense block`.
"""
bottleneck_channel = growth_rate * bottleneck_rate
with tf.compat.v1.variable_scope(name):
output_1x1 = conv_bn_act(
"bottleneck_1x1",
inputs,
filters=bottleneck_channel,
kernel_size=1,
weight_decay_rate=weight_decay_rate,
is_training=is_training,
activation=activation,
batch_norm_decay=batch_norm_decay,
data_format=data_format,
enable_detail_summary=enable_detail_summary,
)
output_3x3 = conv_bn_act(
"conv_3x3",
output_1x1,
filters=growth_rate,
kernel_size=3,
weight_decay_rate=weight_decay_rate,
is_training=is_training,
activation=activation,
batch_norm_decay=batch_norm_decay,
data_format=data_format,
enable_detail_summary=enable_detail_summary,
)
if data_format == "NHWC":
concat_axis = -1
if data_format == "NCHW":
concat_axis = 1
output = tf.concat([inputs, output_3x3], axis=concat_axis)
if enable_detail_summary:
tf.compat.v1.summary.histogram('output', output)
return output
def densenet_group(
name,
inputs,
num_blocks,
growth_rate,
bottleneck_rate=4,
weight_decay_rate=0.0,
is_training=tf.constant(False),
activation=None,
batch_norm_decay=0.99,
data_format="NHWC",
enable_detail_summary=False,
):
"""Group of Densenet blocks.
paper: https://arxiv.org/abs/1608.06993
In the original paper, this method is called `Dense block` which consists of some 1x1 and 3x3 conv blocks
which batch norm -> activation(relu) -> convolution(1x1) and batch norm -> activation -> convolution(3x3).
But in this method, the order of each block change to convolution -> batch norm -> activation.
Args:
name (str): Block name, as scope name.
inputs (tf.Tensor): Inputs.
num_blocks (int): Number of dense blocks which consist of 1x1 and 3x3 conv.
growth_rate (int): How many filters (out channel) to add each layer.
bottleneck_rate (int): The factor to be calculated bottle-neck 1x1 conv output channel.
`bottleneck_channel = growth_rate * bottleneck_rate`.
The default value `4` is from original paper.
        weight_decay_rate (float): L2 regularization rate applied to the convolution weights.
is_training (tf.constant): Flag if training or not.
activation (callable): Activation function.
batch_norm_decay (float): Batch norm decay rate.
        enable_detail_summary (bool): Flag to summarize feature maps of each operation on TensorBoard.
        data_format (string): Format of the input data. NHWC or NCHW.
Returns:
tf.Tensor: Output of current block.
"""
with tf.compat.v1.variable_scope(name):
x = inputs
for i in range(num_blocks):
x = _densenet_conv_bn_act(
"densenet_block_{}".format(i),
x,
growth_rate,
bottleneck_rate,
weight_decay_rate,
is_training,
activation,
batch_norm_decay,
data_format,
enable_detail_summary,
)
return x
```
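For orientation, a minimal, hypothetical sketch of wiring `densenet_group` into a TF1-style graph; the input shape, growth rate, and other hyperparameters are illustrative only, and the sketch assumes this module's helpers are importable.
```python
import tensorflow as tf

# Hypothetical usage of densenet_group above (TF1 compat graph mode); all values are illustrative.
graph = tf.Graph()
with graph.as_default():
    # NHWC input with 16 channels.
    images = tf.compat.v1.placeholder(tf.float32, shape=[None, 32, 32, 16], name="images")
    is_training = tf.constant(False, name="is_training")

    output = densenet_group(
        "dense_group_0",
        images,
        num_blocks=4,
        growth_rate=12,
        bottleneck_rate=4,
        weight_decay_rate=1e-4,
        is_training=is_training,
        activation=tf.nn.relu,
        data_format="NHWC",
    )
    # Each block concatenates growth_rate new channels, so 16 + 4 * 12 = 64 output channels.
```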
#### File: blueoil/cmd/export.py
```python
import os
import shutil
import click
import PIL
import numpy as np
import tensorflow as tf
from blueoil import environment
from blueoil.utils.image import load_image
from blueoil.utils import config as config_util
from blueoil.utils import executor
DEFAULT_INFERENCE_TEST_DATA_IMAGE = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"fixtures", "export_inference_test_data_images",
"5605039097_05baa93bfd_m.jpg")
# TODO(wakisaka): duplicated function with blueoil/cmd/measure_latency.py
def _pre_process(raw_image, pre_processor, data_format):
image = pre_processor(image=raw_image)['image']
if data_format == 'NCHW':
image = np.transpose(image, [2, 0, 1])
return image
def _save_all_operation_outputs(image_path, output_dir, image, raw_image, all_outputs, image_size):
shutil.copy(image_path, os.path.join(output_dir))
tmp_image = PIL.Image.open(image_path)
tmp_image.save(os.path.join(output_dir, "raw_image.png"))
np.save(os.path.join(output_dir, "raw_image.npy"), raw_image)
np.save(os.path.join(output_dir, "preprocessed_image.npy"), image)
for _output in all_outputs:
np.save(os.path.join(output_dir, "{}.npy".format(_output['name'])), _output['val'])
def _minimal_operations(sess):
"""Get inference operations."""
minimal_graph_def = executor.convert_variables_to_constants(sess)
minimal_graph = tf.Graph()
with minimal_graph.as_default():
tf.import_graph_def(minimal_graph_def, name="")
ops = minimal_graph.get_operations()
return ops
def _export(config, restore_path, image_path):
if restore_path is None:
restore_file = executor.search_restore_filename(environment.CHECKPOINTS_DIR)
restore_path = os.path.join(environment.CHECKPOINTS_DIR, restore_file)
print("Restore from {}".format(restore_path))
if not os.path.exists("{}.index".format(restore_path)):
raise Exception("restore file {} dont exists.".format(restore_path))
output_root_dir = os.path.join(environment.EXPERIMENT_DIR, "export")
output_root_dir = os.path.join(output_root_dir, os.path.basename(restore_path))
if not os.path.exists(output_root_dir):
os.makedirs(output_root_dir)
graph = tf.Graph()
ModelClass = config.NETWORK_CLASS
network_kwargs = dict((key.lower(), val) for key, val in config.NETWORK.items())
with graph.as_default():
model = ModelClass(
classes=config.CLASSES,
is_debug=config.IS_DEBUG,
**network_kwargs,
)
is_training = tf.constant(False, name="is_training")
images_placeholder, _ = model.placeholders()
model.inference(images_placeholder, is_training)
init_op = tf.compat.v1.global_variables_initializer()
saver = tf.compat.v1.train.Saver(max_to_keep=50)
session_config = tf.compat.v1.ConfigProto()
sess = tf.compat.v1.Session(graph=graph, config=session_config)
sess.run(init_op)
saver.restore(sess, restore_path)
main_output_dir = os.path.join(output_root_dir, "{}x{}".format(config.IMAGE_SIZE[0], config.IMAGE_SIZE[1]))
if not os.path.exists(main_output_dir):
os.makedirs(main_output_dir)
# save inference values as npy files for runtime inference test and debug.
if image_path:
all_ops = _minimal_operations(sess)
inference_values_output_dir = os.path.join(main_output_dir, "inference_test_data")
if not os.path.exists(inference_values_output_dir):
os.makedirs(inference_values_output_dir)
raw_image = load_image(image_path)
image = _pre_process(raw_image, config.PRE_PROCESSOR, config.DATA_FORMAT)
images = np.expand_dims(image, axis=0)
feed_dict = {
images_placeholder: images,
}
all_outputs = []
index = 0
for op in all_ops:
for op_output in op.outputs:
# HACK: This is for TensorFlow bug workaround.
# We can remove following 4 lines once it's been resolved in TensorFlow
# Issue link: https://github.com/tensorflow/tensorflow/issues/36456
if (not tf.config.experimental.list_physical_devices('GPU')
and "FusedBatchNormV3" in op_output.name
and int(op_output.name.split(":")[1]) in set(range(1, 6))):
continue
val = sess.run(op_output.name, feed_dict=feed_dict)
name = '%03d' % index + '_' + op_output.name.replace('/', '_')
all_outputs.append({'val': val, 'name': name})
index += 1
_save_all_operation_outputs(
image_path, inference_values_output_dir, image, raw_image, all_outputs, config.IMAGE_SIZE)
yaml_names = config_util.save_yaml(main_output_dir, config)
pb_name = executor.save_pb_file(sess, main_output_dir)
message = """
Create pb and yaml files in: {}
pb: {}
yaml: {}, {}
""".format(main_output_dir,
pb_name,
*yaml_names)
if image_path:
message += "Create npy files in under `inference_test_data` folder \n"
message += "npy: {}".format([d["name"] for d in all_outputs] + ["raw_image", "preprocessed_image", ])
print(message)
print("finish")
return main_output_dir
def run(experiment_id,
restore_path=None,
image_size=(None, None),
image=DEFAULT_INFERENCE_TEST_DATA_IMAGE,
config_file=None):
environment.init(experiment_id)
config = config_util.load_from_experiment()
if config_file:
config = config_util.merge(config, config_util.load(config_file))
config.BATCH_SIZE = 1
config.NETWORK.BATCH_SIZE = 1
config.DATASET.BATCH_SIZE = 1
if list(image_size) != [None, None]:
config.IMAGE_SIZE = list(image_size)
config.NETWORK.IMAGE_SIZE = list(image_size)
# override pre processes image size.
if config.PRE_PROCESSOR:
config.PRE_PROCESSOR.set_image_size(image_size)
# override post processes image size.
if config.POST_PROCESSOR:
config.POST_PROCESSOR.set_image_size(image_size)
print("Override IMAGE_SIZE", config.IMAGE_SIZE)
executor.init_logging(config)
config_util.display(config)
return _export(config, restore_path, image)
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option(
"-i",
"--experiment_id",
help="id of this experiment.",
required=True,
)
@click.option(
"--restore_path",
help="restore ckpt file base path. e.g. saved/experiment/checkpoints/save.ckpt-10001",
default=None,
)
@click.option(
'--image_size',
nargs=2,
type=click.Tuple([int, int]),
help="input image size height and width. if it is not provided, it restore from saved experiment config. "
"e.g. --image_size 320 320",
# NOQA
default=(None, None),
)
@click.option(
"--image",
help="path of target image",
default=DEFAULT_INFERENCE_TEST_DATA_IMAGE,
)
@click.option(
"-c",
"--config_file",
help="config file path. override saved experiment config.",
)
def main(experiment_id, restore_path, image_size, image, config_file):
"""Exporting a trained model to proto buffer files and meta config yaml.
In the case with `image` option, create each layer output value npy files into
`export/{restore_path}/{image_size}/inference_test_data/**.npy` as expected value for inference test and debug.
"""
run(experiment_id, restore_path, image_size, image, config_file)
if __name__ == '__main__':
main()
```
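The command is normally driven through the click entry point above; for reference, a hedged sketch of calling the export flow programmatically. The experiment id is a placeholder and must already exist under the blueoil experiment directory.
```python
# Hypothetical programmatic invocation of the export flow defined above.
# "my_classification_experiment" is a placeholder experiment id.
output_dir = run(
    "my_classification_experiment",
    restore_path=None,                        # None -> the latest checkpoint is searched
    image_size=(128, 128),                    # overrides the saved IMAGE_SIZE
    image=DEFAULT_INFERENCE_TEST_DATA_IMAGE,  # bundled sample image for the npy dump
    config_file=None,
)
print("export artifacts written to:", output_dir)
```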
#### File: blueoil/cmd/init.py
```python
from collections import OrderedDict
import inspect
import re
import shutil
import inquirer
import blueoil.data_augmentor as augmentor
from blueoil.generate_lmnet_config import generate
from blueoil.data_processor import Processor
task_type_choices = [
'classification',
'object_detection',
'semantic_segmentation',
'keypoint_detection'
]
classification_network_definitions = [
{
'name': 'LmnetV1Quantize',
'desc': 'Quantized Lmnet version 1. Accuracy is better than LmnetV0Quantize.',
},
{
'name': 'ResNetQuantize',
'desc': 'Quantized ResNet 18. Accuracy is better than LmnetV1Quantize.',
},
]
object_detection_network_definitions = [
{
'name': 'LMFYoloQuantize',
'desc': 'YOLO-like object detection network.',
},
]
semantic_segmentation_network_definitions = [
{
'name': 'LmSegnetV1Quantize',
'desc': 'Quantized LeapMind original semantic segmentation network, version 1.',
},
]
keypoint_detection_network_definitions = [
{
'name': 'LmSinglePoseV1Quantize',
'desc': 'Quantized LeapMind original single-person pose estimation network, version 1.',
},
]
IMAGE_SIZE_VALIDATION = {
"LmnetV1Quantize": {
"max_size": 512,
"divider": 16,
},
"ResNetQuantize": {
"max_size": 512,
"divider": 16,
},
"LMFYoloQuantize": {
"max_size": 480,
"divider": 32,
},
"LmSegnetV1Quantize": {
"max_size": 512,
"divider": 8,
},
"LmSinglePoseV1Quantize": {
"max_size": 512,
"divider": 8,
},
}
classification_dataset_formats = [
{
'name': 'Caltech101',
        'desc': 'Caltech101 compatible',
},
{
'name': 'DeLTA-Mark for Classification',
'desc': 'Dataset for classification created by DeLTA-Mark',
},
]
object_detection_dataset_formats = [
{
'name': 'OpenImagesV4',
'desc': 'OpenImagesV4 compatible',
},
{
'name': 'DeLTA-Mark for Object Detection',
'desc': 'Dataset for object detection created by DeLTA-Mark',
},
]
semantic_segmentation_dataset_formats = [
{
'name': 'CamvidCustom',
        'desc': 'CamVid-based custom format',
},
]
keypoint_detection_dataset_formats = [
{
'name': 'Mscoco for Single-Person Pose Estimation',
'desc': 'Mscoco 2017 for Single-Person Pose Estimation',
},
]
learning_rate_schedule_map = OrderedDict([
("constant", "'constant' -> constant learning rate."),
("cosine", "'cosine' -> cosine learning rate."),
("2-step-decay", "'2-step-decay' -> learning rate decrease by 1/10 on {epochs}/2 and {epochs}-1."),
("3-step-decay", "'3-step-decay' -> learning rate decrease by 1/10 on {epochs}/3 and {epochs}*2/3 and {epochs}-1"),
(
"3-step-decay-with-warmup",
"'3-step-decay-with-warmup' -> "
"warmup learning rate 1/1000 in first epoch, then train the same way as '3-step-decay'"
),
])
def network_name_choices(task_type):
if task_type == 'classification':
return [definition['name'] for definition in classification_network_definitions]
elif task_type == 'object_detection':
return [definition['name'] for definition in object_detection_network_definitions]
elif task_type == 'semantic_segmentation':
return [definition['name'] for definition in semantic_segmentation_network_definitions]
elif task_type == 'keypoint_detection':
return [definition['name'] for definition in keypoint_detection_network_definitions]
def dataset_format_choices(task_type):
if task_type == 'classification':
return [definition['name'] for definition in classification_dataset_formats]
elif task_type == 'object_detection':
return [definition['name'] for definition in object_detection_dataset_formats]
elif task_type == 'semantic_segmentation':
return [definition['name'] for definition in semantic_segmentation_dataset_formats]
elif task_type == 'keypoint_detection':
return [definition['name'] for definition in keypoint_detection_dataset_formats]
def default_batch_size(task_type):
default_task_type_batch_sizes = {
'classification': '10',
'object_detection': '16',
'semantic_segmentation': '8',
'keypoint_detection': '4',
}
return default_task_type_batch_sizes[task_type]
def prompt(question):
"""Execute prompt answer
Args:
question (list): list of inquirer question
Returns: string of answer
"""
answers = inquirer.prompt(question)
return answers['value']
def generate_image_size_validate(network_name):
"""Generate image_size_validate depending on task_type.
Args:
network_name (string): network name.
Returns: validate function.
"""
max_size = IMAGE_SIZE_VALIDATION[network_name]["max_size"]
divider = IMAGE_SIZE_VALIDATION[network_name]["divider"]
def image_size_validate(answers, current):
# change to tuple (height, width).
image_size = image_size_filter(current)
image_size = (int(size) for size in image_size)
for size in image_size:
if not size % divider == 0:
raise inquirer.errors.ValidationError('',
reason="Image size should be multiple of {}, but image size is {}"
.format(divider, current))
if size > max_size:
raise inquirer.errors.ValidationError('',
reason="Image size should be lower than {} but image size is {}"
.format(max_size, current))
return True
return image_size_validate
def integer_validate(answers, current):
if not current.isdigit():
raise inquirer.errors.ValidationError('', reason='Input value should be integer')
return True
def image_size_filter(raw):
match = re.match(r"([0-9]+)[^0-9]+([0-9]+)", raw)
# raw: 128x128 -> ('128', '128')
image_size = match.groups()
return image_size
def save_config(blueoil_config, output=None):
if not output:
output = blueoil_config['model_name'] + ".py"
tmpfile = generate(blueoil_config)
shutil.copy(tmpfile, output)
return output
def ask_questions():
model_name_question = [
inquirer.Text(
name='value',
message='your model name ()')
]
model_name = prompt(model_name_question)
task_type_question = [
inquirer.List(name='value',
message='choose task type',
choices=task_type_choices)
]
task_type = prompt(task_type_question)
network_name_question = [
inquirer.List(name='value',
message='choose network',
choices=network_name_choices(task_type))
]
network_name = prompt(network_name_question)
dataset_format_question = [
inquirer.List(name='value',
message='choose dataset format',
choices=dataset_format_choices(task_type))
]
dataset_format = prompt(dataset_format_question)
enable_data_augmentation = [
inquirer.Confirm(name='value',
message='enable data augmentation?',
default=True)
]
train_dataset_path_question = [
inquirer.Text(name='value',
message='training dataset path')
]
train_path = prompt(train_dataset_path_question)
enable_test_dataset_path_question = [
inquirer.List(name='value',
message='set validation dataset?'
' (if answer no, the dataset will be separated for training and validation'
' by 9:1 ratio.)',
choices=['yes', 'no'])
]
enable_test_dataset_path = prompt(enable_test_dataset_path_question)
test_dataset_path_question = [
inquirer.Text(name='value',
message='validation dataset path')
]
if enable_test_dataset_path == 'yes':
test_path = prompt(test_dataset_path_question)
else:
test_path = ''
batch_size_question = [
inquirer.Text(name='value',
message='batch size (integer)',
default=default_batch_size(task_type),
validate=integer_validate)
]
batch_size = prompt(batch_size_question)
image_size_question = [
inquirer.Text(name='value',
message='image size (integer x integer)',
default='128x128',
validate=generate_image_size_validate(network_name))
]
image_size = image_size_filter(prompt(image_size_question))
training_epochs_question = [
inquirer.Text(name='value',
message='how many epochs do you run training (integer)',
default='100',
validate=integer_validate)
]
training_epochs = prompt(training_epochs_question)
training_optimizer_question = [
inquirer.List(name='value',
message='select optimizer',
choices=['Momentum', 'Adam'],
default='Momentum')
]
training_optimizer = prompt(training_optimizer_question)
initial_learning_rate_value_question = [
inquirer.Text(name='value',
message='initial learning rate',
default='0.001')
]
initial_learning_rate_value = prompt(initial_learning_rate_value_question)
# learning rate schedule
learning_rate_schedule_question = [
inquirer.List(name='value',
message='choose learning rate schedule'
' ({{epochs}} is the number of training epochs you entered before)',
choices=list(learning_rate_schedule_map.values()),
default=learning_rate_schedule_map["constant"])
]
_tmp_learning_rate_schedule = prompt(learning_rate_schedule_question)
for key, value in learning_rate_schedule_map.items():
if value == _tmp_learning_rate_schedule:
learning_rate_schedule = key
data_augmentation = {}
if prompt(enable_data_augmentation):
all_augmentor = {}
checkboxes = []
for name, obj in inspect.getmembers(augmentor):
if inspect.isclass(obj) and issubclass(obj, Processor):
argspec = inspect.getfullargspec(obj)
# ignore self
args = argspec.args[1:]
defaults = argspec.defaults
if len(args) == len(defaults):
default_val = [(arg, default) for arg, default in zip(args, defaults)]
default_str = " (default: {})".format(", ".join(["{}={}".format(a, d) for a, d in default_val]))
else:
defaults = ("# Please fill a value.",) * (len(args) - len(defaults)) + defaults
default_val = [(arg, default) for arg, default in zip(args, defaults)]
default_str = " (**caution**: No default value is provided, \
please modify manually after config exported.)"
all_augmentor[name + default_str] = {"name": name, "defaults": default_val}
checkboxes.append(name + default_str)
data_augmentation_question = [
inquirer.Checkbox(name='value',
message='Please choose augmentors',
choices=checkboxes)
]
data_augmentation_res = prompt(data_augmentation_question)
if data_augmentation_res:
for v in data_augmentation_res:
data_augmentation[all_augmentor[v]["name"]] = all_augmentor[v]["defaults"]
quantize_first_convolution_question = [
inquirer.Confirm(name='value',
message='apply quantization at the first layer?',
default=True)
]
quantize_first_convolution = prompt(quantize_first_convolution_question)
return {
'model_name': model_name,
'task_type': task_type,
'network_name': network_name,
'network': {
'quantize_first_convolution': quantize_first_convolution,
},
'dataset': {
'format': dataset_format,
'train_path': train_path,
'test_path': test_path,
},
'trainer': {
'batch_size': int(batch_size),
'epochs': int(training_epochs),
'optimizer': training_optimizer,
'learning_rate_schedule': learning_rate_schedule,
'initial_learning_rate': float(initial_learning_rate_value),
'save_checkpoint_steps': 1000,
'keep_checkpoint_max': 5,
},
'common': {
'image_size': [int(val) for val in image_size],
'pretrain_model': False,
'dataset_prefetch': True,
'data_augmentation': data_augmentation,
},
}
```
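A small sketch of the image-size parsing and validation helpers above, used outside of an inquirer prompt; the empty dict stands in for the answers argument that inquirer would normally pass, and the chosen network name is only for illustration.
```python
import inquirer

# Illustrative, stand-alone use of the helpers defined above.
validate = generate_image_size_validate("LmnetV1Quantize")  # max size 512, multiple of 16

print(image_size_filter("128x128"))  # -> ('128', '128')
print(validate({}, "128x128"))       # -> True

try:
    validate({}, "130x130")          # 130 is not a multiple of 16
except inquirer.errors.ValidationError as err:
    print("rejected: {}".format(err.reason))
```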
#### File: networks/classification/lm_resnet.py
```python
import functools
import tensorflow as tf
from blueoil.networks.classification.base import Base
from blueoil.layers import fully_connected
class LmResnet(Base):
"""Residual network (ResNet) of 18-layers for classification
This ResNet-18 is modified from the ImageNet version of ResNet-18 of the original paper
Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
    - The first layer is a 3x3 convolution layer with stride 1 instead of a 7x7 conv with stride 2,
      like the CIFAR-10 version of ResNet-18 in the paper.
    - The 3x3 max pooling with stride 2 is not used in this architecture.
    - In each residual block, batch normalization (BN) comes after the add; to be specific, a
      pre-activation variant of the residual block is used.
    - A Space-to-Depth operator is utilized for each transition layer: the convolution op with
      a stride of 2 is replaced with space-to-depth.
    - Note that this ResNet-18 currently only supports the NHWC data format.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.data_format == 'NHWC'
self.custom_getter = None
self.activation = tf.nn.relu
self.init_ch = 64
self.num_blocks = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
}[18]
@staticmethod
def _batch_norm(inputs, training):
return tf.contrib.layers.batch_norm(
inputs,
decay=0.997,
updates_collections=None,
is_training=training,
activation_fn=None,
center=True,
scale=True)
@staticmethod
def _conv2d_fix_padding(inputs, filters, kernel_size, strides):
"""Convolution layer deals with stride of 2"""
if strides == 2:
inputs = tf.space_to_depth(inputs, block_size=2, name="pool")
return tf.layers.conv2d(
inputs, filters, kernel_size,
padding="SAME",
kernel_initializer=tf.contrib.layers.xavier_initializer(),
use_bias=False)
def basicblock(self, x, out_ch, strides, training):
"""Basic building block of single residual function"""
in_ch = x.get_shape().as_list()[1 if self.data_format in {'NCHW', 'channels_first'} else 3]
shortcut = x
x = self._batch_norm(x, training)
x = self.activation(x)
x = self._conv2d_fix_padding(x, out_ch, 3, strides)
x = self._batch_norm(x, training)
x = self.activation(x)
x = self._conv2d_fix_padding(x, out_ch, 3, 1)
if strides == 2:
shortcut = tf.nn.avg_pool(shortcut, ksize=[1, strides, strides, 1],
strides=[1, strides, strides, 1], padding='VALID')
if in_ch != out_ch:
shortcut = tf.pad(shortcut, [[0, 0], [0, 0], [0, 0],
[(out_ch - in_ch) // 2, (out_ch - in_ch + 1) // 2]])
return shortcut + x
def resnet_group(self, x, out_ch, count, strides, training, name):
with tf.compat.v1.variable_scope(name, custom_getter=self.custom_getter):
for i in range(0, count):
with tf.compat.v1.variable_scope('block{}'.format(i)):
x = self.basicblock(x, out_ch,
strides if i == 0 else 1,
training)
return x
def base(self, images, is_training):
"""Base network.
Args:
images: Input images.
is_training: A flag for if it is training or not.
Returns:
tf.Tensor: Inference result.
"""
self.images = images
x = self._conv2d_fix_padding(images, self.init_ch, 3, 1)
x = self.resnet_group(x, self.init_ch * 1, self.num_blocks[0], 1, is_training, 'group0')
x = self.resnet_group(x, self.init_ch * 2, self.num_blocks[1], 2, is_training, 'group1')
x = self.resnet_group(x, self.init_ch * 4, self.num_blocks[2], 2, is_training, 'group2')
x = self.resnet_group(x, self.init_ch * 8, self.num_blocks[3], 2, is_training, 'group3')
x = self._batch_norm(x, is_training)
x = tf.nn.relu(x)
# global average pooling
h = x.get_shape()[1].value
w = x.get_shape()[2].value
x = tf.layers.average_pooling2d(name="gap", inputs=x, pool_size=[h, w], padding="VALID", strides=1)
output = fully_connected("linear", x, filters=self.num_classes, activation=None)
return output
class LmResnetQuantize(LmResnet):
version = 1.0
def __init__(
self,
activation_quantizer=None,
activation_quantizer_kwargs=None,
weight_quantizer=None,
weight_quantizer_kwargs=None,
*args,
**kwargs
):
super().__init__(
*args,
**kwargs
)
assert weight_quantizer
assert activation_quantizer
activation_quantizer_kwargs = activation_quantizer_kwargs if activation_quantizer_kwargs is not None else {}
weight_quantizer_kwargs = weight_quantizer_kwargs if weight_quantizer_kwargs is not None else {}
self.activation = activation_quantizer(**activation_quantizer_kwargs)
weight_quantization = weight_quantizer(**weight_quantizer_kwargs)
self.custom_getter = functools.partial(self._quantized_variable_getter,
weight_quantization=weight_quantization)
@staticmethod
def _quantized_variable_getter(getter, name, weight_quantization=None, *args, **kwargs):
"""Get the quantized variables.
        Use this to choose whether the target variable should be quantized or skipped.
Args:
getter: Default from tensorflow.
name: Default from tensorflow.
weight_quantization: Callable object which quantize variable.
args: Args.
kwargs: Kwargs.
"""
assert callable(weight_quantization)
var = getter(name, *args, **kwargs)
with tf.compat.v1.variable_scope(name):
            # Apply weight quantization to variables whose last name component is "kernel".
if "kernel" == var.op.name.split("/")[-1]:
return weight_quantization(var)
return var
```
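The `custom_getter` hook above is what routes every `kernel` variable through the weight quantizer. A minimal stand-alone sketch of the same mechanism, with a dummy quantizer in place of a real blueoil one (TF1 graph mode assumed; the scope and layer below are illustrative):
```python
import functools

import tensorflow as tf


def fake_binarize(var):
    # Dummy stand-in for a blueoil weight quantizer: just the sign of the weights.
    return tf.sign(var)


def quantized_getter(getter, name, weight_quantization=None, *args, **kwargs):
    var = getter(name, *args, **kwargs)
    # Only variables whose last name component is "kernel" get quantized.
    if var.op.name.split("/")[-1] == "kernel":
        return weight_quantization(var)
    return var


custom_getter = functools.partial(quantized_getter, weight_quantization=fake_binarize)

with tf.Graph().as_default():
    images = tf.compat.v1.placeholder(tf.float32, [None, 32, 32, 3])
    with tf.compat.v1.variable_scope("quantized", custom_getter=custom_getter):
        # tf.compat.v1.layers.conv2d creates a ".../kernel" variable, so it passes through fake_binarize.
        out = tf.compat.v1.layers.conv2d(images, filters=8, kernel_size=3, padding="SAME")
```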
#### File: tests/device_tests/test_device_e2e.py
```python
import copy
import glob
import os
import sys
import unittest
class DeviceE2eTest(unittest.TestCase):
"""Test Case of Device E2E Test."""
sys_path_default = copy.deepcopy(sys.path)
def _get_param(self, test_case, input_path, lib_name):
test_case_dir = os.path.join(input_path, test_case)
output_dir = glob.glob(os.path.join(test_case_dir, "export/*/*/output"))
if output_dir:
output_dir = output_dir[0]
else:
message = "No such directory: '{}'".format(os.path.join(test_case_dir, "export/*/*/output"))
raise FileNotFoundError(message) if sys.version_info.major == 3 else IOError(message)
test_data_dir = os.path.join(os.path.dirname(output_dir), "inference_test_data")
model_dir = os.path.join(output_dir, "models")
lib_dir = os.path.join(model_dir, "lib")
return {
"python_path": os.path.join(output_dir, "python"),
'image': os.path.join(test_data_dir, "raw_image.png"),
'model': os.path.join(lib_dir, lib_name),
'config': os.path.join(model_dir, "meta.yaml"),
}
def _get_test_cases(self, input_path, lib_name):
return [[case, self._get_param(case, input_path, lib_name)] for case in os.listdir(input_path)]
def _run(self, python_path, image, model, config):
sys.path = copy.deepcopy(self.sys_path_default) + [python_path]
from run import run_prediction
run_prediction(image, model, config)
self.assertTrue(os.path.exists(os.path.join('output', "output.json")))
def test_run(self):
input_path = os.environ['DEVICE_TEST_INPUT_PATH']
lib_name = os.environ['DEVICE_TEST_LIB_NAME']
test_cases = self._get_test_cases(input_path, lib_name)
for test_case_name, params in test_cases:
print("Testing case: {}".format(test_case_name))
if sys.version_info.major == 2:
self._run(**params)
else:
with self.subTest(test_case_name=test_case_name, params=params):
self._run(**params)
if __name__ == "__main__":
unittest.main()
```
#### File: unit/datasets_tests/test_open_images_v4.py
```python
import numpy as np
import pytest
from blueoil.pre_processor import Resize, ResizeWithGtBoxes
from blueoil.datasets.open_images_v4 import OpenImagesV4BoundingBox
from blueoil.datasets.open_images_v4 import OpenImagesV4Classification
from blueoil.datasets.open_images_v4 import OpenImagesV4BoundingBoxBase
from blueoil.datasets.dataset_iterator import DatasetIterator
# Apply set_test_environment() in conftest.py to all tests in this file.
pytestmark = pytest.mark.usefixtures("set_test_environment")
def test_open_images_v4_classification():
batch_size = 1
image_size = [256, 256]
dataset = OpenImagesV4Classification(batch_size=batch_size,
pre_processor=Resize(image_size))
dataset = DatasetIterator(dataset)
for _ in range(5):
images, labels = dataset.feed()
assert isinstance(images, np.ndarray)
assert images.shape[0] == batch_size
assert images.shape[1] == image_size[0]
assert images.shape[2] == image_size[1]
assert images.shape[3] == 3
assert isinstance(labels, np.ndarray)
assert labels.shape[0] == batch_size
assert labels.shape[1] == dataset.num_classes
def _show_images_with_boxes(images, labels):
"""show image for debug"""
import PIL.Image
import PIL.ImageDraw
import time
images_min = abs(images.min())
images_max = (images + images_min).max()
images = (images + images_min) * (255 / images_max)
images = (images).astype(np.uint8)
for image, label in zip(images, labels):
image = PIL.Image.fromarray(image)
draw = PIL.ImageDraw.Draw(image)
for box in label:
xy = [box[0], box[1], box[0] + box[2], box[1] + box[3]]
draw.rectangle(xy)
image.show()
time.sleep(1.5)
def test_open_images_v4_object_detection():
batch_size = 1
image_size = [256, 256]
dataset = OpenImagesV4BoundingBox(batch_size=batch_size,
pre_processor=ResizeWithGtBoxes(image_size))
dataset = DatasetIterator(dataset)
num_max_boxes = dataset.num_max_boxes
assert dataset.num_max_boxes == OpenImagesV4BoundingBox.count_max_boxes()
for _ in range(5):
images, labels = dataset.feed()
# _show_images_with_boxes(images, labels)
assert isinstance(images, np.ndarray)
assert images.shape[0] == batch_size
assert images.shape[1] == image_size[0]
assert images.shape[2] == image_size[1]
assert images.shape[3] == 3
assert isinstance(labels, np.ndarray)
assert labels.shape[0] == batch_size
assert labels.shape[1] == num_max_boxes
assert labels.shape[2] == 5
class Dummy(OpenImagesV4BoundingBoxBase):
extend_dir = "custom_open_images_v4_bounding_boxes/for_train"
def test_custom_open_images_v4_object_detection():
validation_size = 0.2
batch_size = 1
image_size = [256, 128]
train_dataset = Dummy(batch_size=batch_size,
validation_size=validation_size,
pre_processor=ResizeWithGtBoxes(image_size))
train_dataset = DatasetIterator(train_dataset)
validation_dataset = Dummy(batch_size=batch_size,
subset="validation",
validation_size=validation_size,
pre_processor=ResizeWithGtBoxes(image_size))
validation_dataset = DatasetIterator(validation_dataset)
num_max_boxes = train_dataset.num_max_boxes
assert train_dataset.num_max_boxes == Dummy.count_max_boxes()
assert train_dataset.num_per_epoch == 10 * (1 - validation_size)
assert validation_dataset.num_per_epoch == 10 * (validation_size)
for _ in range(13):
images, labels = train_dataset.feed()
# _show_images_with_boxes(images, labels)
assert isinstance(images, np.ndarray)
assert images.shape[0] == batch_size
assert images.shape[1] == image_size[0]
assert images.shape[2] == image_size[1]
assert images.shape[3] == 3
assert isinstance(labels, np.ndarray)
assert labels.shape[0] == batch_size
assert labels.shape[1] == num_max_boxes
assert labels.shape[2] == 5
class DummyHasValidation(OpenImagesV4BoundingBoxBase):
extend_dir = "custom_open_images_v4_bounding_boxes/for_train"
validation_extend_dir = "custom_open_images_v4_bounding_boxes/for_validation"
def test_custom_has_validation_open_images_v4_object_detection():
batch_size = 8
image_size = [196, 128]
train_dataset = DummyHasValidation(subset="train", batch_size=batch_size,
pre_processor=ResizeWithGtBoxes(image_size))
train_dataset = DatasetIterator(train_dataset)
validation_dataset = DummyHasValidation(subset="validation", batch_size=batch_size,
pre_processor=ResizeWithGtBoxes(image_size))
validation_dataset = DatasetIterator(validation_dataset)
num_max_boxes = validation_dataset.num_max_boxes
assert validation_dataset.num_max_boxes == DummyHasValidation.count_max_boxes()
assert train_dataset.num_per_epoch == 10
assert validation_dataset.num_per_epoch == 16
assert len(train_dataset.classes) == 44
assert len(validation_dataset.classes) == 44
for _ in range(3):
images, labels = train_dataset.feed()
# _show_images_with_boxes(images, labels)
assert isinstance(images, np.ndarray)
assert images.shape[0] == batch_size
assert images.shape[1] == image_size[0]
assert images.shape[2] == image_size[1]
assert images.shape[3] == 3
assert isinstance(labels, np.ndarray)
assert labels.shape[0] == batch_size
assert labels.shape[1] == num_max_boxes
assert labels.shape[2] == 5
for _ in range(3):
images, labels = validation_dataset.feed()
# _show_images_with_boxes(images, labels)
assert isinstance(images, np.ndarray)
assert images.shape[0] == batch_size
assert images.shape[1] == image_size[0]
assert images.shape[2] == image_size[1]
assert images.shape[3] == 3
assert isinstance(labels, np.ndarray)
assert labels.shape[0] == batch_size
assert labels.shape[1] == num_max_boxes
assert labels.shape[2] == 5
if __name__ == '__main__':
from blueoil.environment import setup_test_environment
setup_test_environment()
test_open_images_v4_classification()
test_open_images_v4_object_detection()
test_custom_open_images_v4_object_detection()
test_custom_has_validation_open_images_v4_object_detection()
```
#### File: tests/unit/test_visualize.py
```python
from easydict import EasyDict
import numpy as np
import PIL.Image
from blueoil.visualize import (
draw_fps,
visualize_classification,
visualize_object_detection,
visualize_semantic_segmentation,
visualize_keypoint_detection,
)
def test_draw_fps():
"""Verify just image is changed."""
pil_image = PIL.Image.new("RGB", size=(100, 200))
stored = np.array(pil_image)
fps = 11.1
fps_only_network = 22.2
draw_fps(pil_image, fps, fps_only_network)
assert not np.all(np.array(stored) == np.array(pil_image))
def test_classification():
"""Verify just image is changed."""
input_image = PIL.Image.new("RGB", size=(100, 200))
results = np.array([0.1, 0.3, 0.4, 0.2])
config = EasyDict({"CLASSES": ["a", "b", "c", "d"]})
result_image = visualize_classification(np.array(input_image), results, config)
assert not np.all(np.array(input_image) == np.array(result_image))
def test_object_detection():
"""Verify just image is changed."""
input_image = PIL.Image.new("RGB", size=(100, 200))
results = np.array([[32, 20, 10, 5, 2, 0.5], [2, 4, 2, 4, 1, 0.5]])
config = EasyDict({"IMAGE_SIZE": (64, 64), "CLASSES": ["a", "b", "c", "d"]})
result_image = visualize_object_detection(np.array(input_image), results, config)
assert not np.all(np.array(input_image) == np.array(result_image))
def test_semantic_segmentation():
"""Verify just image is changed."""
input_image = PIL.Image.new("RGB", size=(100, 200))
results = np.random.random_sample(size=(64, 64, 4))
config = EasyDict({"IMAGE_SIZE": (64, 64), "CLASSES": ["a", "b", "c", "d"]})
result_image = visualize_semantic_segmentation(np.array(input_image), results, config)
assert not np.all(np.array(input_image) == np.array(result_image))
def test_keypoint_detection():
"""Verify just image is changed."""
input_image = PIL.Image.new("RGB", size=(100, 200))
    joints = np.zeros(shape=(17, 3), dtype=int)
joints[0] = [30, 30, 1]
result_image = visualize_keypoint_detection(np.array(input_image), joints)
assert not np.all(np.array(input_image) == np.array(result_image))
if __name__ == '__main__':
test_draw_fps()
test_classification()
test_object_detection()
test_semantic_segmentation()
test_keypoint_detection()
``` |
{
"source": "Joeper214/codex-for-all",
"score": 3
} |
#### File: app/components/cookie.py
```python
from webapp2_extras.securecookie import SecureCookieSerializer
from app.settings import settings
SECRET_KEY = settings['app_config']['webapp2_extras.sessions']['secret_key']
class CookieHelper():
cookie = None
max_age_dir = {}
def __init__(self, secret_key=None):
if secret_key:
self.cookie = SecureCookieSerializer(secret_key)
else:
self.cookie = SecureCookieSerializer(SECRET_KEY)
def _serialize(self, name, value):
return self.cookie.serialize(name, value)
def _deserialize(self, name, value, max_age=None):
        return self.cookie.deserialize(name, value, max_age=max_age)
def get(self, controller, name, encrypted=True):
value = controller.request.cookies.get(name)
max_age = None
if name in self.max_age_dir:
max_age = self.max_age_dir[name]
if encrypted:
return self._deserialize(name, value, max_age)
return value
def write(self, controller, name, value, max_age=None, path='/', domain=None, secure=False, encrypted=True):
# Saves a cookie in the client.
if encrypted:
value = self._serialize(name, value)
if max_age:
self.max_age_dir[name] = max_age
controller.response.set_cookie(name, value, max_age=max_age, path=path, domain=domain, secure=secure)
def delete(self, controller, name):
# Deletes a cookie previously set in the client.
controller.response.delete_cookie(name)
def unset(self, controller, name):
# Cancels a cookie previously set in the response.
controller.response.unset_cookie(name)
```
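A quick sketch of the signed-cookie round trip that `CookieHelper` wraps, using webapp2_extras directly; the secret key and payload here are placeholders.
```python
from webapp2_extras.securecookie import SecureCookieSerializer

# Placeholder secret; in the app this comes from settings['app_config'].
serializer = SecureCookieSerializer('not-a-real-secret')

signed = serializer.serialize('session_id', {'user': 'someone@example.com'})
value = serializer.deserialize('session_id', signed, max_age=3600)
print(value)  # -> {'user': 'someone@example.com'} while the signature and max_age are still valid
```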
#### File: app/components/twilio_rest.py
```python
from twilio.rest import TwilioRestClient
from ferris import settings
class TwilioRest(object):
def __init__(self):
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = settings.get('twilio').get('account_sid')
auth_token = settings.get('twilio').get('auth_token')
self.from_number = "+17602784278"
self.client = TwilioRestClient(account_sid, auth_token)
def send_sms(self, to, message):
message = self.client.messages.create(
body=message,
to=to,
from_=self.from_number)
print message.sid
```
#### File: app/components/utilities.py
```python
import logging
def results_to_dict(cls, items, limit=None):
limit = 500 if limit is None else limit
items = cls.components.pagination.paginate(query=items, cursor=cls.request.get('cursor'), limit=limit)
build = []
data = {}
if items:
for item in items:
build.append(parse_entity(cls, item))
data['items'] = build
if cls.context.get('paging', None):
data['next_cursor'] = cls.context.get('paging').get('next_cursor')
data['previous_cursor'] = cls.context.get('paging').get('previous_cursor')
return cls.util.stringify_json(data)
def parse_entity(cls, item, convert_to_string=False):
from app.models.service_order import ServiceOrder
from app.models.trip import Trip
from app.models.beck_user import BeckUser
from app.models.vehicle import Vehicle
from app.models.address import Address
i = {}
i['key'] = item.key.urlsafe()
for name, value in item.to_dict().items():
""" check if value is a Key """
if str(type(value)) == "<class 'google.appengine.ext.ndb.key.Key'>":
""" extract data """
val = value.get()
try:
""" for address objects only """
if val.json_result:
""" nullifying json geo data for optimization"""
new = {}
for n in filter(lambda a: not a.startswith('__'), dir(val)):
new[n] = {} if n == 'json_result' else getattr(val, n)
i[name] = new
else:
i[name] = val
except:
""" everything else goes here """
i[name] = val
else:
i[name] = value
if convert_to_string:
i = cls.util.stringify_json(i)
return i
def check_json(string):
import json
try:
data = json.loads(string)
except:
data = False
logging.info("JSON data ===============>")
logging.info(data)
return data
```
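`check_json` deliberately swallows parse errors and returns False rather than raising; a tiny illustrative sketch (logging output omitted, inputs hypothetical):
```python
# Hypothetical inputs for check_json defined above.
print(check_json('{"status": "ok"}'))  # -> a dict with key 'status'
print(check_json('not valid json'))    # -> False
```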
#### File: app/controllers/code_share.py
```python
from ferris import Controller, route, route_with
from google.appengine.api import mail
# google app engine oauth2 API
from ferris.components import csrf
from ferris.components.csrf import csrf_protect
import re
# firebase libraries
from packages.firebase_rest import FirebaseRest
import datetime
import time
class CodeShare(Controller):
class Meta:
prefixes = ('api',)
components = (csrf.CSRF,)
def check_email_format(self, email):
match = re.search(
r'[\w+-]+(?:\.[\w+-]+)*@[\w+-]+(?:\.[\w+-]+)*(?:\.[a-zA-Z]{2,4})',
email
)
if match:
return True
else:
return False
@route_with("/")
def index(self):
pass
@route_with("/codex")
def editor(self):
pass
    # checks if the data is at least 30 days old
def check_time_difference(self, date):
        # date should be a unix timestamp (float, seconds since the epoch)
if date is not None:
# get current date
curdate = datetime.datetime.now()
date_2 = time.mktime(curdate.timetuple())
time_difference = date_2 - date
            # 30 days is about 2,592,000 seconds; the threshold below is a rounded-up value in seconds
if time_difference >= 2600000.0:
return True
else:
return False
# gets the Firebase ID to be deleted
def get_firebase_id(self, d):
id_key = None
for key, value in d.iteritems():
id_key = key
for k, v in value.iteritems():
if k == "updatedAt":
if self.check_time_difference(v):
self.delete_firebase_data(id_key)
# deletes data in Firebase by ID
def delete_firebase_data(self, fireID):
f = FirebaseRest(fireID)
f.delete()
return 200
# get reference to the data (for cronjob)
@route
def get_firebase_reference(self):
f = FirebaseRest('')
data = f.get()
d = dict(data)
self.get_firebase_id(d)
return 200
# email composer for sending / sharing codex
def compose(self):
params = {
'email': self.request.get('email'),
'url': self.request.get('url')
}
email = params['email']
if self.check_email_format(email):
mail.send_mail(
sender="<EMAIL>",
to=params['email'].lower(),
subject="Codex shared to you",
body=params['url']
)
self.context['data'] = params['email']
return 200
else:
return 403
# creates json data and uses csrf to avoid spam
@route
@csrf_protect
def api_compose(self):
cs = self.compose()
return cs
``` |
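As a sanity check on the 30-day cutoff in `check_time_difference`, a small worked sketch of the same epoch-seconds arithmetic; the stored `updatedAt` value is hypothetical.
```python
import datetime
import time

THIRTY_DAYS_IN_SECONDS = 30 * 24 * 60 * 60  # 2,592,000 seconds; the handler rounds this up to 2,600,000

# Hypothetical Firebase `updatedAt` value: 31 days ago, as seconds since the epoch.
updated_at = time.mktime((datetime.datetime.now() - datetime.timedelta(days=31)).timetuple())
now_ts = time.mktime(datetime.datetime.now().timetuple())

print(now_ts - updated_at >= THIRTY_DAYS_IN_SECONDS)  # -> True, so the entry would be deleted
```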
{
"source": "Joeper214/mailingapp",
"score": 3
} |
#### File: app/components/mails.py
```python
from google.appengine.ext import ndb
from google.appengine.api import memcache
from app.models.mail import Mail
from app.models.user import User
class Mails(object):
def __init__(self, controller):
self.controller = controller
self.mail = Mail()
self.user = User()
def inbox(self, user_email):
data = memcache.get('mail_results')
mail = []
if data is None:
source = 'From Datastore'
data = self.mail.list()
memcache.add('mail_results', data, 60)
else:
source = 'From Memcache'
if self.controller.request.method=='POST':
subject = self.controller.request.params['subject']
for item in data:
if item.subject == subject and item.recipient == user_email:
mail.append(item)
else:
for item in data:
if item.recipient == user_email:
mail.append(item)
self.controller.session['user_email'] = user_email
self.controller.context['user_email'] = user_email
self.controller.context['source'] = source
self.controller.context['inboxes'] = mail
def sentmails(self):
user_email = self.controller.session['user_email']
data = memcache.get('mail_results')
mail = []
if data is None:
source = 'From Datastore'
data = self.mail.list()
memcache.add('mail_results', data, 60)
else:
source = 'From Memcache'
if self.controller.request.method == 'POST':
subject = self.controller.request.params['subject']
for item in data:
if item.subject == subject and item.sender == user_email:
mail.append(item)
else:
for item in data:
if item.sender == user_email:
mail.append(item)
self.controller.session['user_email'] = user_email
self.controller.context['user_email'] = user_email
self.controller.context['source'] = source
self.controller.context['sentmails'] = mail
def compose_mail(self):
if self.controller.request.method=='GET':
self.controller.context['user_email'] = self.controller.session['user_email']
if self.controller.request.method=='POST':
sender = self.controller.session['user_email']
recipient = self.controller.request.params['recipient']
subject = self.controller.request.params['subject']
message = self.controller.request.params['message']
params = {'sender' : sender,
'recipient': recipient,
'subject' : subject,
'message' : message}
if self.is_valid_recipient()==True:
self.mail.create(params)
self.controller.context['status'] = 'added'
self.controller.context['user_email'] = sender
else:
self.controller.context['user_email'] = sender
self.controller.context['subject'] = subject
self.controller.context['message'] = message
self.controller.context['e_msg'] = 'Please Input A valid and registered Recipient Email'
def is_valid_recipient(self):
recipient = self.controller.request.params['recipient']
m = self.user.find_by_email(recipient)
if m is None:
return False
else:
return True
```
#### File: app/controllers/users.py
```python
from ferris import Controller, route
from google.appengine.api import memcache
from google.appengine.ext import ndb
from app.models.user import User
from app.components.users import Users
class Users(Controller):
class Meta:
components = (Users,)
model = User
@route
def login(self):
return self.components.users.authenticate()
@route
def signup(self):
self.components.users.signup()
```
#### File: app/models/mail.py
```python
from ferris import BasicModel, ndb
from ferris.behaviors import searchable
from app.behaviors.mail_behavior import MailBehavior
class Mail(BasicModel):
class Meta:
behaviors = (searchable.Searchable, MailBehavior)
search_index = ('global',)
sender = ndb.StringProperty()
recipient = ndb.StringProperty(required = True)
subject = ndb.StringProperty(required = True)
message = ndb.TextProperty(required = True)
@classmethod
def list(cls):
return cls.query().order(cls.created).fetch()
@classmethod
def inboxes(cls, email):
return cls.find_all_by_recipient(email)
@classmethod
def sentmail(cls, email):
return cls.find_all_by_sender(email)
@classmethod
def create(cls, params):
item = cls(sender = params['sender'],
recipient = params['recipient'],
subject = params['subject'],
message = params['message'])
item.put()
@classmethod
def query_key(cls, keyname):
instance = ndb.Key(cls, keyname).get()
if instance is not None:
return instance
return None
``` |
{
"source": "Joeperdefloep/fastscape-demo",
"score": 2
} |
#### File: fastscape-demo/examples/xshade.py
```python
import xrspatial
def hillshade(ds, groupby=None, elev_var='topography__elevation', **kwargs):
elev = ds[elev_var]
if groupby is not None:
# TODO: use shortcut=True
# https://github.com/holoviz/datashader/issues/871
hshade = elev.groupby(groupby).apply(xrspatial.hillshade, shortcut=False,
**kwargs)
else:
hshade = xrspatial.hillshade(elev, **kwargs)
return hshade
# TODO: related to todo above
#return (hshade
# .rename(dim_0='y', dim_1='x')
# .assign_coords(x=ds.x, y=ds.y))
``` |
{
"source": "joepetrini/bike-counter",
"score": 2
} |
#### File: webapp/account/views.py
```python
from django.conf import settings
from django.http import HttpResponseRedirect
#from django.core.urlresolvers import reverse
from django.contrib.auth import login as auth_login, logout, authenticate
#from django.views.generic import ListView, DetailView
from django.contrib.auth.forms import AuthenticationForm
from django.views.generic.edit import FormView, View, CreateView
from .forms import ProfileForm, UserForm
class LoginView(FormView):
form_class = AuthenticationForm
template_name = 'login.html'
def form_valid(self, form):
auth_login(self.request, form.get_user())
return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
#return super(LoginView, self).form_valid(form)
def form_invalid(self, form):
return super(LoginView, self).form_invalid(form)
class RegisterView(CreateView):
form_class = UserForm
template_name = 'register.html'
success_url = settings.LOGIN_REDIRECT_URL #'/orgs'
def form_valid(self, form):
resp = super(RegisterView, self).form_valid(form)
user = authenticate(username=form.cleaned_data['username'], password=<PASSWORD>.cleaned_data['<PASSWORD>'])
auth_login(self.request, user)
return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
class LogoutView(View):
def get(self, request, *args, **kwargs):
logout(request)
return HttpResponseRedirect(settings.LOGOUT_REDIRECT_URL)
class ProfileView(FormView):
form_class = ProfileForm
template_name = 'profile.html'
```
#### File: webapp/main/logic.py
```python
import random
from datetime import datetime
from django.db import transaction
from django.utils.timezone import now
from .models import *
def csv_for_appt(appt):
out = ''
# Headers
out += "Time,Bike,Direction,"
for m in appt.organization.organizationmetrics_set.all():
out += "%s," % m.metric.name
out = out[:-1] + "\n"
# Detail
for s in appt.survey_set.all():
out += "%s,%s,%s," % (s.created, s.is_bicycle, s.direction)
for sv in s.surveyvalue_set.all():
out += "%s," % sv.value.stored_value
out = out[:-1] + "\n"
return out
def stats_for_appt(appt):
stat = {}
stat['total'] = appt.survey_set.all().count()
metrics = {}
min = {}
for i in range(0, ((appt.actual_end - appt.actual_start).seconds / 60)):
min[i] = 0
metrics[-1] = {'name': 'direction', 'stats': {}}
# List of metrics
for m in appt.organization.organizationmetrics_set.filter(report=True):
metrics[m.metric.id] = {'name': m.metric.name, 'stats': {}}
# Value counts across all recorded info
for s in appt.survey_set.all():
# Direction
try:
metrics[-1]['stats'][s.direction] += 1
except KeyError:
metrics[-1]['stats'][s.direction] = 1
minutes_in = (s.recorded_at - appt.actual_start).seconds / 60
try:
min[minutes_in] += 1
except KeyError:
min[minutes_in] = 1
for sv in s.surveyvalue_set.select_related().all():
# Not in reportable metrics
if sv.metric.id not in metrics.keys():
continue
try:
metrics[sv.metric.id]['stats'][sv.value.display_value] += 1
except KeyError:
metrics[sv.metric.id]['stats'][sv.value.display_value] = 1
print min
stat['metrics'] = metrics
stat['minutes'] = min
return stat
def sim_appt(appt, avg_time=25):
with transaction.atomic():
# Clear data
appt.reset()
#for s in appt.survey_set.all():
# SurveyValue.objects.filter(survey=s).delete()
#Survey.objects.filter(appointment=appt).delete()
start = now()
total_time = 0
while True:
sec = random.randint(0, avg_time * 2)
total_time += sec
t = start + datetime.timedelta(seconds=total_time)
s = Survey.objects.create(appointment=appt, recorded_at=t)
for m in appt.organization.organizationmetrics_set.all():
metric = m.metric
if metric.value_set.system_name == 'direction':
val = random.choice(list(appt.location.directions()))
else:
val = random.choice(list(m.metric.value_set.value_set.all()))
# TODO handle defaults
has_def = m.metric.value_set.value_set.filter(is_default=True).count()
sv = SurveyValue.objects.create(survey=s, metric=metric, value=val)
# TODO Add events
if total_time > appt.organization.session_length * 60:
break
appt.actual_start = start
appt.actual_end = start + datetime.timedelta(0, total_time)
appt.time_taken = total_time
appt.save()
def get_appts_choices(theOrg, theYear=None):
all_appts_choices = [('default', '--Pick--'),('ALL', 'Download All Appointments')]
if theYear is not None:
all_appts_choices += [(a['id'],
(str(a['id']) + ' - ' + str(a['location__name'])) )
for a in Appointment.objects.filter(scheduled_start__year = theYear, organization = Organization.objects.get(slug=theOrg)).order_by('id').values('id', 'location__name') ]
else:
all_appts_choices += [(a['id'],
(str(a['id']) + ' - ' + str(a['location__name'])) )
for a in Appointment.objects.filter(organization = Organization.objects.get(slug=theOrg)).order_by('id').values('id', 'location__name') ]
    # For the count year drop-down, pull down all unique start_date years for the appts in the DB.
    # To accommodate potential DB incompatibilities with django's distinct() function (only PostgreSQL works), I'll do the unique year filtering myself.
return all_appts_choices
```
#### File: management/commands/create_appts.py
```python
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand, CommandError
from main.models import Location, Organization, Appointment
class Command(BaseCommand):
help = 'Created unassigned appts for locations'
def handle(self, *args, **options):
org = Organization.objects.all()[0]
locations = Location.objects.filter(organization=org, enabled=True)
from_date = datetime.today() - timedelta(days=30)
sched_date = datetime.today() + timedelta(days=10)
for loc in locations:
# Check that location has 2 appts from last 30 days forward
appt_count = Appointment.objects.filter(organization=org, location=loc, scheduled_start__gt=from_date).count()
if appt_count < 2:
print "missing appts for %s - %s found" % (loc, appt_count)
for i in range(0, 2 - appt_count):
print("Creating appt for %s" % (loc))
Appointment.objects.create(organization=org, location=loc, scheduled_start=sched_date)
```
#### File: webapp/main/models.py
```python
import datetime
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from model_utils.models import TimeStampedModel
from django.utils.timezone import now
from model_utils import Choices
from django.utils import timezone
class Organization(TimeStampedModel):
name = models.CharField(max_length=50, unique=True)
city = models.CharField(max_length=25, null=True, blank=True)
state = models.CharField(max_length=25, null=True, blank=True)
slug = models.SlugField(max_length=15, unique=True)
member_count = models.IntegerField(null=True, blank=True)
session_length = models.IntegerField(default=90)
class Meta:
db_table = 'organization'
def __unicode__(self):
return "%s - %s, %s" % (self.name, self.city, self.state)
def metrics_list(self):
return OrganizationMetrics.objects.filter(
organization=self).values_list('metric__system_name', flat=True)
class Membership(TimeStampedModel):
ROLES = Choices(
('member', 'member'),
('staff', 'staff'),
('admin', 'admin'),
)
user = models.ForeignKey(User)
organization = models.ForeignKey(Organization)
role = models.CharField(choices=ROLES, default=ROLES.member, max_length=15)
class Meta:
db_table = 'membership'
unique_together = ('user', 'organization')
def __unicode__(self):
return "%s - %s - %s" % (self.user, self.organization.name, self.role)
class Location(TimeStampedModel):
TYPES = Choices(
('intersection', 'Intersection'),
('trail', 'Trail'),
('bridge', 'Bridge'),
)
organization = models.ForeignKey(Organization)
name = models.CharField(max_length=80)
description = models.CharField(max_length=250, null=True, blank=True)
type = models.CharField(choices=TYPES, default=TYPES.intersection, max_length=20)
enabled = models.BooleanField(default=True)
longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
direction1 = models.CharField(max_length=20, null=True, blank=True)
direction1_label = models.CharField(max_length=50, null=True, blank=True)
direction2 = models.CharField(max_length=20, null=True, blank=True)
direction2_label = models.CharField(max_length=50, null=True, blank=True)
# Not used
has_east = models.BooleanField(default=True)
has_north = models.BooleanField(default=True)
has_south = models.BooleanField(default=True)
has_west = models.BooleanField(default=True)
class Meta:
db_table = 'location'
ordering = ['name']
def __unicode__(self):
return "%s - %s" % (self.organization.name, self.name)
def directions(self):
dirs = []
if self.has_east:
dirs.append(Value.objects.get(value_set__system_name='direction', stored_value='east'))
if self.has_west:
dirs.append(Value.objects.get(value_set__system_name='direction', stored_value='west'))
if self.has_north:
dirs.append(Value.objects.get(value_set__system_name='direction', stored_value='north'))
if self.has_south:
dirs.append(Value.objects.get(value_set__system_name='direction', stored_value='south'))
return dirs
class ValueSet(TimeStampedModel):
name = models.CharField(max_length=25)
system_name = models.SlugField(max_length=25, unique=True)
class Meta:
db_table = 'value_set'
def __unicode__(self):
return self.name
class Value(TimeStampedModel):
value_set = models.ForeignKey(ValueSet)
stored_value = models.CharField(max_length=25)
display_value = models.CharField(max_length=25)
is_default = models.BooleanField(default=False)
class Meta:
db_table = 'value'
def save(self, *args, **kwargs):
if self.is_default:
pass
return super(Value, self).save(*args, **kwargs)
def __unicode__(self):
return "%s - %s" % (self.value_set, self.display_value)
class Metric(TimeStampedModel):
name = models.CharField(max_length=25)
system_name = models.SlugField(max_length=25, unique=True)
desc = models.CharField(max_length=250, null=True, blank=True)
value_set = models.ForeignKey(ValueSet)
class Meta:
db_table = 'metric'
def __unicode__(self):
return "%s - %s - %s" % (self.name, self.value_set, self.desc)
class Event(TimeStampedModel):
name = models.CharField(max_length=25)
system_name = models.SlugField(max_length=25, unique=True)
class Meta:
db_table = 'events'
def __unicode__(self):
return "%s" % (self.name)
class OrganizationEvents(TimeStampedModel):
organization = models.ForeignKey(Organization)
event = models.ForeignKey(Event)
class Meta:
db_table = 'org_events'
unique_together = ('organization', 'event')
def __unicode__(self):
return "%s - %s" % (self.organization, self.event)
class OrganizationMetrics(TimeStampedModel):
organization = models.ForeignKey(Organization)
metric = models.ForeignKey(Metric)
required = models.BooleanField(default=True)
order = models.IntegerField(default=0)
report = models.BooleanField(default=True)
class Meta:
db_table = 'org_metrics'
unique_together = ('organization', 'metric')
def __unicode__(self):
return "%s - %s" % (self.organization, self.metric)
class Appointment(TimeStampedModel):
organization = models.ForeignKey(Organization)
location = models.ForeignKey(Location)
user = models.ForeignKey(User, null=True, blank=True, help_text='Leave blank for unassigned')
scheduled_start = models.DateTimeField()
actual_start = models.DateTimeField(null=True, blank=True)
actual_end = models.DateTimeField(null=True, blank=True)
time_taken = models.IntegerField(null=True, blank=True)
longest_pause = models.IntegerField(default=0)
total_pause = models.IntegerField(default=0)
total_away = models.IntegerField(default=0)
# CONSIDER - ADDING NEW FIELD FOR EXPECTED APPT SESSION LENGTH - RIGHT NOW ITS A CONFIG IN BIKE.JS FILE
class Meta:
db_table = 'appointment'
ordering = ['actual_end', 'scheduled_start']
def __unicode__(self):
return "%s - %s - %s - %s" % (self.organization.name, self.location, self.user, self.scheduled_start)
def time_taken_in_min(self):
return self.time_taken / 60
def start(self):
self.actual_start = datetime.datetime.now()
self.save()
def end(self, time_taken):
        # Per django docs, Rich switched to timezone.now() instead of datetime.datetime.now() since we're using USE_TZ=True.
self.actual_end = timezone.now()
self.time_taken = int(time_taken)
self.save()
def reset(self):
# Clear data
for s in self.survey_set.all():
SurveyValue.objects.filter(survey=s).delete()
Survey.objects.filter(appointment=self).delete()
SurveyEvent.objects.filter(appointment=self).delete()
self.actual_end = None
self.actual_start = None
self.time_taken = None
self.longest_pause = 0
self.total_pause = 0
self.total_away = 0
self.save()
def complete(self):
if self.actual_end is None:
return False
return True
def isMorning(self, appt):
if appt.actual_start.hour < 12:
return True
else:
return False
def status(self):
if self.actual_start and self.actual_end is None:
return "In progress"
elif self.user is None:
return "Unassigned"
elif self.actual_end is None:
return "Not started"
return "Complete"
class SessionTrackerViewObject():
def __init__(self, givenLocation):
self.thisLocation = givenLocation
self.amSession1 = None
self.amSession2 = None
self.pmSession1 = None
self.pmSession2 = None
def assignSessions(self, sessions):
for cnt in sessions:
# on 11/22/15 - Rich switched to using the actual start time instead of the scheduled start time
if cnt.actual_start.time() < datetime.time(12,00):
if self.amSession1 is None :
self.amSession1 = cnt
else:
self.amSession2 = cnt
else:
if self.pmSession1 is None:
self.pmSession1 = cnt
else:
self.pmSession2 = cnt
if self.pmSession2 is not None and self.amSession2 is not None:
break
class SurveyEvent(TimeStampedModel):
appointment = models.ForeignKey(Appointment)
event = models.ForeignKey(Event)
longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
guid = models.CharField(max_length=50, null=True, blank=True)
class Meta:
db_table = 'survey_events'
def __unicode__(self):
return "%s - %s - %s" % (self.created, self.appointment, self.event)
class Survey(TimeStampedModel):
appointment = models.ForeignKey(Appointment)
is_bicycle = models.BooleanField(default=True)
longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
time_to_take = models.IntegerField(blank=True, null=True)
guid = models.CharField(max_length=50, null=True, blank=True)
    # fyi - on 11/24/15 Rich switched to using datetime.datetime.now() to resolve a timezone discrepancy
    # between the appointment actual_start and the survey recorded_at value.
    # Pass the callable itself so the default is evaluated per row, not once at import time.
    recorded_at = models.DateTimeField(default=datetime.datetime.now)
direction = models.CharField(max_length=20, null=True, blank=True)
class Meta:
db_table = 'survey'
def __unicode__(self):
return "%s" % (self.appointment)
class SurveyValue(TimeStampedModel):
survey = models.ForeignKey(Survey)
metric = models.ForeignKey(Metric)
value = models.ForeignKey(Value)
class Meta:
db_table = 'survey_value'
unique_together = ('survey', 'metric')
def __unicode__(self):
return "%s - %s - %s" % (self.survey, self.metric, self.value)
```
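The Appointment model above carries the session lifecycle (start, end, reset, status), and SessionTrackerViewObject buckets sessions into AM/PM slots by actual start time. A minimal usage sketch follows; the `main.models` import path, the lookup arguments, and the `location_id` filter are assumptions for illustration, not taken from the repository.
```python
# Hypothetical sketch of the Appointment lifecycle defined above.
# Import path and identifiers are assumptions for illustration only.
from main.models import Appointment, SessionTrackerViewObject

def run_session(appointment_id, elapsed_seconds):
    appt = Appointment.objects.get(pk=appointment_id)
    appt.start()               # stamps actual_start and saves
    # ... surveys are recorded elsewhere while the session runs ...
    appt.end(elapsed_seconds)  # stamps a timezone-aware actual_end and time_taken
    return appt.status()       # "Complete" once actual_end is set

def sessions_for_location(location, todays_appointments):
    # Bucket up to two AM and two PM sessions for one location, as the view object expects.
    tracker = SessionTrackerViewObject(location)
    started = [a for a in todays_appointments
               if a.location_id == location.id and a.actual_start]
    tracker.assignSessions(started)
    return tracker
```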
#### File: webapp/main/tests.py
```python
from django.test import TestCase
from .models import Value, ValueSet
class ValidateSingleDefaultValue(TestCase):
def setUp(self):
pass
def test_single_default(self):
pass
``` |
{
"source": "joepettigrew/multi-blog",
"score": 3
} |
#### File: multi-blog/handlers/dislikepost.py
```python
from handler import Handler
from models import Sentiment, Blogs
from util import post_exists
class DislikePost(Handler):
"""Dislikes posts for auth users"""
def get(self):
blog_id = int(self.request.get("bid"))
# Check user's auth status and check post exists
if self.user and post_exists(blog_id):
            # Check to see if the post's owner is the same as the auth user.
if not Blogs.verify_owner(blog_id, self.user):
# Check to see if the user has interacted with this post before
sentiment = Sentiment.by_owner(self.user, blog_id)
if sentiment is None:
sentiment = Sentiment(username=self.user,
blog_id=blog_id,
sentiment=False)
sentiment.put()
# Update the Datastore
blog = Blogs.by_id(blog_id)
blog.dislikes += 1
blog.put()
self.redirect("/post/%s" % blog_id)
```
#### File: multi-blog/handlers/editpost.py
```python
from handlers import Handler
from models import Blogs
from util import post_exists_wrap
class EditPost(Handler):
"""Edits posts for auth uers"""
@post_exists_wrap
def get(self, blog_id):
params = dict(auth_user=self.user)
# Verifies user's auth status and ownership of the post
if self.user and Blogs.verify_owner(blog_id, self.user):
blog = Blogs.by_id(blog_id)
params['blog_id'] = blog_id
params['title'] = blog.title
params['content'] = blog.content
self.user_page("editpost.html", "/signup", **params)
else:
self.redirect("/welcome")
@post_exists_wrap
def post(self, blog_id):
title = self.request.get("title")
content = self.request.get("content")
blog_id = self.request.get("bid")
# Verifies user's auth status and ownership of the post
if self.user and Blogs.verify_owner(blog_id, self.user):
if title and content:
# Remove <div> tag from posting
content = content.replace("<div>", "")
content = content.replace("</div>", "")
# Update the Datastore
blog = Blogs.by_id(blog_id)
blog.title = title
blog.content = content
blog.put()
# Redirect to welcome page
self.redirect("/welcome")
else:
params = dict(auth_user=self.user)
params['title'] = title
params['content'] = content
params['blog_id'] = blog_id
params['error'] = "We need both the title and the blog post."
self.render("editpost.html", **params)
else:
self.redirect("/signup")
```
#### File: multi-blog/handlers/signup.py
```python
from handlers import Handler
from handlers import Validate
from models import Users
import auth
class SignUpPage(Handler):
"""Registers user to the site"""
def get(self):
self.anom_page("signup.html", "/welcome")
def post(self):
have_error = False
username = self.request.get("username")
password = self.request.get("password")
verify = self.request.get("verify")
email = self.request.get("email")
params = dict(username=username, email=email)
if not Validate(username).username():
params['error_username'] = "Invalid username"
have_error = True
elif username == Users.by_username(username):
params['error_username'] = "Same username already exists"
have_error = True
if not Validate(password).password():
params['error_password'] = "<PASSWORD>"
have_error = True
elif password != verify:
params['error_verify'] = "Passwords didn't match"
have_error = True
if not Validate(email).email():
params['error_email'] = "Invalid email address"
have_error = True
elif email and email == Users.by_email(email):
params['error_email'] = "This email address is already used"
have_error = True
if have_error:
self.render("signup.html", **params)
else:
# Create password hash
pass_hash = auth.make_pw_hash(username, password)
# Create user in DB
user = Users(username=username, password=<PASSWORD>, email=email)
user.put()
# Create cookie
self.set_secure_cookie("username", username)
# Redirect user to welcome page
self.redirect("/welcome")
```
#### File: multi-blog/handlers/singlepost.py
```python
from handlers import Handler
from models import Users, Blogs, Sentiment, Comments
from util import post_exists_wrap
class SinglePost(Handler):
"""Renders single post as well as handling comment posting"""
@post_exists_wrap
def get(self, blog_id):
blog = Blogs.by_id(blog_id)
sentiment = Sentiment.by_owner(self.user, blog_id)
comments = Comments.by_blog_id(blog_id)
params = dict(blog=blog,
sentiment=sentiment,
comments=comments,
auth_user=self.user)
if not blog:
self.error(404)
return
self.render("singlepost.html", **params)
@post_exists_wrap
def post(self, blog_id):
blog_id = int(self.request.get("bid"))
if self.user:
comment = self.request.get("comment")
blog = Blogs.by_id(blog_id)
comments = Comments.by_blog_id(blog_id)
sentiment = Sentiment.by_owner(self.user, blog_id)
params = dict(blog_id=blog_id,
auth_user=self.user,
blog=blog,
comments=comments,
sentiment=sentiment)
if not comment:
params['error_comment'] = "You didn't write any comment!"
self.render("singlepost.html", **params)
else:
comment = Comments(blog_id=blog_id,
comment=comment,
username=self.user)
comment.put()
self.redirect("/post/%s#Comments" % blog_id)
else:
self.redirect("/post/%s#Comments" % blog_id)
``` |
{
"source": "joe-phon/-",
"score": 3
} |
#### File: -/qq_robot/sample.py
```python
import json
import requests
import re
# 插件加载方法:
# 先在命令行运行 qqbot ,
# 启动成功后,在另一个命令行窗口输入: qq plug qqbot.plugins.sample
def answerMessage(ask_message):
url = 'http://openapi.tuling123.com/openapi/api/v2'
body = {
"reqType":0,
"perception": {
"inputText": {
"text": ""
}
},
"userInfo": {
"apiKey": "6c293e88435c4ef99b86f8d15ed25a3f",
"userId": "qiao"
}
}
body['perception']['inputText']['text'] = ask_message
data = json.dumps(body)
response = requests.post(url, data = data)
retext = response.text
answ_text = re.findall((re.compile('{.*?results":.*?values.*?text":"(.*?)"}', re.S)), retext)
text = str(answ_text[0])
try:
answ_shows = re.findall((re.compile('{.*?showtext":"(.*?)",', re.S)), retext)
return str(answ_shows[0])
except IndexError:
answ_names = re.findall((re.compile('{.*?name":"(.*?)",', re.S)), retext)
answ_urls = re.findall((re.compile('{.*?detailurl":"(.*?)"}', re.S)), retext)
try:
for index in range(3):
text = text+"\n原标题"+str(index+1)+":"+str(answ_names[index])+"\n链接地址:"+str(answ_urls[index])
return (text)
except IndexError:
return (str(answ_text[0]))
def onQQMessage(bot, contact, member, content):
answer = answerMessage(content)
bot.SendTo(contact, answer)
```
#### File: bot/spiders/joke.py
```python
import scrapy
from scrapy.http.response.html import HtmlResponse
from scrapy.selector.unified import SelectorList
from bot.items import BotItem
import re
class JokeSpider(scrapy.Spider):
name = 'joke'
allowed_domains = ['m.qiushi.92game.net']
start_urls = ['http://m.qiushi.92game.net/']
base_url = 'http://m.qiushi.92game.net/'
def parse(self, response):
jokes = response.xpath('//div[@class="qiushi"]').getall()
for joke in jokes:
joke = ''.join(joke).strip()
item = BotItem(content=joke)
yield item
page_url = response.xpath('//div[@class="pagebar footer"]/a[last()]/@href').get()
page_text = response.xpath('//div[@class="pagebar footer"]/a[last()]/text()').get()
print(page_url)
next_url=self.base_url+page_url
if page_text == '下一页':
print(next_url)
# yield scrapy.Request(next_url,callback=self.parse)
else:
return
``` |
{
"source": "joepie91/binder",
"score": 3
} |
#### File: joepie91/binder/assertutil.py
```python
import platform
def get_assert_tuple_args(e):
assert isinstance(e, AssertionError)
args = e.args
assert isinstance(args, tuple)
if platform.python_version_tuple() < ('2','7','3'):
# assert tuple args already unpacked!
# see http://bugs.python.org/issue13268
return args
else:
assert len(args) == 1
return args[0]
```
#### File: binder/binder/db_mysql.py
```python
from binder.conn import Connection, REPEATABLE_READ, _VALID_ISOLATION_LEVELS
from binder.sqlgen import DIALECT_MYSQL
_ISOLATION_SQL = "SET SESSION TRANSACTION ISOLATION LEVEL %s"
class MysqlConnection(Connection):
def __init__(self, *args, **kwargs):
import MySQLdb
read_only = kwargs.pop('read_only', None)
isolation_level = kwargs.pop('isolation_level', REPEATABLE_READ)
assert isolation_level in _VALID_ISOLATION_LEVELS, \
("Unknown isolation_level", isolation_level)
#
assert not 'charset' in kwargs
kwargs['charset'] = 'utf8'
assert not 'use_unicode' in kwargs
kwargs['use_unicode'] = True
#
dbconn = MySQLdb.connect(*args, **kwargs)
dberror = MySQLdb.Error
Connection.__init__(
self, dbconn, dberror,
DIALECT_MYSQL, "%s",
read_only
)
isolation_sql = _ISOLATION_SQL % isolation_level
self._execute(isolation_sql)
```
#### File: binder/binder/db_postgres.py
```python
from binder.conn import Connection, READ_COMMITTED, REPEATABLE_READ, \
_VALID_ISOLATION_LEVELS
from binder.sqlgen import DIALECT_POSTGRES
_psycopg2_imported = False
_ISOLATION_LEVEL_MAP = {}
def _import_psycopg2():
global _psycopg2_imported, psycopg2, _ISOLATION_LEVEL_MAP
#
if _psycopg2_imported:
return
#
import psycopg2
_psycopg2_imported = True
#
from psycopg2 import extensions
#
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
#
_ISOLATION_LEVEL_MAP.update({
#ISOLATION_LEVEL_AUTOCOMMIT
#ISOLATION_LEVEL_READ_UNCOMMITTED
READ_COMMITTED: extensions.ISOLATION_LEVEL_READ_COMMITTED,
REPEATABLE_READ: extensions.ISOLATION_LEVEL_REPEATABLE_READ,
#ISOLATION_LEVEL_SERIALIZABLE
})
class PostgresConnection(Connection):
def __init__(self, *args, **kwargs):
_import_psycopg2()
read_only = kwargs.pop('read_only', None)
isolation_level = kwargs.pop('isolation_level', REPEATABLE_READ)
assert isolation_level in _VALID_ISOLATION_LEVELS, \
("Unknown isolation_level", isolation_level)
#
pg_isolation_level = _ISOLATION_LEVEL_MAP[isolation_level]
#
dbconn = psycopg2.connect(*args, **kwargs)
dbconn.set_session(pg_isolation_level)
dberror = psycopg2.Error
Connection.__init__(
self, dbconn, dberror,
DIALECT_POSTGRES, "%s",
read_only
)
```
#### File: binder/binder/table.py
```python
from binder.col import AutoIdCol
class Table:
def __init__(self, table_name, *cols):
self.table_name = table_name
self.cols = cols
col_map = {}
auto_id_col = None
for col in cols:
col_name = col.col_name
            assert col_name not in col_map, \
"Table '%s' has more than one column with name '%s'" \
% (table_name, col_name)
col_map[col_name] = col
if col.__class__ is AutoIdCol:
assert auto_id_col is None, \
"Table '%s' has more than one AutoIdCol" % table_name
auto_id_col = col
self.auto_id_col = auto_id_col
self.q = QueryCols(table_name, cols)
def new(self, **col_values):
row = {}
for col in self.cols:
col_name = col.col_name
value = col_values.get(col_name, col.default_value)
col.check_value(value)
row[col_name] = value
return row
def parse(self, **col_values):
row = {} #self.new(**col_values)
for col in self.cols:
col_name = col.col_name
value = col_values.get(col_name, col.default_value)
if type(value) in [str, unicode]:
if value == "":
value = col.default_value
else:
value = col.parse_str(value)
col.check_value(value)
row[col_name] = value
return row
def check_values(self, row):
auto_id_used = False
for col in self.cols:
value = row[col.col_name]
col.check_value(value)
if value is None:
auto_id_used = True
return auto_id_used
class QueryCols:
def __init__(self, table_name, cols):
for col in cols:
qcol = QueryCol(col)
self.__dict__[col.col_name] = qcol
    def __setattr__(self, name, value):
        raise AttributeError, \
            "Setting ...q.'%s' = ... not allowed - did you mean to use ==?" \
            % name
class QueryCol:
def __init__(self, col):
self._col = col
self._auto_id_col = col.__class__ is AutoIdCol
self.ASC = SqlSort(col, True)
self.DESC = SqlSort(col, False)
def __eq__(self, other):
return SqlCondition(self._col, "=", other)
def __gt__(self, other):
return SqlCondition(self._col, ">", other)
def __ge__(self, other):
return SqlCondition(self._col, ">=", other)
def __lt__(self, other):
return SqlCondition(self._col, "<", other)
def __le__(self, other):
return SqlCondition(self._col, "<=", other)
def YEAR(self, date):
return SqlCondition(self._col, "YEAR", date)
def MONTH(self, date):
return SqlCondition(self._col, "MONTH", date)
def DAY(self, date):
return SqlCondition(self._col, "DAY", date)
def LIKE(self, s):
return SqlCondition(self._col, "LIKE", s)
def ILIKE(self, s):
return SqlCondition(self._col, "ILIKE", s)
class SqlCondition:
def __init__(self, col, op, other):
col.check_value(other)
if col.__class__ is AutoIdCol:
            assert other is not None, \
"SqlCondition: cannot use None for AutoIdCol"
self.col = col
self.op = op
self.other = other
def __repr__(self):
return '"%s"' % self._repr1()
def _repr1(self):
return "%s %s %s" \
% (self.col.col_name, self.op, repr(self.other))
class SqlSort:
def __init__(self, col, asc):
self.col = col
self.asc = asc
class AND:
def __init__(self, *sqlconds):
assert len(sqlconds) > 1, "AND: must have at least 2 conditions"
for sqlcond in sqlconds:
assert isinstance(sqlcond, SqlCondition), \
"AND: conditions must be SqlCondition"
self.sqlconds = sqlconds
def __repr__(self):
conds = " AND ".join(c._repr1() for c in self.sqlconds)
conds = '"%s"' % conds
return conds
class OR:
def __init__(self, *sqlconds):
assert len(sqlconds) > 1, "OR: must have at least 2 conditions"
for sqlcond in sqlconds:
assert isinstance(sqlcond, SqlCondition), \
"OR: conditions must be SqlCondition"
self.sqlconds = sqlconds
def __repr__(self):
conds = " OR ".join(c._repr1() for c in self.sqlconds)
conds = '"%s"' % conds
return conds
```
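The QueryCols/QueryCol machinery above turns attribute access and comparison operators on `Table.q` into SqlCondition objects, which AND/OR then group (each taking plain SqlCondition arguments). A hedged sketch of defining a table and composing conditions follows; the column constructor signatures are assumed from the test file that follows, and the default/check behaviour of the column classes is an assumption.
```python
# Hedged sketch of the query DSL above (Python 2 era, matching the library).
# Column constructor signatures are assumed from bindertest/test_create_drop.py.
from binder.col import AutoIdCol, IntCol, UnicodeCol
from binder.table import Table, AND, OR

Foo = Table(
    "foo",
    AutoIdCol("foo_id"),
    IntCol("i1"),
    UnicodeCol("s1", 10),
)

# Attribute access on Foo.q yields QueryCol objects; comparisons build SqlCondition values.
big_i1 = Foo.q.i1 >= 10                 # SqlCondition for "i1 >= 10"
prefix = Foo.q.s1.LIKE(u"ab%")          # SqlCondition for "s1 LIKE 'ab%'"
both = AND(big_i1, prefix)              # AND/OR accept SqlCondition arguments only
either = OR(Foo.q.s1 == u"abc", Foo.q.s1 == u"xyz")

# Table.new fills unspecified columns with their defaults and type-checks the values.
row = Foo.new(i1=42, s1=u"hello")
```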
#### File: binder/bindertest/test_create_drop.py
```python
import unittest
from binder import *
from bindertest.testdbconfig import connect, connect_postgres
Foo = Table(
"foo",
AutoIdCol("foo_id"),
IntCol("i1"),
UnicodeCol("s1", 10),
DateCol("d1"),
)
_NO_TABLE_FOO = [
("no such table: foo",), # sqlite
('table "foo" does not exist\n',), # Postgres
(1051, "Unknown table \'foo\'"), # MySQL
]
_TABLE_FOO_EXISTS = [
("table foo already exists",), # sqlite
('relation "foo" already exist\n',), # Postgres
(1050, "Table \'foo\' already exists"), # MySQL
]
class CreateDropTest(unittest.TestCase):
def test_create_drop(self):
conn = connect()
try:
conn.drop_table(Foo)
except conn.DbError, e:
self.assertIn(e.args, _NO_TABLE_FOO)
conn.commit()
conn = connect()
conn.create_table(Foo)
conn = connect()
try:
conn.create_table(Foo)
except conn.DbError, e:
self.assertIn(e.args, _TABLE_FOO_EXISTS)
conn.drop_table(Foo)
conn = connect()
try:
conn.drop_table(Foo)
except conn.DbError, e:
self.assertIn(e.args, _NO_TABLE_FOO)
def test_create_transactional(self):
conn = connect()
conn.create_table(Foo)
conn = connect()
if connect == connect_postgres:
try:
conn.drop_table(Foo)
except conn.DbError, e:
self.assertIn(e.args, _NO_TABLE_FOO)
else:
conn.drop_table(Foo)
def test_drop_if_exists(self):
conn = connect()
try:
conn.drop_table(Foo)
except conn.DbError, e:
self.assertIn(e.args, _NO_TABLE_FOO)
conn = connect()
conn.create_table(Foo)
conn = connect()
conn.drop_table(Foo, if_exists=True)
try:
conn.drop_table(Foo)
except conn.DbError, e:
self.assertIn(e.args, _NO_TABLE_FOO)
conn = connect()
conn.drop_table(Foo, if_exists=True)
conn.drop_table_if_exists(Foo)
def test_create_RO(self):
conn = connect("test123")
try:
conn.create_table(Foo)
except Exception, e:
self.assertEquals("Connection is read only: test123", str(e))
else:
self.fail()
def test_drop_RO(self):
conn = connect("test123")
try:
conn.drop_table(Foo)
except Exception, e:
self.assertEquals("Connection is read only: test123", str(e))
else:
self.fail()
if __name__ == '__main__':
unittest.main()
```
#### File: joepie91/binder/run-tests.py
```python
import os, sys
PROJECT_DIR = os.path.abspath(os.path.dirname( __file__ ))
def runtestdir(subdir):
entries = os.listdir(subdir)
total = 0
errs = 0
for f in entries:
if not f.endswith(".py"):
continue
if not f.startswith("test_"):
continue
test_file = os.path.join(subdir, f)
print >> sys.stderr, "FILE:", test_file
exit_code = os.system(sys.executable + " " + test_file)
total += 1
if exit_code != 0:
errs += 1
print >> sys.stderr, "SUMMARY: %s -> %s total / %s error (%s)" \
% (subdir, total, errs, sys.executable)
if __name__ == "__main__":
os.chdir(PROJECT_DIR)
os.environ["PYTHONPATH"] = PROJECT_DIR
runtestdir("bindertest")
``` |
{
"source": "joepie91/resolv",
"score": 3
} |
#### File: resolv/resolv/__init__.py
```python
import re
import resolvers
from resolv.shared import ResolverError
def resolve(url):
if re.match("https?:\/\/(www\.)?putlocker\.com", url) is not None:
task = resolvers.PutlockerTask(url)
return task.run()
elif re.match("https?:\/\/(www\.)?sockshare\.com", url) is not None:
task = resolvers.SockshareTask(url)
return task.run()
elif re.match("https?:\/\/(www\.)?1channel\.ch\/external\.php", url) is not None:
task = resolvers.OneChannelTask(url)
return task.run()
elif re.match("https?:\/\/(www\.)?youtube\.com\/watch\?", url) is not None:
task = resolvers.YoutubeTask(url)
return task.run()
elif re.match("https?:\/\/(www\.)?filebox\.com\/[a-zA-Z0-9]+", url) is not None:
task = resolvers.FileboxTask(url)
return task.run()
elif re.match("https?:\/\/(www\.)?vidxden\.com\/[a-zA-Z0-9]+", url) is not None:
task = resolvers.VidxdenTask(url)
return task.run()
elif re.match("https?:\/\/(www\.)?vidbux\.com\/[a-zA-Z0-9]+", url) is not None:
task = resolvers.VidbuxTask(url)
return task.run()
elif re.match("https?:\/\/(www\.)?filenuke\.com\/[a-zA-Z0-9]+", url) is not None:
task = resolvers.FilenukeTask(url)
return task.run()
elif re.match("https?:\/\/(www\.)?pastebin\.com\/[a-zA-Z0-9]+", url) is not None:
task = resolvers.PastebinTask(url)
return task.run()
elif re.match("https?:\/\/(www\.)?mediafire\.com\/\?[a-z0-9]+", url) is not None:
task = resolvers.MediafireTask(url)
return task.run()
else:
raise ResolverError("No suitable resolver found for %s" % url)
def recurse(url):
previous_result = {}
while True:
result = resolve(url)
if result.state == "failed":
return previous_result
elif result.result_type != "url":
return result
url = result.results['url']
previous_result = result
```
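resolve() above dispatches a URL to the matching site resolver and returns the finished Task, whose `results` layout depends on `result_type`; recurse() keeps resolving while the result is itself just another URL. A hedged sketch of calling it (Python 2, to match the library) is below; the `videos` layout is taken from the PutLocker resolver that follows, and the URL is only a placeholder.
```python
# Hedged sketch of the dispatcher above; the URL is a placeholder, not a real link.
from resolv import resolve
from resolv.shared import ResolverError, TechnicalError

video_url = None
try:
    task = resolve("http://www.putlocker.com/file/ABCDEF123456")
    if task.state == "finished" and task.result_type == "video":
        # PutlockerTask.results holds a title plus a list of stream dicts.
        streams = sorted(task.results['videos'], key=lambda s: s['priority'])
        video_url = streams[0]['url']
except (ResolverError, TechnicalError), e:   # Python 2 except syntax, as in the library
    pass
```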
#### File: resolv/resolvers/putlocker.py
```python
import re
from resolv.shared import ResolverError, TechnicalError, unescape, Task
class PutlockerTask(Task):
result_type = "video"
name = "PutLocker"
author = "<NAME>"
author_url = "http://cryto.net/~joepie91"
def run(self):
try:
import mechanize
except ImportError:
self.state = "failed"
raise TechnicalError("The Python mechanize module is required to resolve PutLocker URLs.")
matches = re.search("https?:\/\/(www\.)?putlocker\.com\/(file|embed)\/([A-Z0-9]+)", self.url)
if matches is None:
self.state = "invalid"
raise ResolverError("The provided URL is not a valid PutLocker URL.")
video_id = matches.group(3)
try:
browser = mechanize.Browser()
browser.set_handle_robots(False)
browser.open("http://putlocker.com/embed/%s" % video_id)
except:
self.state = "failed"
raise TechnicalError("The PutLocker site could not be reached.")
try:
browser.select_form(nr=0)
result = browser.submit()
page = result.read()
except Exception, e:
self.state = "nonexistent"
raise ResolverError("The file was removed, or the URL is incorrect.")
matches = re.search("playlist: '([^']+)'", page)
if matches is None:
self.state = "failed"
raise ResolverError("No playlist was found on the given URL; the PutLocker server for this file may be in maintenance mode, or the given URL may not be a video file. The PutLocker resolver currently only supports video links.")
playlist = matches.group(1)
try:
browser.open("http://www.putlocker.com%s" % playlist)
except:
self.state = "failed"
raise TechnicalError("The playlist file for the given URL could not be loaded.")
matches = re.search("url=\"([^\"]+)\" type=\"video\/x-flv\"", browser.response().read())
if matches is None:
self.state = "failed"
raise ResolverError("The playlist file does not contain any video URLs. The PutLocker resolver currently only supports video links.")
video_file = matches.group(1)
try:
video_title = unescape(re.search('<a href="\/file\/[^"]+"[^>]*><strong>([^<]*)<\/strong><\/a>', page).group(1))
except:
self.state = "failed"
raise TechnicalError("Could not find the video title.")
stream_dict = {
'url' : video_file,
'method' : "GET",
'quality' : "unknown",
'priority' : 1,
'format' : "unknown"
}
self.results = {
'title': video_title,
'videos': [stream_dict]
}
self.state = "finished"
return self
```
#### File: resolv/resolvers/vidbux.py
```python
import re, time, urllib2
from resolv.shared import ResolverError, TechnicalError, Task, unpack_js
# No such file or the file has been removed due to copyright infringement issues.
class VidbuxTask(Task):
result_type = "video"
name = "VidBux"
author = "<NAME>"
author_url = "http://cryto.net/~joepie91"
def run(self):
matches = re.search("https?:\/\/(www\.)?vidbux\.com\/([a-zA-Z0-9]+)", self.url)
if matches is None:
self.state = "invalid"
raise ResolverError("The provided URL is not a valid VidBux URL.")
video_id = matches.group(2)
try:
contents = self.fetch_page(self.url)
except urllib2.URLError, e:
self.state = "failed"
raise TechnicalError("Could not retrieve the video page.")
if 'Human Verification' not in contents:
self.state = "invalid"
raise ResolverError("The provided URL does not exist.")
matches = re.search('<input name="fname" type="hidden" value="([^"]+)">', contents)
if matches is None:
self.state = "failed"
raise TechnicalError("Could not find filename.")
filename = matches.group(1)
matches = re.search('<input name="referer" type="hidden" value="([^"]*)">', contents)
if matches is None:
self.state = "failed"
raise TechnicalError("Could not find referer.")
referer = matches.group(1)
try:
contents = self.post_page(self.url, {
'op': "download1",
'usr_login': "",
'id': video_id,
'filename': filename,
'referer': referer,
'method_free': "Continue to Video"
})
except urllib2.URLError, e:
self.state = "failed"
raise TechnicalError("Could not complete human verification")
script = unpack_js(contents)
matches = re.search("'file','([^']+)'", script)
if matches is None:
self.state = "failed"
raise TechnicalError("No video was found on the specified URL.")
video_file = matches.group(1)
stream_dict = {
'url' : video_file,
'method' : "GET",
'quality' : "unknown",
'priority' : 1,
'format' : "unknown"
}
self.results = {
'title': "",
'videos': [stream_dict]
}
self.state = "finished"
return self
``` |
{
"source": "joepmorris/FatCrayonToolk",
"score": 3
} |
#### File: FatCrayonToolk/FCT/SimpleGeometry.py
```python
from __future__ import print_function
""" Classes and routines for generating 3D objects
"""
import math
import numpy as np
from scipy.spatial import ConvexHull
from scipy.spatial import Delaunay
from copy import deepcopy
import sys
import random
import re
# If you have placed the other modules in another directory, this can be useful to add that location to the path
#import os, sys; sys.path.append(os.path.dirname(__file__)); import Units as unit
def dipDirectionAndDipAng(tangent):
# Given the tangent to a curve, convert into dip direction and dip
e=tangent[0]
n=tangent[1]
up=tangent[2]
# If we rotate compass to align with math coords:
# W
# |
# S-------N
# |
# E
x=n
y=-e
thetaMath=math.atan2(y,x)
thetaCompass=-thetaMath*180.0/math.pi
dipDirection=thetaCompass
# Dip angle is the amount we are dipping from horizontal
# We chose orientation such that up is -ve
dipAngle=math.atan2( -up, math.sqrt( e*e + n*n ) )*180.0/math.pi
return dipDirection,dipAngle
def dipToStrikeDeg(dip_dir_deg):
# Definitions published by Wikipedia: https://en.wikipedia.org/wiki/Strike_and_dip
# One technique is to always take the strike so the dip is 90 deg to the right of the strike, in which case the redundant letter following the dip angle is omitted (right hand rule, or RHR).
#strike_rad=dip_dir_radians-0.5*np.pi
strike_deg=dip_dir_deg-90.0
return strike_deg
def strikeToDipDeg(strike_deg):
# Definitions published by Wikipedia: https://en.wikipedia.org/wiki/Strike_and_dip
# One technique is to always take the strike so the dip is 90 deg to the right of the strike, in which case the redundant letter following the dip angle is omitted (right hand rule, or RHR).
#strike_rad=dip_dir_radians-0.5*np.pi
#strike_deg=dip_dir_deg-90.0
dip_dir_deg=strike_deg+90.0
return dip_dir_deg
def degToRad(deg):
return deg*np.pi/180.0
def radToDeg(rad):
return rad*180.0/np.pi
# Return a tangent normal given azimuth and declination in degrees
def vectFromAzDecDeg(azDeg, decDeg):
v=np.asarray([0.0,1.0,0.0]) # Due North
v=rotatePoints( [v], np.asarray([1,0,0]), -degToRad(decDeg) )[0] # Rotate decDeg degrees sub-horizontal
v=rotatePoints( [v], np.asarray([0,0,1]), -degToRad(azDeg) )[0] # Rotate azDeg degrees clockwise of North
return v
# Return azimuth and dec from a tangent normal
def azDecDegFromVect(v):
# math.atan2(y, x): Return atan(y / x), in radians. The result is between -pi and pi
return (
(90.0-radToDeg(math.atan2(v[1],v[0])))%360.0, # Azimuth has different sign convention than math convention
-radToDeg(math.atan2(v[2],math.sqrt(v[0]*v[0]+v[1]*v[1])))
)
# Return a normalized vector
def normalize(v):
return v/math.sqrt(np.dot(v, v))
def writeStlObject(points,simplices,fd):
for simplex in simplices:
# I'm not sure we need to calculate a normal...
fd.write("facet normal 0.0 0.0 0.0\n")
fd.write("outer loop\n")
for iPt in simplex:
#print iPt,simplex
fd.write("vertex %g %g %g\n"%(points[iPt][0],points[iPt][1],points[iPt][2]))
fd.write("endloop\n")
fd.write("endfacet\n")
def writeStlFile(points,simplices,stlFile,name="stlObject"):
fd=open(stlFile,'w')
fd.write("solid %s\n"%(name))
writeStlObject(points,simplices,fd)
fd.write("endsolid\n");
def writeObjectsStlFile(objects,stlFile,name="stlObject"):
fd=open(stlFile,'w')
fd.write("solid %s\n"%(name))
#for object in objects:
(points,simplices)=objects
writeStlObject(points,simplices,fd)
fd.write("endsolid\n");
def writeVtk(objectListIn,scalarsIn,scalarNames,vtkFile,name="vtkObjects"):
# Remove empty object lists
#print 'len(objectListIn)',len(objectListIn),'len(scalarsIn)',len(scalarsIn),'len(scalarsIn[0])',len(scalarsIn[0])
if False:
# I don't get it, but this seems to be misbehaving now:
# In retrospect, it isn't clear it ever worked for the case where we have more than one scalar!
objectList=[objectListIn[i] for i in range(len(objectListIn)) if objectListIn[i] is not None]
scalars=[[scalarsIn[0][i] for i in range(len(objectListIn)) if objectListIn[i] is not None]]
if False:
# This is for a more general case with multiple scalars
# But it doesn't seem to work
objectList=[objectListIn[i] for i in range(len(objectListIn)) if objectListIn[i] is not None]
#scalars=[[scalarsIn[:][i]] for i in range(len(objectListIn)) if objectListIn[i] is not None]
scalars=[scalarsIn[:][i] for i in range(len(objectListIn)) if objectListIn[i] is not None]
if True:
# This works, but is not Pythonic
objectList=[]
scalars=[]
for i in range(len(scalarsIn)):
scalars.append([])
for i in range(len(objectListIn)):
if objectListIn[i] is not None:
objectList.append(objectListIn[i])
for iS in range(len(scalarsIn)):
scalars[iS].append(scalarsIn[iS][i])
#print objectList
#print scalars
#print 'len(objectList)',len(objectList),'len(scalars)',len(scalars)
fd=open(vtkFile,'w')
nPtsObj=[]
nPts=0
nTri=0
nObj=len(objectList)
for pts,simps in (objectList):
nPtsObj.append(len(pts))
nPts+=len(pts)
nTri+=len(simps)
nShift=[0]*nObj
for iShift in range(nObj-1):
nShift[iShift+1]=nShift[iShift]+nPtsObj[iShift]
fd.write("# vtk DataFile Version 2.0\n")
fd.write("%s\n"%(name))
fd.write("ASCII\n")
fd.write("DATASET UNSTRUCTURED_GRID\n")
fd.write("POINTS %d float\n"%(nPts))
for pts,simps in (objectList):
for pt in (pts):
fd.write("%g %g %g\n"%(pt[0],pt[1],pt[2]))
fd.write("CELLS %d %d\n"%(nTri,(1+3)*nTri))
iObj=0
#col=[]
for pts,simps in (objectList):
for tri in (simps):
fd.write("3 %d %d %d\n"%(tri[0]+nShift[iObj],tri[1]+nShift[iObj],tri[2]+nShift[iObj]))
#col.append(colorList[iObj])
iObj+=1
fd.write("CELL_TYPES %d\n"%(nTri))
for i in range(nTri):
fd.write("5 ") # http://www.vtk.org/wp-content/uploads/2015/04/file-formats.pdf (see Fig. 2)
if (i%10==9):
fd.write("\n")
fd.write("\n")
fd.write("CELL_DATA %d\n"%(nTri))
# Repeat as many of these as you want to define data on the tris
#for colorList, scalarName in scalars,scalarNames:
#print "check",len(scalars),scalarNames
for iCol in range(len(scalars)):
colorList=scalars[iCol]; scalarName=scalarNames[iCol]
fd.write("SCALARS "+scalarName+" float 1\n")
fd.write("LOOKUP_TABLE default\n")
iObj=0
i=0
for pts,simps in (objectList):
for tri in (simps):
fd.write("%g "%(colorList[iObj])); i+=1
if (i%10==9):
fd.write("\n")
iObj+=1
fd.write("\n")
fd.close()
def simplicesFromPoints(points):
hull=ConvexHull(points)
return hull.simplices
def convexFromPoints(points):
return ( points, simplicesFromPoints(points) )
def delaunayFromPoints(points):
return ( points, Delaunay(points).simplices )
# A non-object
emptyObject=None
# Merging two objects requires a shift in the indices
def mergeObj(obj1, obj2):
if (obj1 is None):
return obj2
if (obj2 is None):
return obj1
if (obj1==emptyObject):
return obj2
if (obj2==emptyObject):
return obj1
return (
np.vstack( (obj1[0],obj2[0]) ),
np.vstack( (obj1[1],obj2[1]+len(obj1[0])) )
)
def mergeObjects(objects):
nObj=len(objects)
merged=np.asarray(deepcopy(objects[0]))
nShift=0
for i in range(nObj-1):
#print i
nShift+=len(objects[i][0])
merged[0]=np.vstack( (merged[0],objects[i+1][0]) )
merged[1]=np.vstack( (merged[1],objects[i+1][1]+nShift) )
return merged
# Some useful objects
unitCubePts=np.asarray([
[-0.5,-0.5,-0.5],
[ 0.5,-0.5,-0.5],
[-0.5, 0.5,-0.5],
[ 0.5, 0.5,-0.5],
[-0.5,-0.5, 0.5],
[ 0.5,-0.5, 0.5],
[-0.5, 0.5, 0.5],
[ 0.5, 0.5, 0.5]
])
Cube=convexFromPoints(unitCubePts)
unitWedgePts=np.asarray([
[-0.5,-0.5,-0.5],
[ 0.5,-0.5,-0.5],
[ 0.0, 0.5,-0.5],
[-0.5,-0.5, 0.5],
[ 0.5,-0.5, 0.5],
[ 0.0, 0.5, 0.5]
])
unitWedge=convexFromPoints(unitWedgePts)
def diskObj(r, h, n=50):
dTh=2*math.pi/n
pts=[]
for i in range(n):
x=r*math.cos(i*dTh); y=r*math.sin(i*dTh)
pts.append( [x,y,-0.5*h] )
pts.append( [x,y, 0.5*h] )
pts=np.asarray(pts)
return convexFromPoints(pts)
# This was published on: https://en.wikipedia.org/wiki/Regular_dodecahedron
# Golden ratio
gr=(1.0+math.sqrt(5.0))/2.0
radiusOneSpherePts=np.asarray([
[-1,-1,-1],[ 1,-1,-1], [-1, 1,-1],[ 1, 1,-1], [-1,-1, 1],[ 1,-1, 1], [-1, 1, 1],[ 1, 1, 1],
[0,-1/gr,-gr],[0, 1/gr,-gr],[0,-1/gr, gr],[0, 1/gr, gr],
[-1/gr,-gr,0],[ 1/gr,-gr,0],[-1/gr, gr,0],[ 1/gr, gr,0],
[-gr,0,-1/gr],[-gr,0, 1/gr],[ gr,0,-1/gr],[ gr,0, 1/gr]
])
radiusOneSphereObj=convexFromPoints(radiusOneSpherePts)
def randSpherePtsFromGaussians(n,rad):
np.random.seed(1)
pts=[]
for i in range(n):
u = np.random.normal(0,1)
v = np.random.normal(0,1)
w = np.random.normal(0,1)
d= math.sqrt(u*u+v*v+w*w)
pts.append( rad*np.asarray([u,v,w])/d )
return pts
def cylObj(x0, x1, r, n=10, lengthSum=None):
sphere0=(r*radiusOneSpherePts)
sphere0[:,0]+=x0[0]; sphere0[:,1]+=x0[1]; sphere0[:,2]+=x0[2];
sphere1=(r*radiusOneSpherePts)
sphere1[:,0]+=x1[0]; sphere1[:,1]+=x1[1]; sphere1[:,2]+=x1[2];
pts=np.vstack( (sphere0, sphere1) )
#print lengthSum
#if (lengthSum != None):
try:
lengthSum[0]+=np.sqrt( np.dot((x1-x0),(x1-x0)) )
except:
pass
#print lengthSum
return convexFromPoints(pts)
# Set up a unit arrow pointing in y-direction
pts1=deepcopy(unitCubePts); pts1[:,1]-=0.5
pts2=deepcopy(unitWedgePts); pts2[:,0]*=2.0; pts2[:,1]+=0.5
unitArrow1=convexFromPoints(pts1)
unitArrow2=convexFromPoints(pts2)
unitArrowY=mergeObj(unitArrow1,unitArrow2)
def extrudePoints(points, disp):
"""
Return a list of points including the initial points and extruded end
"""
farEnd=deepcopy(points)
farEnd[:,0]+=disp[0]
farEnd[:,1]+=disp[1]
farEnd[:,2]+=disp[2]
return np.vstack( (points,farEnd) )
def transObj(object, disp):
"""
Translate an object
"""
return (object[0]+disp,object[1])
def scaleObj(object, scale):
"""
Scale an object
"""
return (object[0]*scale,object[1])
def getPoints(object):
"""
Return the list of points/vertices
"""
return object[0]
def getNPoly(object):
"""
Return the number polygons in the object
"""
return len(object[1])
def getPolyPoints(object, i):
"""
Return the list of points for polygon i
"""
return object[0][object[1][i]]
def getPolyNormal(object, x0, i):
"""
Return the normal to polygon i that points away from x0
"""
pts=getPolyPoints(object,i)
# Note that this normal could be badly behaved if aVec and bVec are close to parallel
aVec=pts[2]-pts[0]
bVec=pts[1]-pts[0]
nVec=np.cross(aVec,bVec)
nVec=nVec/np.linalg.norm(nVec)
# Check if our normal is pointing away from x0
#print 'nVec',nVec
#print 'pts[0]',pts[0]
#print 'x0',x0
if np.dot( nVec, pts[0]-x0 ) > 0.0:
return nVec
else:
return -1.0*nVec
def getPolyArea(object, i):
"""
Return the area of the polygon i
"""
pts = getPolyPoints(object, i)
area=0.0
for j in range(1,len(pts)-1):
#print 'j',j
vtmp = np.cross(pts[j]-pts[0],pts[j+1]-pts[0])
area += 0.5*np.sqrt(np.dot(vtmp,vtmp))
return area
# http://stackoverflow.com/questions/6802577/python-rotation-of-3d-vector
def rotationMatrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = np.asarray(axis)
axis = axis/math.sqrt(np.dot(axis, axis))
a = math.cos(theta/2.0)
b, c, d = -axis*math.sin(theta/2.0)
aa, bb, cc, dd = a*a, b*b, c*c, d*d
bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],
[2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],
[2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])
def rotatePoints(points, axis, theta):
rot=rotationMatrix(axis,theta)
#return rot*points
return np.transpose( np.dot(rot, np.transpose( points ) ) )
def rotateTensor(tensor, axis, theta):
#http://www.continuummechanics.org/stressxforms.html
rot=rotationMatrix(axis,theta)
return np.dot(rot,np.dot(tensor,np.transpose(rot)))
def rotateObj(object, axis, theta):
rot=rotationMatrix(axis,theta)
return ( np.transpose( np.dot(rot, np.transpose(object[0])) ) , object[1])
# This was published by http://geomalgorithms.com/a05-_intersect-1.html
def intersectionOfLineAndPlane(lineX,lineS,planeX,planeN):
V0=np.asarray(planeX)
n=np.asarray(planeN)
P0=np.asarray(lineX)
u=np.asarray(lineS)
sI=( np.dot( n, (V0-P0) ) )/( np.dot( n,u ) )
return P0+sI*u
def distOfIntersectionOfLineAndPlane(lineX,lineS,planeX,planeN):
V0=np.asarray(planeX)
n=np.asarray(planeN)
P0=np.asarray(lineX)
u=np.asarray(lineS)
sI=( np.dot( n, (V0-P0) ) )/( np.dot( n,u ) )
return sI,P0+sI*u
def shortestDistanceBetweenLineSegments( xio,xif, xjo,xjf ):
# Calculate tangents to the line segments
p1=xio; p2=xif
p3=xjo; p4=xjf
# The Python code in this function is based upon C++ code developed by <NAME>.
# The original C++ code had the following request:
# // Copyright 2001 softSurfer, 2012 <NAME>
# // This code may be freely used and modified for any purpose
# // providing that this copyright notice is included with it.
# // SoftSurfer makes no warranty for this code, and cannot be held
# // liable for any real or imagined damage resulting from its use.
# // Users of this code must verify correctness for their application.
u = p1 - p2;
v = p3 - p4;
w = p2 - p4;
a = np.dot(u,u);
b = np.dot(u,v);
c = np.dot(v,v);
d = np.dot(u,w);
e = np.dot(v,w);
D = a*c - b*b;
sD = D;
tD = D;
SMALL_NUM = 0.00000001;
# compute the line parameters of the two closest points
if (D < SMALL_NUM): # the lines are almost parallel
sN = 0.0; # force using point P0 on segment S1
sD = 1.0; # to prevent possible division by 0.0 later
tN = e;
tD = c;
else: # get the closest points on the infinite lines
sN = (b*e - c*d);
tN = (a*e - b*d);
if (sN < 0.0): # sc < 0 => the s=0 edge is visible
sN = 0.0;
tN = e;
tD = c;
elif (sN > sD):# sc > 1 => the s=1 edge is visible
sN = sD;
tN = e + b;
tD = c;
if (tN < 0.0): # tc < 0 => the t=0 edge is visible
tN = 0.0;
# recompute sc for this edge
if (-d < 0.0):
sN = 0.0;
elif (-d > a):
sN = sD;
else:
sN = -d;
sD = a;
elif (tN > tD): # tc > 1 => the t=1 edge is visible
tN = tD;
# recompute sc for this edge
if ((-d + b) < 0.0):
sN = 0;
elif ((-d + b) > a):
sN = sD;
else:
sN = (-d + b);
sD = a;
# finally do the division to get sc and tc
if(abs(sN) < SMALL_NUM):
sc = 0.0;
else:
sc = sN / sD;
if(abs(tN) < SMALL_NUM):
tc = 0.0;
else:
tc = tN / tD;
# get the difference of the two closest points
dP = w + (sc * u) - (tc * v);
distance = np.linalg.norm(dP);
return distance
# Generate a convex hull from points - Not good in general because drifts are not always convex
def pointCloudToConvexPolyhedron(drift_scan, dt, keepFraction=0.01):
np.random.seed(1)
nPoints=len(dt['x'])
pts = []
for i in range(nPoints):
#print "%.1f"%((100.0*i)/nPoints)
if (i%100==0):
sys.stdout.write("Scan progress %d%% \r" % ((100.0*i)/nPoints) )
sys.stdout.flush()
if random.random()<keepFraction:
pts.append( [dt['x'][i],dt['y'][i],dt['z'][i]] )
drift_scan=mergeObj( drift_scan, convexFromPoints( pts ) )
return drift_scan
# Generate a Delaunay triangular mesh from points - Not good on its own because it will not handle concavity
# However, we can prune the larger triangles to recover a reasonable representation of a complex tunnel
def pointCloudToDelaunay(drift_scan, dt, keepFraction=0.01):
np.random.seed(1)
nPoints=len(dt['x'])
pts = []
for i in range(nPoints):
#print "%.1f"%((100.0*i)/nPoints)
if (i%100==0):
sys.stdout.write("Scan progress %d%% \r" % ((100.0*i)/nPoints) )
sys.stdout.flush()
if random.random()<keepFraction:
pts.append( [dt['x'][i],dt['y'][i],dt['z'][i]] )
drift_scan=mergeObj( drift_scan, delaunayFromPoints( pts ) )
return drift_scan
# Remove large triangles from an object - we assume we are given a bunch of triangles
# We drop any triangle with at least one side larger than L
def pruneLargeTriangles(obj, L):
pts, simps_in = obj
simps_out = []
nTri = len(simps_in)
for i in range(nTri):
if (i%100==0):
sys.stdout.write("Pruning progress %d%% \r" % ((100.0*i)/nTri) )
sys.stdout.flush()
tri = simps_in[i]
if np.linalg.norm( np.asarray(pts[tri[1]])-np.asarray(pts[tri[0]]) ) > L:
continue
if np.linalg.norm( np.asarray(pts[tri[2]])-np.asarray(pts[tri[1]]) ) > L:
continue
if np.linalg.norm( np.asarray(pts[tri[0]])-np.asarray(pts[tri[2]]) ) > L:
continue
# If we made it this far, the triangle is small enough to keep
simps_out.append( tri )
return (pts, simps_out)
def pointCloudToCubes(drift_scan, dt, keepFraction=0.01, size=0.2):
np.random.seed(1)
#print 'reading from',scanFile
#dt=pd.read_csv(scanFile,usecols=[0,1,2],names=['x','y','z'])
#dt=pd.read_csv(scanFile)
#print dt['x'][:10]
#pts=[]
nPoints=len(dt['x'])
#pntObj=sg.scaleObj(sg.radiusOneSphereObj,[0.1,0.1,0.1])
pntObj=scaleObj(Cube,[size,size,size])
for i in range(nPoints):
#print "%.1f"%((100.0*i)/nPoints)
if (i%100==0):
sys.stdout.write("Scan progress %d%% \r" % ((100.0*i)/nPoints) )
sys.stdout.flush()
if random.random()<keepFraction:
#pts.append( [dt['x'][i],dt['y'][i],dt['z'][i]] )
# Note that this repeated merging is not at all efficient, but it's acceptable performance for the number of points we want to keep
drift_scan=mergeObj(drift_scan,
transObj( pntObj, [dt['x'][i],dt['y'][i],dt['z'][i]] )
)
print('Scan complete')
#pts=np.asarray(pts)
#drift_scan=sg.convexFromPoints(pts) # This turns a list of points into a polyhedron
return drift_scan
def grepPointCloudToCubes(drift_scan, filename, grepString, keepFraction=0.01, size=0.2):
np.random.seed(1)
dt={'x':[],'y':[],'z':[]}
fd=open(filename,'r')
for line in fd:
m = re.match(grepString, line)
if m:
x = float(m.group(1))
y = float(m.group(2))
z = float(m.group(3))
dt['x'].append(x)
dt['y'].append(y)
dt['z'].append(z)
fd.close()
nPoints=len(dt['x'])
#pntObj=sg.scaleObj(sg.radiusOneSphereObj,[0.1,0.1,0.1])
pntObj=scaleObj(Cube,[size,size,size])
for i in range(nPoints):
#print "%.1f"%((100.0*i)/nPoints)
if (i%100==0):
sys.stdout.write("Scan progress %d%% \r" % ((100.0*i)/nPoints) )
sys.stdout.flush()
if random.random()<keepFraction:
#pts.append( [dt['x'][i],dt['y'][i],dt['z'][i]] )
# Note that this repeated merging is not at all efficient, but it's acceptable performance for the number of points we want to keep
drift_scan=mergeObj(drift_scan,
transObj( pntObj, [dt['x'][i],dt['y'][i],dt['z'][i]] )
)
print('Scan complete')
#pts=np.asarray(pts)
#drift_scan=sg.convexFromPoints(pts) # This turns a list of points into a polyhedron
return drift_scan
# From math.stackexchange.com find-shortest-distance-between-lines-in-3d
def shortestDistanceBetweenLines(a,b, c,d):
# a=origin of first line
# b=tangent to first line
# c=origin of second line
# d=tangent to second line
#print "a",a
#print "b",b
#print "c",c
#print "d",d
# t=path length along first line
# s=path length along second line
e=a-c
A = -np.dot(b,b)*np.dot(d,d) + np.dot(b,d)*np.dot(b,d)
# A=0 if the lines are parallel
s = ( -np.dot(b,b)*np.dot(d,e) + np.dot(b,e)*np.dot(d,b) )/A
t = ( np.dot(d,d)*np.dot(b,e) - np.dot(b,e)*np.dot(d,b) )/A
dvect=e+b*t-d*s
dist=np.sqrt( np.dot( dvect, dvect ) )
return dist
# Place a radial hydraulic fracture of radius r at x0
def HF(r,x0, strikeRad, dipRad, h=0.5):
# start with a disk
disk=diskObj(r,h)
disk=rotateObj(disk,[0.0,1.0,0.0],dipRad)
disk=rotateObj(disk,[0.0,0.0,1.0],-strikeRad)
disk=transObj(disk,x0)
return disk
# Look for intersection of ellipses with a line segment
# A line segment is described by:
# - Origin l0
# - Tangent tVec
# - Length l
# An ellipse is described by:
# - Center e0
# - Major axis aVec
# - Minor axis bVec
# Note: These choices of definition make the calculations more efficient
# First we locate the intersection of the line with the plane of the ellipse
# Then we check if the point of intersection is BOTH
# - Inside the ellipse
# - Inside the line segment
def ellipseIntersectSegmentVect(l0, tVec, l, e0, aVec, bVec):
# Obtain normal to the ellipse (note that we must non-dimensionalize)
aMag=np.linalg.norm(aVec)
aVecN=aVec/aMag
bMag=np.linalg.norm(bVec)
bVecN=bVec/bMag
nVec=np.cross(aVecN,bVecN)
# Location of intersection along the length of the line segment:
s,xInt=distOfIntersectionOfLineAndPlane(lineX=l0,lineS=tVec,planeX=e0,planeN=nVec)
# These will later be set to the distances along the a- and b-axes
aa=np.Inf; bb=np.Inf
if s<0.0 or s>l:
# The point of intersection lies outside the line segment
return False,s,aa,bb,xInt
# The intersection is within the line segment
# Is the intersection inside the ellipse?
# Need to obtain the projection of the point of intersection onto the aVec and bVec directions
aa=np.dot((xInt-e0),aVecN)
bb=np.dot((xInt-e0),bVecN)
if aa*aa/(aMag*aMag)+bb*bb/(bMag*bMag) > 1.0:
# We are outside the ellipse
return False,s,aa,bb,xInt
# If we made it this far we are inside the ellipse and the line segment
return True,s,aa,bb,xInt
def ellipseIntersectSegmentEndPts(x0, x1, e0, aVec, bVec):
l0=x0
tVec=x1-x0
l=np.linalg.norm(tVec)
tVec=tVec/l
return ellipseIntersectSegmentVect(l0, tVec, l, e0, aVec, bVec)
def diskIntersectSegmentEndPts(x0, x1, c0, strikeDeg, dipDeg, r):
# Convert our disk into an equivalent ellipse
aVec, bVec=strikeDipToAxes(strikeDeg,dipDeg,r)
intersects,s,aa,bb,xInt=ellipseIntersectSegmentEndPts(x0, x1, c0, aVec, bVec)
return intersects,xInt
# Convert strike and dip into major and minor axes of an ellipse
def strikeDipToAxes(strikeDeg,dipDeg,radius):
strikeRad=degToRad(strikeDeg)
dipRad=degToRad(dipDeg)
aVec=rotatePoints( [0.0,radius,0.0], [0.0,0.0,1.0], -strikeRad )
bVec=rotatePoints( [0.0,0.0,radius], aVec, dipRad+0.5*np.pi )
return aVec, bVec
``` |
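Every object in this module is a (points, simplices) tuple, so primitives can be translated, rotated, and merged, then written out as STL or VTK. A short sketch of composing a scene that way follows; the `FCT.SimpleGeometry` import path is assumed from the file layout, and the coordinates and filenames are arbitrary.
```python
# Hedged usage sketch for the (points, simplices) object model above.
# Assumes the module is importable as FCT.SimpleGeometry; filenames are arbitrary.
import numpy as np
import FCT.SimpleGeometry as sg

# A borehole-like cylinder between two points, plus a radial hydraulic fracture disk.
well = sg.cylObj(np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, -50.0]), r=0.5)
frac = sg.HF(r=10.0, x0=np.array([0.0, 0.0, -40.0]),
             strikeRad=sg.degToRad(30.0), dipRad=sg.degToRad(60.0))

scene = sg.mergeObj(well, frac)
sg.writeObjectsStlFile(scene, "scene.stl", name="demo")
# writeVtk takes a list of objects plus one scalar list per scalar name.
sg.writeVtk([well, frac], [[0.0, 1.0]], ["objectId"], "scene.vtk", name="demo")
```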
{
"source": "Joe-Poc/data-annotator-for-machine-learning",
"score": 2
} |
#### File: src/al/embeddings.py
```python
import logging
from fastai.tabular import *
from src.al.project_service import update_project
from src.al.sr_service import query_all_srs
import src.utils.fileSystem as fileSystem
modelDir = "models/"
log = logging.getLogger('loop_al')
# generate embeddings model
def embeddings_model(dataset, cat_names, cont_names, dep_var):
procs = [FillMissing, Categorify, Normalize]
test = TabularList.from_df(dataset.iloc[40:50].copy(), path=".", cat_names=cat_names, cont_names=cont_names)
data = (TabularList.from_df(dataset, path=".", cat_names=cat_names, cont_names=cont_names, procs=procs)
.split_by_idx(list(range(40, 50)))
.label_from_df(cols=dep_var)
.add_test(test)
.databunch())
learn = tabular_learner(data, layers=[1000, 500], metrics=accuracy)
return learn
# get embeddings vector list
def get_cat_emb_list(learn):
cat_emb_list, idx = {}, 0
for k, v in learn.data.train_ds.x.classes.items():
emb_weights = list(learn.model.named_parameters())[idx][1]
emb_np = to_np(emb_weights.data)
vec = {}
for i, sr_lb in enumerate(v):
vec[sr_lb] = emb_np[i]
cat_emb_list[k] = pd.DataFrame(vec)
idx += 1
return cat_emb_list
# load all sr data and seperate number column and categorical column
def prepare_dataset(project_name):
sr_text, num_col, obj_col = [], [], []
for sr in query_all_srs(project_name):
sr_label, top_label = [], None
if sr['userInputs']:
for label in sr['userInputs']:
sr_label.append(label['problemCategory'])
top_label = Counter(sr_label).most_common(1)[0][0]
if top_label:
sr['originalData']['_top_label_'] = top_label
sr_text.append(sr['originalData'])
sr_text = pd.DataFrame(sr_text)
for k, v in sr_text.dtypes.items():
if k == '_top_label_':
continue
if v != 'object':
num_col.append(k)
else:
obj_col.append(k)
if len(num_col) == 0:
sr_text = sr_text.astype(str)
sr_text = sr_text.drop_duplicates().reset_index(drop=True)
sr_text.replace("", 0, inplace=True)
sr_text.replace(np.nan, "", inplace=True)
return {"sr_text": sr_text, "num_col": num_col, "obj_col": obj_col}
# train a embedding model to generate sr vectors and get sr vectors
def train_embedding_model_gain_vector(project_id, project_name, sr_text, token):
emb_list, upload_file = {}, ""
sr_data = prepare_dataset(project_name)
if sr_data['obj_col']:
learn = embeddings_model(sr_data['sr_text'], sr_data['obj_col'], sr_data['num_col'], '_top_label_')
emb_list = get_cat_emb_list(learn)
# save the model to disk
model_name = project_id + "_vaporizer_model.pkl"
local_file = str("./" + modelDir + model_name)
with open(local_file, 'wb') as vec_pickle:
pickle.dump(emb_list, vec_pickle)
# upload model to s3
upload_file = modelDir + project_id + '/' + model_name
upload_file = fileSystem.upload_file(upload_file, local_file, token)
# update al info
update = {"$set": {
"al.vectorModel": upload_file,
'al.numberColumn': sr_data['num_col'],
'al.objectColumn': sr_data['obj_col'],
"al.alFailed": False
}}
update_project({"projectName": project_name}, update)
srs_vectors = embedding_vectors(sr_text, emb_list, sr_data['num_col'])
return srs_vectors
# map embeddings vector
def embedding_vectors(sr_text, emb_list, num_col):
srs_vectors = []
for sr in sr_text:
vectors = []
num_vec = []
for k, v in sr.items():
if k in num_col:
num_vec.append(v)
else:
if v in emb_list[k]:
vectors = np.append(vectors, emb_list[k][v])
else:
vectors = np.append(vectors, emb_list[k]["#na#"])
sr_vector = np.append(vectors, num_vec)
srs_vectors.append(sr_vector)
srs_vectors = pd.DataFrame(srs_vectors).replace(np.nan, 0).replace("", 0)
srs_vectors = srs_vectors.to_numpy()
return np.array(srs_vectors)
# get embeddings vector
def gain_srs_embedding_vector(sr_text, vector_model, project_id, num_col, token):
emb_list = {}
if vector_model:
# download embeddings model if not exist
model_name = project_id + "_vaporizer_model.pkl"
local_file = str("./" + modelDir + model_name)
fileSystem.download_file(True, vector_model, local_file, token)
# load embedding model to get sr vectors
with open(local_file, 'rb') as model_vec:
emb_list = pickle.load(model_vec)
srs_vectors = embedding_vectors(sr_text, emb_list, num_col)
return srs_vectors
``` |
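The flow above builds a labelled dataframe from stored SRs, fits a fastai tabular model purely to learn categorical embeddings, persists the embedding tables, and then maps each SR to a flat vector for the active-learning loop. A hedged sketch of the two entry points follows; the project identifiers, token, SR fields, and model path are placeholders, and the `src.al.embeddings` import path is assumed from the file layout.
```python
# Hedged sketch of driving the embedding helpers above; identifiers are placeholders.
from src.al.embeddings import (
    train_embedding_model_gain_vector, gain_srs_embedding_vector)

project_id, project_name, token = "proj-123", "demo-project", "auth-token"

# sr_text is a list of originalData dicts whose keys match the trained columns.
sr_text = [{"priority": "P1", "component": "ui", "age_days": 12}]

# First run: trains the tabular model, uploads the pickled embedding tables,
# and returns one dense vector per SR.
vectors = train_embedding_model_gain_vector(project_id, project_name, sr_text, token)

# Later runs: reuse the stored model referenced in the project's al config.
vectors = gain_srs_embedding_vector(
    sr_text, vector_model="models/proj-123/proj-123_vaporizer_model.pkl",
    project_id=project_id, num_col=["age_days"], token=token)
```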
{
"source": "joepope44/fantasty_football_app",
"score": 3
} |
#### File: joepope44/fantasty_football_app/linear_regression.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.dummy import DummyRegressor
import statsmodels.api as sm
import statsmodels.formula.api as smf
import seaborn as sns
sns.set(style="whitegrid")
# Notebook-style scratch script: `tmp` (the merged weekly stats / salary dataframe) and some
# names used below (e.g. X_train, RMSE) come from cells that were run out of top-to-bottom order.
tmp.head()
tmp.to_csv('data/tmp.csv')
y = tmp['Passing Yards']
X = tmp.drop(['Passing Yards'], axis=1)
labels = X[['Team_x', 'Name']]
X.drop([
'Team_x', 'Team_y', 'Name', 'Opponent', 'Total Salary', 'Fantasy Points_y',
'Fantasy Points_x'], axis=1, inplace=True)
regr = LinearRegression()
model = regr.fit(X, y)
model.score(X, y)
rf = RandomForestRegressor(n_estimators=1800, max_features=3)
rf.fit(X_train, y_train)
rf.score(X_test, y_test)
RMSE(rf.predict(X_test), y_test)
gbm = GradientBoostingRegressor(n_estimators=1600, max_depth=3, learning_rate=.01)
gbm.fit(X_train, y_train)
gbm.score(X_test, y_test)
RMSE(gbm.predict(X_test),y_test)
# Create decision tree classifer object
clf = RandomForestClassifier(random_state=0, n_jobs=-1)
# Train model
tree = clf.fit(X, y)
importances = tree.feature_importances_
# Sort feature importances in descending order
indices = np.argsort(importances)[::-1]
# Rearrange feature names so they match the sorted feature importances
names = [X.columns[i] for i in indices]
imp = pd.DataFrame(list(zip(names, importances[indices])))
imp.sort_values(1, ascending=False)[:10]
feat_imps = list(zip(X_train.columns,rf.feature_importances_))
feat_imps = sorted(feat_imps, key = lambda x: x[1], reverse=False)
feat_imps = pd.DataFrame(feat_imps, columns=['feature','importance']).head()
feat_imps.plot(x='feature',y='importance',kind='barh')
# LINEAR REGRESSION
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=0.2, random_state=44)
# Create a dummy regressor
dummy_mean = DummyRegressor(strategy='mean')
# "Train" dummy regressor
dummy_mean.fit(X_train, y_train)
# Get R-squared score
dummy_mean.score(X_test, y_test) # -0.11 R2 using mean
# vanilla linear regression - R2 is -4!
model = regr.fit(X_train, y_train)
model.score(X_test, y_test)
# LASSO
# Standarize features
scaler = StandardScaler()
X_std = scaler.fit_transform(X_train)
# Create lasso regression with alpha value
lasso = Lasso(alpha=0.1)
# Fit the linear regression
lasso_model = lasso.fit(X_std, y_train)
lasso_model.score(X_test, y_test)
dict(zip((X_train).columns, lasso_model.coef_))
# Create ridge regression with an alpha value
ridge_model = Ridge(alpha=0.3)
# Fit the linear regression
ridge_model = regr.fit(X_std, y_train)
ridge_model.score(X_test, y_test)
# R2 .914
model = sm.OLS(y_train, X_train, hasconst=100)
fit = model.fit()
fit.summary()
# rslt = model.fit_regularized()
# rslt.summary()
data = pd.DataFrame()
data['predict']=fit.predict(X_train)
data['resid']=y_train - data.predict
with sns.axes_style('white'):
plot=data.plot(
kind='scatter',
x='predict', y='resid', figsize=(10,6)
)
from sklearn.metrics import mean_squared_error
def RMSE(actual, predicted):
return np.sqrt(mean_squared_error(actual,predicted))
print('OLS regression score val RMSE: %.3f \n' % RMSE(regr.predict(X_test), y_test))
X.to_csv('data/regr.csv')
df9 = X.join(y)
df9.head()
sns.distplot(y)
y.mean() # 211 yards
# DECISION TREE
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor(random_state=0)
tree_model = tree.fit(X_train, y_train)
``` |
{
"source": "joeportela/tinyAPI",
"score": 2
} |
#### File: tinyAPI/base/config.py
```python
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
from .exception import ConfigurationException
import tinyAPI_config
__all__ = ['ConfigManager']
# ----- Public Classes --------------------------------------------------------
class ConfigManager(object):
'''Handles retrieval and validation of configuration settings.'''
@staticmethod
def value(key):
'''Retrieves the configuration value named by key.'''
if key in tinyAPI_config.values:
return tinyAPI_config.values[key]
else:
raise ConfigurationException(
'"' + key + '" is not configured in tinyAPI_config')
```
#### File: base/data_store/FallBack.py
```python
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
from .exception import DataStoreException
# ----- Public Classes --------------------------------------------------------
class FallBack(object):
'''
Implements the fall back durability algorithm.
'''
def __init__(self, settings):
self.__selected_host = None
if len(settings) != 2:
raise DataStoreException('exactly 2 hosts must be configured')
self.settings = settings
def next(self):
if self.__selected_host is None:
self.__selected_host = 0
elif self.__selected_host == 0:
self.__selected_host = 1
else:
raise DataStoreException('no more hosts remain')
return self.settings[self.__selected_host]
```
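FallBack simply hands out the primary host first and the secondary host on the next call, raising once both are exhausted. A minimal sketch is below; the host dicts and the import path are assumptions based on the file layout.
```python
# Minimal sketch of the two-host fall back iteration above; host dicts are assumptions.
from tinyAPI.base.data_store.FallBack import FallBack

fb = FallBack([{'host': 'db-primary'}, {'host': 'db-replica'}])

primary = fb.next()      # first call hands out settings[0]
secondary = fb.next()    # second call falls back to settings[1]
# A third call raises DataStoreException('no more hosts remain').
```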
#### File: base/data_store/memcache.py
```python
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
from tinyAPI.base.config import ConfigManager
from tinyAPI.base.stats_logger import StatsLogger
import pylibmc
import threading
import time
__all__ = [
'Memcache'
]
# ----- Thread Local Data -----------------------------------------------------
_thread_local_data = threading.local()
_thread_local_data.stats = {
'requests': 0,
'hits': 0
}
_thread_local_data.cache = {}
# ----- Public Classes --------------------------------------------------------
class Memcache(object):
'''Manages interactions with configured Memcached servers.'''
def __init__(self):
self.__handle = None
def clear_local_cache(self):
_thread_local_data.stats = {
'requests': 0,
'hits': 0
}
_thread_local_data.cache = {}
def __add_to_local_cache(self, key, data=None, ttl=None):
if key not in _thread_local_data.cache:
_thread_local_data.cache[key] = {
'added': (time.time() if data is not None else None),
'data': data,
'ttl': ttl
}
def close(self):
'''Closes all connections to Memcached servers.'''
if self.__handle is not None:
self.__handle.disconnect_all()
self.__handle = None
def __connect(self):
if self.__handle is None:
self.__handle = \
pylibmc.Client(
ConfigManager.value('memcached servers'),
binary = True,
behaviors = {
'dead_timeout': 60,
'ketama': 1,
'remove_failed': 1,
'retry_timeout': 1,
'tcp_nodelay': True
})
def __get_from_local_cache(self, key):
if key not in _thread_local_data.cache or \
_thread_local_data.cache[key]['data'] is None:
return None
added = _thread_local_data.cache[key]['added']
ttl = _thread_local_data.cache[key]['ttl']
if added is not None and ttl is not None:
if time.time() - added >= ttl:
return None
return _thread_local_data.cache[key]['data'].copy()
def purge(self, key):
'''Removes the value stored at the specified key from the cache. '''
self.__connect()
self.__handle.delete(key)
if key in _thread_local_data.cache:
del _thread_local_data.cache[key]
def retrieve(self, key):
'''Retrieves the data stored at the specified key from the cache.'''
StatsLogger().hit_ratio(
'Cache Stats',
_thread_local_data.stats['requests'],
_thread_local_data.stats['hits'])
_thread_local_data.stats['requests'] += 1
data = self.__get_from_local_cache(key)
if data is not None:
_thread_local_data.stats['hits'] += 1
return data
self.__connect()
value = self.__handle.get(key)
if value is not None:
self.__add_to_local_cache(key, value)
return value.copy() if value else None
    def retrieve_multi(self, keys):
        '''Retrieves the data stored for a number of keys from the cache.'''
        StatsLogger().hit_ratio(
            'Cache Stats',
            _thread_local_data.stats['requests'],
            _thread_local_data.stats['hits'])
        _thread_local_data.stats['requests'] += 1
        # Multi-key lookups are cached locally under a composite key so that
        # repeated calls with the same key set skip the memcached round trip.
        local_key = '|'.join(sorted(keys))
        data = self.__get_from_local_cache(local_key)
        if data is not None:
            _thread_local_data.stats['hits'] += 1
            return data
        self.__connect()
        values = self.__handle.get_multi(keys)
        if values is not None:
            self.__add_to_local_cache(local_key, values)
        return values.copy() if values else {}
def store(self, key, data, ttl=0, local_cache_ttl=None):
'''Stores the data at the specified key in the cache.'''
self.__connect()
self.__handle.set(key, data, ttl)
self.__add_to_local_cache(key, data, local_cache_ttl)
```
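A hedged sketch of the store/retrieve/purge flow for `Memcache`, assuming the 'memcached servers' configuration entry points at reachable memcached instances and that the import path follows the file layout above.

```python
# Sketch only; requires a working 'memcached servers' configuration entry.
from tinyAPI.base.data_store.memcache import Memcache

cache = Memcache()

# Keep for 5 minutes in memcached and 30 seconds in the per-thread cache.
cache.store('user:42', {'name': 'Ada'}, ttl=300, local_cache_ttl=30)

print(cache.retrieve('user:42'))   # served from memcached or the local cache

cache.purge('user:42')             # evicts from memcached and the local cache
cache.close()
```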
#### File: base/services/crypto.py
```python
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
from Crypto import Random
from Crypto.Cipher import AES
from .exception import CryptoException
import base64
import hashlib
import json
import time
__all__ = [
'DataArmor'
]
# ----- Public Classes --------------------------------------------------------
class DataArmor(object):
'''Creates an encrypted token that cannot be modified without detection and
can be expired by TTL.'''
def __init__(self, key, data):
key_len = len(key)
if key_len != 16 and key_len != 24 and key_len != 32:
raise CryptoException(
'key must be of length 16, 24, or 32 bytes')
self.__key = key
self.__data = data
self.timestamp = None
def __decrypt(self, data):
data = base64.b64decode(data.encode(), b'|_')
iv = data[:AES.block_size]
cipher = AES.new(self.__key, AES.MODE_CBC, iv)
return self.__unpad(cipher.decrypt(data[AES.block_size:]))
    def __encrypt(self, data):
        data = self.__pad(data)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.__key, AES.MODE_CBC, iv)
        # AES.encrypt() requires bytes; the padded payload is ASCII-safe
        # because json.dumps() escapes non-ASCII characters by default.
        return base64.b64encode(iv + cipher.encrypt(data.encode('utf8')), b'|_')
def lock(self):
'''Secure the data.'''
data = json.dumps(self.__data)
timestamp = \
str \
(
int(time.time())
if self.timestamp is None else
self.timestamp
)
sha = \
hashlib.sha224(
data.encode('utf8') +
timestamp.encode('utf8')
) \
.hexdigest()
data = data + chr(2) + timestamp
return self.__encrypt(data).decode() + '-' + sha
def __pad(self, data):
bs = AES.block_size
return data + (bs - len(data) % bs) * chr(bs - len(data) % bs)
def set_timestamp(self, timestamp):
self.timestamp = timestamp
return self
def unlock(self, ttl=None):
'''Decrypt the data and return the original payload.'''
parts = self.__data.split('-')
try:
data = self.__decrypt(parts[0]).decode()
        except Exception:
raise CryptoException(
'data failed to decrypt; contents were likely tampered with')
sha = parts[1]
parts = data.split(chr(2))
data = parts[0]
try:
self.timestamp = int(parts[1])
except IndexError:
raise CryptoException(
'could not find timestamp; encryption key was likely incorrect')
if hashlib.sha224(
data.encode('utf8') + str(self.timestamp).encode('utf8')
) \
.hexdigest() != sha:
            raise CryptoException('armored token has been tampered with')
if ttl is not None:
if (int(time.time()) - self.timestamp) > ttl:
raise CryptoException('token has expired')
return json.loads(data)
def __unpad(self, data):
return data[:-ord(data[len(data) - 1:])]
```
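A round-trip sketch for `DataArmor`: one instance locks a payload into a token, and a second instance, constructed with that token, unlocks it. The 32-byte key is a throwaway placeholder and the import path is assumed from the file layout above.

```python
# Sketch only: the key below is a placeholder, never a real secret.
from tinyAPI.base.services.crypto import DataArmor

key = b'0123456789abcdef0123456789abcdef'   # must be 16, 24, or 32 bytes

token = DataArmor(key, {'user_id': 42}).lock()

# Unlocking uses a fresh instance constructed with the token, not the payload.
payload = DataArmor(key, token).unlock(ttl=3600)
print(payload)   # {'user_id': 42}, provided the token is under an hour old
```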
#### File: services/rdbms_builder/manager.py
```python
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
from .exception import RDBMSBuilderException
from tinyAPI.base.config import ConfigManager
from tinyAPI.base.data_store.exception import DataStoreDuplicateKeyException
from tinyAPI.base.utils import find_dirs, find_files
import codecs
import hashlib
import importlib.machinery
import os
import re
import subprocess
import sys
import tempfile
import tinyAPI
__all__ = [
'Manager'
]
# ----- Protected Classes -----------------------------------------------------
class _RDBMSBuilderModuleSQL(object):
'''Simple container for all of the SQL related assets for a module.'''
def __init__(self, name, prefix):
self.__name = name
self.__prefix = prefix
self.__dml_files = []
self.__build_file = None
self.__definitions = []
self.__indexes = []
self.__inserts = []
def add_definition(self, db_name, statement):
self.__definitions.append([db_name, statement])
return self
def add_dml_file(self, dml_file):
self.__dml_files.append(dml_file)
return self
def add_index(self, db_name, statement):
self.__indexes.append([db_name, statement])
return self
def add_insert(self, db_name, statement):
self.__inserts.append([db_name, statement])
return self
def get_build_file(self):
return self.__build_file
def get_definitions(self):
return self.__definitions
def get_dml_files(self):
return self.__dml_files
def get_indexes(self):
return self.__indexes
def get_inserts(self):
return self.__inserts
def get_name(self):
return self.__name
def get_prefix(self):
return self.__prefix
def set_build_file(self, build_file):
self.__build_file = build_file
return self
# ----- Public Classes --------------------------------------------------------
class Manager(object):
'''Determines which database objects to build and builds them.'''
def __init__(self, cli=None):
self.__cli = cli
self.__managed_schema = None
self.__modules = {}
self.__num_rdbms_objects = 0
self.__num_rdbms_tables = 0
self.__num_rdbms_indexes = 0
self.__num_rdbms_routines = 0
self.__connection_name = None
self.__exec_sql_command = None
self.__dependencies_map = {}
self.__dependents_map = {}
self.__modules_to_build = {}
self.__modules_to_build_prefix = {}
self.__prefix_to_module = {}
self.__foreign_keys = {}
self.__unindexed_foreign_keys = []
def __add_foreign_key_constraints(self):
self.__notice("Adding foreign key constraints...")
for module_name in self.__foreign_keys.keys():
if module_name in self.__modules_to_build:
for fk in self.__foreign_keys[module_name]:
db_name = fk[0]
foreign_key = fk[1]
matches = re.search('add constraint (.*?)$',
foreign_key,
re.M | re.I | re.S)
if matches is None:
raise RDBMSBuilderException(
'could not find name of constraint in '
+ 'statement:\n'
+ foreign_key)
self.__execute_statement(foreign_key, db_name)
self.__notice('(+) ' + matches.group(1), 1)
self.__num_rdbms_objects += 1
def __assemble_all_modules(self):
'''Finds all of the modules in the application and puts their
        respective parts together in a centralized module object.'''
self.__notice('Assembling all modules...')
##
# Step 1
#
# Take an initial pass at building the modules with the minimum set of
# information required. This is necessary so that the second pass can
# properly assign dependencies based on knowing all of the available
# modules.
##
for path in ConfigManager.value('application dirs'):
files = find_files(path + '/*', 'build.py')
for file in files:
if file != '':
module_name = None
prefix = None
matches = re.search('(.*)/sql/ddl/build.py$', file)
if matches is not None:
module_name = matches.group(1).split('/')[-1]
with codecs.open(file, 'r', 'utf-8') as f:
contents = f.read()
if contents != '':
matches = re.search('def (.*?)_build\s?\(',
contents,
re.M | re.S | re.I)
if matches is None:
raise RDBMSBuilderException(
'found build.py file but could not '
+ 'find build function in "'
+ file
+ '"')
prefix = matches.group(1)
if module_name is not None and prefix is not None:
module = _RDBMSBuilderModuleSQL(module_name, prefix)
module.set_build_file(file)
self.__modules[module_name] = module
self.__prefix_to_module[prefix] = module_name
if module_name not in self.__dependencies_map:
self.__dependencies_map[module_name] = []
if module_name not in self.__dependents_map:
self.__dependents_map[module_name] = []
##
# Step 2
#
# Take a second pass at all of the modules, assign dependencies and
# associate all necessary SQL so the objects can be built.
##
for module in self.__modules.values():
loader = importlib.machinery.SourceFileLoader(
module.get_name(), module.get_build_file())
build_file = loader.load_module(module.get_name())
build = getattr(build_file, module.get_prefix() + '_build')
objects = build()
for object in objects:
module.add_definition(object.get_db_name(),
object.get_definition())
if isinstance(object, tinyAPI.Table):
self.__assign_dependencies(module, object)
if isinstance(object, tinyAPI.Table):
indexes = object.get_index_definitions()
for index in indexes:
module.add_index(object.get_db_name(), index)
inserts = object.get_insert_statements()
if inserts is not None:
for insert in inserts:
module.add_insert(object.get_db_name(), insert)
fks = object.get_foreign_key_definitions()
for fk in fks:
if module.get_name() not in self.__foreign_keys:
self.__foreign_keys[module.get_name()] = []
self.__foreign_keys[module.get_name()].append([
object.get_db_name(), fk + ';'])
self.__unindexed_foreign_keys = \
self.__unindexed_foreign_keys + \
object.get_unindexed_foreign_keys()
self.__handle_module_dml(module)
def __assign_dependencies(self, module, table):
'''Record all of the dependencies between modules so the system can be
rebuilt with dependencies in mind.'''
dependencies = table.get_dependencies()
for dependency in dependencies:
if dependency not in self.__prefix_to_module.keys():
raise RDBMSBuilderException(
'found dependency "' + dependency + '" but do not have '
+ 'a module for it')
if module.get_name() != self.__prefix_to_module[dependency]:
self.__dependencies_map[module.get_name()] \
.append(self.__prefix_to_module[dependency])
if self.__prefix_to_module[dependency] not in \
self.__dependents_map:
self.__dependents_map[
self.__prefix_to_module[dependency]] = []
self.__dependents_map[self.__prefix_to_module[dependency]] \
.append(module.get_name())
def __build_dml(self, module):
for file in module.get_dml_files():
with open(file, 'rb') as f:
self.__execute_statement(f.read().decode())
self.__track_module_info(module, file)
routines = tinyAPI.dsh().query(
"""select routine_type,
routine_name
from information_schema.routines
where routine_name like '"""
+ module.get_prefix()
+ "\_%%'")
for routine in routines:
self.__notice('(+) '
+ routine['routine_type'].lower()
+ ' '
+ routine['routine_name']
+ '()',
2)
self.__num_rdbms_routines += 1
def __build_sql(self, module):
statements = module.get_definitions()
if len(statements) > 0:
for statement in statements:
matches = re.search('^create table (.*?)$',
statement[1],
re.M | re.S | re.I)
if matches is None:
matches = re.search('^create view (.*?)$',
statement[1],
re.M | re.S | re.I)
if matches is not None:
self.__notice('(+) '
+ statement[0]
+ '.'
+ matches.group(1), 2)
self.__num_rdbms_tables += 1
self.__num_rdbms_objects += 1
self.__execute_statement(statement[1], statement[0])
statements = module.get_indexes()
if len(statements) > 0:
for statement in statements:
matches = re.match('create index (.*?)$',
statement[1],
re.M | re.S | re.I)
if matches is not None:
self.__notice('(+) '
+ statement[0]
+ '.'
+ matches.group(1), 2)
self.__num_rdbms_indexes += 1
self.__num_rdbms_objects += 1
self.__execute_statement(statement[1], statement[0])
self.__display_insert_progress(module.get_inserts())
self.__track_module_info(module, module.get_build_file())
def __clean_up_rdbms_builder_files(self):
        self.__notice('Cleaning up RDBMS Builder files...')
for file in os.listdir('/tmp'):
if re.match('tinyAPI_rdbms_builder_', file):
os.remove('/tmp/' + file)
self.__notice('(-) /tmp/' + file, 1)
def __compile_build_list_by_changes(self):
'''Mark for build modules that contain modified build or DML files.'''
for module in list(self.__modules.values()):
requires_build = False
if self.__file_has_been_modified(module.get_build_file()):
requires_build = True
else:
for file in module.get_dml_files():
if self.__file_has_been_modified(file):
requires_build = True
break
if requires_build and \
module.get_name() not in self.__modules_to_build:
self.__notice('(+) ' + module.get_name(), 1)
self.__compile_build_list_for_module(module.get_name())
def __compile_build_list_for_all_modules(self):
'''Force a build for all modules.'''
for module_name in list(self.__modules.keys()):
self.__compile_build_list_for_module(module_name)
def __compile_build_list_for_module(self, module_name):
        '''Mark a module for building and determine which of its dependents
        also need to be built.'''
        self.__modules_to_build[module_name] = True
self.__modules_to_build_prefix[
self.__modules[module_name].get_prefix()] = True
if module_name in self.__dependents_map:
for dependent in self.__dependents_map[module_name]:
if dependent not in self.__modules_to_build:
self.__compile_build_list_for_module(dependent)
def __compile_dirty_module_list(self):
self.__notice('Determining if there are dirty modules...')
records = tinyAPI.dsh().query(
'''select name
from rdbms_builder.dirty_module''')
for record in records:
if record['name'] not in self.__modules:
# The dirty module is no longer part of the application, so
# we should stop tracking it here.
self.__notice('(-) ' + record['name'], 1)
tinyAPI.dsh().query(
'''delete from rdbms_builder.dirty_module
where name = %s''',
[record['name']])
tinyAPI.dsh().commit()
else:
self.__notice('(+) ' + record['name'], 1)
self.__compile_build_list_for_module(record['name'])
def __compile_reference_definitions(self):
'''Compile the reference tables created with RefTable() into variables
so that no database interactions are required to interact with
them.'''
if ConfigManager.value('reference definition file') is None:
return
self.__notice('Compiling reference definitions...')
if ConfigManager.value('data store') != 'mysql':
self.__data_store_not_supported()
reference_tables = tinyAPI.dsh().query(
"""select table_schema,
table_name
from tables
where table_schema in (""" + self.__managed_schemas + """)
and table_name like '%%\_ref\_%%'
order by table_name asc""")
content = """
# +---------------------------------------------------------------------------+
# | WARNING - MACHINE GENERATED FILE |
# +---------------------------------------------------------------------------+
##
# Any changes that you make directly to this file WILL BE LOST! It was auto-
# generated by the RDBMS Builder.
##
# ----- Imports ---------------------------------------------------------------
import builtins
import gettext
# ----- Instructions ----------------------------------------------------------
# Language translation support
builtins._ = gettext.gettext
# TABLE tinyAPI_ref_unit_test
builtins.TINYAPI_UNIT_TEST_ONE = 1
builtins.TINYAPI_UNIT_TEST_TWO = 2
builtins.TINYAPI_UNIT_TEST_THREE = 3
def _tinyapi_ref_unit_test():
return {
1: "one",
2: "two",
3: "three"
}
builtins._tinyapi_ref_unit_test = _tinyapi_ref_unit_test
"""
index = 0
for reference_table in reference_tables:
data = tinyAPI.dsh().query(
"""select value,
id
from """
+ reference_table['table_schema']
+ '.'
+ reference_table['table_name'] + """
order by id asc""")
if index > 0:
content += "\n"
index += 1
content += "# TABLE " + reference_table['table_name'] + "\n"
values = []
for item in data:
value = re.sub('[^A-Za-z0-9_]',
'_',
reference_table['table_name']
+ '_'
+ item['value'])
value = re.sub('_ref_', '_', value)
value = re.sub('[_]+', '_', value)
value = re.sub('[_]+$', '', value)
content += ('builtins.'
+ value.upper()
+ ' = '
+ str(item['id']) + "\n")
values.append(' '
+ str(item['id'])
+ ': "'
+ item['value']
+ '"')
content += ("def _"
+ reference_table['table_name']
+ '():\n'
+ ' return {\n'
+ ",\n".join(values)
+ '\n }\n'
+ 'builtins._'
+ reference_table['table_name' ]
+ ' = _'
+ reference_table[ 'table_name' ]
+ "\n")
f = open(ConfigManager.value('reference definition file'), 'w')
f.write(content.lstrip())
f.close()
def __data_store_not_supported(self):
raise RDBMSBuilderException(
'the RDBMS Builder does not currently support "'
+ ConfigManager.value('data store')
+ '"')
def __determine_managed_schemas(self):
'''The database may contain many schemas, some of which should be
ignored by tinyAPI. A list of which schemas to manage is created
here.'''
self.__notice('Determining managed schemas...')
schemas = []
for schema in ConfigManager.value('rdbms builder schemas'):
schemas.append("'" + schema + "'")
self.__managed_schemas = ', '.join(schemas)
self.__notice(self.__managed_schemas, 1)
def __display_insert_progress(self, inserts=tuple()):
if len(inserts) == 0:
return
print(' (+) adding table data ', end = '')
sys.stdout.flush()
chars = ['-', '-', '\\', '\\', '|', '|', '/', '/']
index = 0
for insert in inserts:
print(chars[index], end = '')
sys.stdout.flush()
self.__execute_statement(insert[1], insert[0])
print("\b", end = '')
sys.stdout.flush()
index += 1
if index >= len(chars):
index = 0
print(" ")
def __drop_foreign_key_constraints(self):
self.__notice('Dropping relevant foreign key constraints...')
if ConfigManager.value('data store') != 'mysql':
self.__data_store_not_supported()
constraints = tinyAPI.dsh().query(
'''select constraint_schema,
table_name,
constraint_name
from referential_constraints
where constraint_schema in ('''
+ self.__managed_schemas
+ ')')
for constraint in constraints:
parts = constraint['constraint_name'].split('_')
if parts[0] in self.__modules_to_build_prefix:
self.__notice('(-) ' + constraint['constraint_name'], 1)
tinyAPI.dsh().query(
'alter table '
+ constraint['constraint_schema']
+ '.'
+ constraint['table_name']
+ ' drop foreign key '
+ constraint['constraint_name'])
def __drop_objects(self):
self.__notice('Dropping objects that will be rebuilt...')
if ConfigManager.value('data store') != 'mysql':
self.__data_store_not_supported()
tables = tinyAPI.dsh().query(
'''select table_schema,
table_name
from tables
where table_schema in ('''
+ self.__managed_schemas
+ ')')
for table in tables:
parts = table['table_name'].split('_')
if parts[0] in self.__modules_to_build_prefix:
self.__notice('(-) table ' + table['table_name'], 1)
tinyAPI.dsh().query(
'drop table '
+ table['table_schema' ]
+ '.'
+ table['table_name'])
routines = tinyAPI.dsh().query(
'''select routine_schema,
routine_type,
routine_name
from routines
where routine_schema in ('''
+ self.__managed_schemas
+ ')')
for routine in routines:
parts = routine['routine_name'].split('_')
if parts[0] in self.__modules_to_build_prefix:
self.__notice('(-) '
+ routine['routine_type'].lower()
+ ' '
+ routine['routine_name'])
tinyAPI.dsh().query(
                    'drop ' + routine['routine_type'].lower() + ' '
+ routine['routine_schema']
+ '.'
+ routine['routine_name'])
def __enhance_build_error(self, message):
if ConfigManager.value('data store') != 'mysql':
return ''
if re.match('ERROR (1005|1215)', message) or \
re.search('errno: 150', message):
return ('\n\npossible causes:\n\n'
+ 'o A column that has a foreign key is not the exact '
+ 'same type as the column it is\n referencing.\n\n'
+ 'o The column you are trying to reference does not have '
+ 'an index on it.\n\n'
+ 'o The table name provided for the parent table does not '
+ 'exist.\n')
else:
return ''
def __error(self, message, indent=None):
if self.__cli is None:
return None
self.__cli.error(message, indent)
def execute(self):
'''Causes the RDBMS Builder to perform all necessary tasks.'''
if self.__connection_name is None:
raise RDBMSBuilderException('connection name has not been set')
# +------------------------------------------------------------------+
# | Step 1 |
# | |
# | Clean up unused RDBMS Builder files. |
# +------------------------------------------------------------------+
self.__clean_up_rdbms_builder_files()
# +------------------------------------------------------------------+
# | Step 2 |
# | |
# | Verify that the RDBMS Builder database objects exist and if they |
# | do not, alert with instructions on how to make them. |
# +------------------------------------------------------------------+
self.__verify_rdbms_builder_objects()
# +------------------------------------------------------------------+
# | Step 3 |
# | |
# | Determine which schemas should be managed by the RDBMS Builder. |
# +------------------------------------------------------------------+
self.__determine_managed_schemas()
# +------------------------------------------------------------------+
# | Step 4 |
# | |
# | Execute any SQL files that are intended to be loaded before the |
# | build. |
# +------------------------------------------------------------------+
if self.__cli is not None and self.__cli.args.all is True:
self.__execute_prebuild_scripts()
# +------------------------------------------------------------------+
# | Step 5 |
# | |
# | Create an array containing data about all modules that exist in |
# | this API. |
# +------------------------------------------------------------------+
self.__assemble_all_modules()
# +------------------------------------------------------------------+
# | Step 6 |
# | |
# | If there are modules that have been marked dirty, add them to |
# | the build. |
# +------------------------------------------------------------------+
self.__compile_dirty_module_list()
# +------------------------------------------------------------------+
# | Step 7 |
# | |
# | Compile the list of modules that need to be build. |
# +------------------------------------------------------------------+
if self.__cli is not None and self.__cli.args.all is True:
self.__notice('Compiling build list of all modules...')
self.__compile_build_list_for_all_modules()
else:
if self.__cli.args.module_name is not None:
self.__notice('Compiling build list for specific module...')
self.__notice('(+) ' + self.__cli.args.module_name, 1)
self.__compile_build_list_for_module(
self.__cli.args.module_name)
else:
                self.__notice('Compiling build list based on changes...')
self.__compile_build_list_by_changes()
# +------------------------------------------------------------------+
# | Step 8 |
# | |
# | Determine if the build should continue. |
# +------------------------------------------------------------------+
if len(self.__modules_to_build.keys()) == 0:
self.__notice('RDBMS is up to date!')
if self.__cli is not None:
self.__cli.exit()
else:
sys.exit(0)
# +------------------------------------------------------------------+
# | Step 9 |
# | |
# | Drop all foreign key constraints for the tables that need to be |
# | built so we can tear down objects without errors. |
# +------------------------------------------------------------------+
self.__drop_foreign_key_constraints()
# +------------------------------------------------------------------+
# | Step 10 |
# | |
# | Drop objects for modules marked for rebuild. |
# +------------------------------------------------------------------+
self.__drop_objects()
# +------------------------------------------------------------------+
# | Step 11 |
# | |
# | Rebuild modules. |
# +------------------------------------------------------------------+
self.__rebuild_modules()
# +------------------------------------------------------------------+
# | Step 12 |
# | |
# | Recompile all DML. |
# +------------------------------------------------------------------+
self.__recompile_dml()
# +------------------------------------------------------------------+
# | Step 13 |
# | |
# | Add all foreign key constraints. |
# +------------------------------------------------------------------+
self.__add_foreign_key_constraints()
# +------------------------------------------------------------------+
# | Step 14 |
# | |
# | Verify foreign key indexes. |
# +------------------------------------------------------------------+
self.__verify_foreign_key_indexes()
# +------------------------------------------------------------------+
# | Step 15 |
# | |
# | Find potentially incorrect default values. |
# +------------------------------------------------------------------+
self.__find_potentially_incorrect_default_values()
# +------------------------------------------------------------------+
# | Step 16 |
# | |
# | Compile reference table data into variables. |
# +------------------------------------------------------------------+
self.__compile_reference_definitions()
# +------------------------------------------------------------------+
# | Step 17 |
# | |
# | Execute any SQL files that are intended to be loaded after the |
# | build. |
# +------------------------------------------------------------------+
if self.__cli is not None and self.__cli.args.all is True:
self.__execute_postbuild_scripts()
# +------------------------------------------------------------------+
# | Step 18 |
# | |
# | Report interesting stats about the build. |
# +------------------------------------------------------------------+
self.__notice('RDBMS Builder stats:')
self.__notice(' # tables: '
+ '{:,}'.format(self.__num_rdbms_tables),
1)
self.__notice(' # indexes: '
+ '{:,}'.format(self.__num_rdbms_indexes),
1)
self.__notice(' # routines: '
+ '{:,}'.format(self.__num_rdbms_routines),
1)
self.__notice('--------------------', 1)
self.__notice('total # objects: '
+ '{:,}'.format(self.__num_rdbms_objects),
1)
def __execute_postbuild_scripts(self):
self.__notice('Finding and executing post-build files...')
for path in ConfigManager.value('application dirs'):
dirs = find_dirs(path + '/*', 'rdbms_postbuild')
for dir in dirs:
self.__notice(dir, 1)
files = os.listdir(dir)
files.sort()
for file in files:
if os.access(dir + '/' + file, os.X_OK):
self.__notice(file, 2)
try:
output = \
subprocess.check_output(
dir + '/' + file,
stderr=subprocess.STDOUT,
shell=True
)
if self.__cli.args.verbose is True:
output = output.decode()
if output != '':
self.__notice(output, 3)
except subprocess.CalledProcessError as e:
self.__error(
'execution failed with the following output:',
3
)
self.__error(output, 4)
raise RDBMSBuilderException(
e.output.rstrip().decode()
)
def __execute_prebuild_scripts(self):
self.__notice('Finding and executing pre-build files...')
for path in ConfigManager.value('application dirs'):
dirs = find_dirs(path + '/*', 'rdbms_prebuild')
for dir in dirs:
self.__notice(dir, 1)
files = os.listdir(dir)
files.sort()
for file in files:
if re.search('\.sql$', file):
self.__notice(file, 2)
try:
subprocess.check_output(
self.__get_exec_sql_command()
+ ' < ' + dir + '/' + file,
stderr=subprocess.STDOUT,
shell=True)
except subprocess.CalledProcessError as e:
raise RDBMSBuilderException(
e.output.rstrip().decode())
def __execute_statement(self, statement, db_name=None):
file = tempfile.NamedTemporaryFile(dir='/tmp',
prefix='tinyAPI_rdbms_builder_',
delete=False)
file.write(statement.encode())
file.close()
try:
subprocess.check_output(
self.__get_exec_sql_command()
+ ('' if db_name is None else ' --database=' + db_name)
+ ' < ' + file.name,
stderr=subprocess.STDOUT,
shell=True)
except subprocess.CalledProcessError as e:
message = e.output.rstrip().decode()
if len(statement) <= 2048:
raise RDBMSBuilderException(
'execution of this:\n\n'
+ statement
+ "\n\nproduced this error:\n\n"
+ message
+ self.__enhance_build_error(message))
else:
raise RDBMSBuilderException(
'execution of this file:\n\n'
+ file.name
+ "\n\nproduced this error:\n\n"
+ message
+ self.__enhance_build_error(message))
os.remove(file.name)
def __file_has_been_modified(self, file):
if ConfigManager.value('data store') != 'mysql':
self.__data_store_not_supported()
with open(file, 'rb') as f:
            sha1 = hashlib.sha1(f.read()).hexdigest()
records = tinyAPI.dsh().query(
'''select sha1
from rdbms_builder.module_info
where file = %s''',
[file])
return len(records) == 0 or records[0]['sha1'] != sha1
def __find_potentially_incorrect_default_values(self):
if ConfigManager.value('data store') != 'mysql':
self.__data_store_not_supported()
self.__notice('Finding potentially incorrect default values...')
records = tinyAPI.dsh().query(
"""select table_name,
column_name
from information_schema.columns
where table_schema in (""" + self.__managed_schemas + """)
and column_default = %s""",
['0000-00-00 00:00:00']
)
for record in records:
self.__notice(
'(!) {}.{}'
.format(record['table_name'], record['column_name']),
1
)
self.__notice('has default of "0000-00-00 00:00:00"', 3)
def __get_exec_sql_command(self):
if self.__exec_sql_command is not None:
return self.__exec_sql_command
if ConfigManager.value('data store') == 'mysql':
if self.__connection_name is None:
raise RDBMSBuilderException(
'cannot execute SQL because connection name has not been '
+ 'set')
connection_data = ConfigManager.value('mysql connection data')
if self.__connection_name not in connection_data:
raise RDBMSBuilderException(
'no connection data has been configured for "'
+ self.__connection_name
+ '"')
host = connection_data[self.__connection_name][0]
user = connection_data[self.__connection_name][1]
password = connection_data[self.__connection_name][2]
command = ['/usr/bin/mysql']
if host != '':
command.append('--host=' + host)
if user != '':
command.append('--user=' + user)
if password != '':
command.append("--password='" + password + "'")
if user == '' and password == '':
command.append('--user root')
self.__exec_sql_command = ' '.join(command)
return self.__exec_sql_command
else:
self.__data_store_not_supported()
def __handle_module_dml(self, module):
files = \
find_files(
'/'.join(module.get_build_file().split('/')[0:-2]),
"*.sql")
for file in files:
if re.search('/rdbms_prebuild/', file) is None:
module.add_dml_file(file)
def __notice(self, message, indent=None):
if self.__cli is None:
return None
self.__cli.notice(message, indent)
def __rebuild_modules(self):
self.__notice('Rebuilding all DDL...')
for module_name in self.__modules_to_build.keys():
self.__notice('building module ' + module_name, 1)
self.__build_sql(self.__modules[module_name])
def __recompile_dml(self):
self.__notice('Recompiling all DML...')
for module_name in self.__modules_to_build.keys():
dml_files = self.__modules[module_name].get_dml_files()
if len(dml_files) > 0:
                self.__notice('compiling for ' + module_name, 1)
self.__build_dml(self.__modules[module_name])
def set_connection_name(self, connection_name):
'''Tell the RDBMS Builder which connection (configured in
tinyAPI_config.py) to use for finding and building data
structures.'''
self.__connection_name = connection_name
return self
def __track_module_info(self, module, file):
if ConfigManager.value('data store') != 'mysql':
self.__data_store_not_supported()
with open(file, 'rb') as f:
            sha1 = hashlib.sha1(f.read()).hexdigest()
tinyAPI.dsh().query(
'''insert into rdbms_builder.module_info
(
file,
sha1
)
values
(
%s,
%s
)
on duplicate key
update sha1 = %s''',
[file, sha1, sha1])
tinyAPI.dsh().query(
'''delete from rdbms_builder.dirty_module
where name = %s''',
[module.get_name()])
tinyAPI.dsh().commit()
def __verify_foreign_key_indexes(self):
self.__notice('Verifying foreign key indexes...')
for data in self.__unindexed_foreign_keys:
table_name = data[0]
parent_table_name = data[1]
cols = data[2]
parent_cols = data[3]
parts = table_name.split('_')
try:
tinyAPI.dsh().create(
'rdbms_builder.dirty_module',
{'name': parts[0]})
tinyAPI.dsh().commit()
except DataStoreDuplicateKeyException:
pass
self.__notice('(!) unindexed foreign key', 1)
self.__notice('table: '
+ table_name
+ ' -> parent: '
+ parent_table_name,
2)
self.__notice(repr(cols) + ' -> ' + repr(parent_cols), 2)
self.__notice('--------------------------------------------------'
+ '------------', 2)
if len(self.__unindexed_foreign_keys) > 0:
raise RDBMSBuilderException('unindexed foreign keys (see above)')
def __verify_rdbms_builder_objects(self):
if ConfigManager.value('data store') != 'mysql':
self.__data_store_not_supported()
tinyAPI.dsh.select_db(self.__cli.args.connection_name,
'information_schema')
databases = tinyAPI.dsh().query('show databases')
for database in databases:
if database['Database'] == 'rdbms_builder':
return
build_instructions = '''
create database rdbms_builder;
create table rdbms_builder.module_info
(
file varchar(100) not null primary key,
sha1 char(40) not null
);
create table rdbms_builder.dirty_module
(
name varchar(100) not null primary key
);
grant all privileges
on rdbms_builder.*
to ''@'localhost'
identified by '';
flush privileges;'''
raise RDBMSBuilderException(
'RDBMS Builder database and objects do not exist; create them as '
+ 'root using:\n'
+ build_instructions)
def __warn(self, message, indent=None):
if self.__cli is None:
return None
self.__cli.warn(message, indent)
```
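The builder expects to be driven by tinyAPI's command-line tooling, which supplies the `--all` / `--module-name` flags and a connection name. A hedged sketch of the programmatic entry point follows; the `cli` object is assumed to expose `.args`, `.notice()`, `.error()`, and `.exit()` as the code above requires, and nothing here is a documented public API.

```python
# Sketch only: 'cli' stands in for the object tinyAPI's CLI framework hands to
# its handlers; the connection name must match a 'mysql connection data' entry.
from tinyAPI.base.services.rdbms_builder.manager import Manager

def build_rdbms(cli):
    Manager(cli) \
        .set_connection_name(cli.args.connection_name) \
        .execute()
```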
#### File: services/schema_differ/mysql.py
```python
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
from tinyAPI.base.config import ConfigManager
from tinyAPI.base.data_store.provider import DataStoreMySQL
from tinyAPI.base.services.mysql.index_check import MySQLIndexUsageParser
# SchemaDifferException is raised below but was not imported; its location is
# assumed to mirror the other service exception modules.
from .exception import SchemaDifferException
import os
import re
import subprocess
__all__ = [
'SchemaDiffer'
]
# ----- Public Classes --------------------------------------------------------
class SchemaDiffer(object):
'''Finds all of the schema differences between two MySQL databases.'''
def __init__(self,
source_connection_name,
source_db_name,
target_connection_name,
target_db_name):
self.__cli = None
self.__source = None
self.__target = None
self.__source_db_name = None
self.__target_db_name = None
self.__ref_tables_to_create = None
self.__ref_tables_to_drop = None
self.__tables_to_create = None
self.__tables_to_drop = None
self.__table_create_drop_list = None
self.__ref_table_drop_list = None
self.__columns_to_create = None
self.__columns_to_drop = None
self.__columns_to_modify = None
self.__column_uniqueness_to_drop = None
self.__foreign_keys_to_create = None
self.__foreign_keys_to_drop = None
self.__ref_data_to_add = None
self.__ref_data_to_remove = None
self.__ref_data_to_modify = None
self.__indexes_to_create = None
self.__indexes_to_drop = None
self.__unique_keys_to_create = None
self.__unique_keys_to_drop = None
self.__index_usage_parser = None
self.__source = DataStoreMySQL()
self.__source.select_db(source_connection_name, 'information_schema')
self.__source_db_name = source_db_name
self.__target = DataStoreMySQL()
self.__target.select_db(target_connection_name, 'information_schema')
self.__target_db_name = target_db_name
self.__enable_write_upgrade_scripts = True
def __compute_column_differences(self):
self.__notice('Computing column differences...')
query = \
"""select table_name,
column_name,
column_default,
is_nullable,
character_set_name,
collation_name,
column_type,
column_key,
extra
from columns
where table_schema = %s
and table_name not like '%%\_ref\_%%'"""
if self.__table_create_drop_list:
query += ' and table_name not in (' \
+ self.__table_create_drop_list \
+ ')'
source_columns = \
self.__query_source(
query,
[self.__source_db_name])
target_columns = \
self.__query_target(
query,
[self.__target_db_name])
source_names = []
source = {}
if source_columns:
for source_column in source_columns:
name = source_column['table_name'] \
+ '.' \
+ source_column['column_name']
source_names.append(name)
source[name] = source_column
target_names = []
target = {}
if target_columns:
for target_column in target_columns:
name = target_column['table_name'] \
+ '.' \
+ target_column['column_name']
target_names.append(name)
target[name] = target_column
self.__columns_to_create = \
list(set(source_names).difference(target_names))
for column in self.__columns_to_create:
self.__notice('(+) ' + column, 1)
self.__columns_to_drop = \
list(set(target_names).difference(source_names))
for column in self.__columns_to_drop:
self.__notice('(-) ' + column, 1)
self.__columns_to_modify = {}
self.__column_uniqueness_to_drop = []
for name, data in source.items():
if name in self.__columns_to_create or \
name in self.__columns_to_drop:
                continue
if name not in target.keys():
raise SchemaDifferException(
'could not find column "' + name + '" in the list of '
+ 'target columns')
if data['column_key'] != 'UNI' and \
target[name]['column_key'] == 'UNI':
self.__notice('(-) ' + name + ' (uniqueness)', 1)
self.__column_uniqueness_to_drop.append(name)
for key, value in data.items():
if target[name][key] != value and key != 'column_key':
self.__notice('(=) ' + name + ' (' + key + ')', 1)
self.__columns_to_modify[name] = data
break
def __compute_foreign_key_differences(self):
self.__notice('Computing foreign key differences...')
query = \
"""select k.table_name,
k.column_name,
k.constraint_name,
k.ordinal_position,
k.referenced_table_name,
k.referenced_column_name,
c.delete_rule
from key_column_usage k
left outer join referential_constraints c
on c.constraint_schema = k.constraint_schema
and c.constraint_name = k.constraint_name
and k.constraint_name = k.constraint_name
where k.constraint_schema = %s
and k.constraint_name like '%%\_fk'"""
if self.__table_create_drop_list:
query += ' and k.table_name not in (' \
+ self.__table_create_drop_list \
                     + ')'
source_fks = \
self.__process_fks(
self.__query_source(
query,
[self.__source_db_name]))
target_fks = \
self.__process_fks(
self.__query_target(
query,
[self.__target_db_name]))
source_fk_names = source_fks.keys()
target_fk_names = target_fks.keys()
foreign_keys_to_create = \
list(set(source_fk_names).difference(target_fk_names))
foreign_keys_to_drop = \
list(set(target_fk_names).difference(source_fk_names))
self.__foreign_keys_to_create = []
for name in foreign_keys_to_create:
self.__notice('(+) ' + name, 1)
self.__foreign_keys_to_create.append(source_fks[name])
self.__foreign_keys_to_drop = []
for name in foreign_keys_to_drop:
self.__notice('(-) ' + name, 1)
self.__foreign_keys_to_drop.append(target_fks[name])
for name, fk in source_fks.items():
if name in target_fks.keys() and \
name not in self.__foreign_keys_to_create and \
name not in self.__foreign_keys_to_drop:
if source_fks[name]['table_name'] != \
target_fks[name]['table_name'] or \
source_fks[name]['ref_table_name'] != \
target_fks[name]['ref_table_name'] or \
source_fks[name]['delete_rule'] != \
target_fks[name]['delete_rule'] or \
','.join(list(source_fks[name]['cols'].values())) != \
','.join(list(target_fks[name]['cols'].values())) or \
','.join(list(source_fks[name]['ref_cols'].values())) != \
','.join(list(target_fks[name]['ref_cols'].values())):
self.__notice('(=) ' + name, 1)
self.__foreign_keys_to_drop.append(source_fks[name])
self.__foreign_keys_to_create.append(source_fks[name])
def __compute_index_differences(self):
self.__notice('Computing index differences...')
query = \
"""select table_name,
index_name,
seq_in_index,
column_name
from statistics
where index_schema = %s
and index_name like '%%\_idx'"""
if self.__table_create_drop_list:
query += ' and table_name not in (' \
+ self.__table_create_drop_list \
+ ')'
source_indexes = \
self.__query_source(
query,
[self.__source_db_name])
target_indexes = \
self.__query_target(
query,
[self.__target_db_name])
source_names = []
source = {}
for index in source_indexes:
source_names.append(index['index_name'])
if index['index_name'] not in source:
source[index['index_name']] = {
'table_name': index['table_name'],
'cols': []
}
source[index['index_name']]['cols'] \
.insert(index['seq_in_index'], index['column_name'])
target_names = []
target = {}
for index in target_indexes:
target_names.append(index['index_name'])
if index['index_name'] not in target:
target[index['index_name']] = {
'table_name': index['table_name'],
'cols': []
}
target[index['index_name']]['cols'] \
.insert(index['seq_in_index'], index['column_name'])
indexes_to_create = \
list(set(source_names).difference(target_names))
indexes_to_drop = \
list(set(target_names).difference(source_names))
indexes_to_modify = \
[]
for name, data in source.items():
if name in target.keys() and \
','.join(data['cols']) != ','.join(target[name]['cols']):
indexes_to_modify.append(name)
self.__indexes_to_create = []
for name in indexes_to_create:
self.__notice('(+) ' + name, 1)
self.__indexes_to_create.append({
'table_name': source[name]['table_name'],
'index_name': name,
'cols': source[name]['cols']
})
self.__indexes_to_drop = []
for name in indexes_to_drop:
self.__notice('(-) ' + name, 1)
self.__indexes_to_drop.append({
'table_name': target[name]['table_name'],
'index_name': name,
'cols': target[name]['cols']
})
for name in indexes_to_modify:
self.__notice('(=) ' + name, 1)
self.__indexes_to_create.append({
'table_name': source[name]['table_name'],
'index_name': name,
'cols': source[name]['cols']
})
self.__indexes_to_drop.append({
'table_name': target[name]['table_name'],
'index_name': name,
'cols': target[name]['cols']
})
def __compute_ref_table_data_differences(self):
self.__notice('Computing reference table data differences...')
query = \
"""select table_name
from tables
where table_schema = %s
and table_name like '%%\_ref\_%%'"""
if self.__ref_table_drop_list:
query += ' and table_name not in (' \
+ self.__ref_table_drop_list \
+ ')'
source_tables = \
self.__flatten_tables(
self.__query_source(
query,
[self.__source_db_name]))
target_tables = \
self.__flatten_tables(
self.__query_target(
query,
[self.__target_db_name]))
source_data = {}
for table in source_tables:
source_data[table] = {}
records = self.__query_source(
'''select id,
value,
display_order
from ''' + self.__source_db_name + '.' + table + '''
order by id asc''')
for record in records:
source_data[table][record['id']] = [
str(record['value']),
str(record['display_order'])
]
target_data = {}
for table in target_tables:
target_data[table] = {}
records = self.__query_target(
'''select id,
value,
display_order
from ''' + self.__target_db_name + '.' + table + '''
order by id asc''')
for record in records:
target_data[table][record['id']] = [
str(record['value']),
str(record['display_order'])
]
self.__ref_data_to_add = []
self.__ref_data_to_modify = []
for table, data in source_data.items():
for id, values in data.items():
if table not in target_data or \
id not in target_data[table]:
self.__notice('(+) ' + table + ' #' + str(id), 1)
self.__ref_data_to_add.append([
table,
id,
values[0],
values[1]
])
else:
if ','.join(values) != ','.join(target_data[table][id]):
self.__notice('(=) ' + table + ' #' + str(id), 1)
self.__ref_data_to_modify.append([
table,
id,
values[0],
values[1]
])
self.__ref_data_to_remove = []
for table, data in target_data.items():
for id, values in data.items():
if table not in source_data or \
id not in source_data[table]:
self.__notice('(-) ' + table + '#' + str(id), 1)
self.__ref_data_to_remove.append([
table,
id,
values[0],
values[1]
])
def __compute_ref_table_differences(self):
self.__notice('Computing reference table differences...')
query = \
"""select table_name
from tables
where table_schema = %s
and table_name like '%%\_ref\_%%'"""
source_tables = \
self.__flatten_tables(
self.__query_source(
query,
[self.__source_db_name]))
target_tables = \
self.__flatten_tables(
self.__query_target(
query,
[self.__target_db_name]))
self.__ref_tables_to_create = \
list(set(source_tables).difference(target_tables))
for table in self.__ref_tables_to_create:
self.__notice('(+) ' + table, 1)
drop_list = []
self.__ref_tables_to_drop = \
list(set(target_tables).difference(source_tables))
for table in self.__ref_tables_to_drop:
self.__notice('(-) ' + table, 1)
drop_list.append("'" + table + "'")
self.__ref_table_drop_list = ','.join(drop_list)
def __compute_table_differences(self):
self.__notice('Computing table differences...')
create_drop_list = []
query = \
"""select table_name
from tables
where table_schema = %s
and table_name not like '%%\_ref\_%%'"""
source_tables = \
self.__flatten_tables(
self.__query_source(
query,
[self.__source_db_name]))
target_tables = \
self.__flatten_tables(
self.__query_target(
query,
[self.__target_db_name]))
self.__tables_to_create = \
list(set(source_tables).difference(target_tables))
for table in self.__tables_to_create:
self.__notice('(+) ' + table, 1)
create_drop_list.append("'" + table + "'")
self.__tables_to_drop = \
list(set(target_tables).difference(source_tables))
for table in self.__tables_to_drop:
self.__notice('(-) ' + table, 1)
create_drop_list.append("'" + table + "'")
self.__table_create_drop_list = ','.join(create_drop_list)
def __compute_unique_key_differences(self):
self.__notice('Computing unique key differences...')
query = \
"""select table_name,
constraint_name,
column_name,
ordinal_position
from key_column_usage
where table_schema = %s
and constraint_name like '%%\_uk'"""
if self.__table_create_drop_list:
query += ' and table_name not in (' \
+ self.__table_create_drop_list \
+ ')'
source_uks = \
self.__process_uks(
self.__query_source(
query,
[self.__source_db_name]))
target_uks = \
self.__process_uks(
self.__query_target(
query,
[self.__target_db_name]))
source_uk_names = source_uks.keys()
target_uk_names = target_uks.keys()
unique_keys_to_create = \
list(set(source_uk_names).difference(target_uk_names))
unique_keys_to_drop = \
list(set(target_uk_names).difference(source_uk_names))
self.__unique_keys_to_create = []
for name in unique_keys_to_create:
self.__notice('(+) ' + name, 1)
self.__unique_keys_to_create.append(source_uks[name])
self.__unique_keys_to_drop = []
for name in unique_keys_to_drop:
self.__notice('(-) ' + name, 1)
self.__unique_keys_to_drop.append(target_uks[name])
for name, uk in source_uks.items():
if name in target_uks.keys() and \
name not in unique_keys_to_create and \
name not in unique_keys_to_drop:
if source_uks[name]['table_name'] != \
target_uks[name]['table_name'] or \
','.join(source_uks[name]['cols'].values()) != \
','.join(target_uks[name]['cols'].values()):
self.__notice('(=) ' + name, 1)
self.__unique_keys_to_drop.append(source_uks[name])
self.__unique_keys_to_create.append(source_uks[name])
def dont_write_upgrade_scripts(self):
self.__enable_write_upgrade_scripts = False
return self
def __error(self, message, indent=None):
if not self.__cli:
return
self.__cli.error(message, indent)
def execute(self):
self.__verify_schemas()
self.__compute_ref_table_differences()
self.__compute_table_differences()
self.__compute_column_differences()
self.__compute_foreign_key_differences()
self.__compute_ref_table_data_differences()
self.__compute_index_differences()
self.__compute_unique_key_differences()
self.__perform_index_check()
if not self.there_are_differences():
self.__notice('Both schemas are the same!')
exit(0)
self.__write_upgrade_scripts()
self.__target.close()
self.__source.close()
return self
def __flatten_tables(self, tables=tuple()):
if not tables:
return []
results = []
for table in tables:
results.append(table['table_name'])
return results
def __get_column_terms(self, column_data):
terms = []
if column_data['extra'] is not None and \
len(column_data['extra']) > 0:
terms.append(column_data['extra'])
if column_data['character_set_name']:
terms.append('character set ' + column_data['character_set_name'])
if column_data['collation_name']:
terms.append('collate ' + column_data['collation_name'])
if column_data['column_key'] == 'UNI':
terms.append('unique')
if column_data['column_default']:
terms.append('default '
+ ('current_timestamp'
if column_data['column_default'] ==
'current_timestamp'
else "'" + column_data['column_default'] + "'"))
if column_data['is_nullable'] == 'NO':
terms.append('not null')
return terms
def get_column_uniqueness_to_drop(self):
return self.__column_uniqueness_to_drop
def get_columns_to_create(self):
return self.__columns_to_create
def get_columns_to_drop(self):
return self.__columns_to_drop
def get_columns_to_modify(self):
return self.__columns_to_modify
def get_foreign_keys_to_create(self):
return self.__foreign_keys_to_create
def get_foreign_keys_to_drop(self):
return self.__foreign_keys_to_drop
def get_indexes_to_create(self):
return self.__indexes_to_create
def get_indexes_to_drop(self):
return self.__indexes_to_drop
def get_ref_data_to_add(self):
return self.__ref_data_to_add
def get_ref_data_to_modify(self):
return self.__ref_data_to_modify
def get_ref_data_to_remove(self):
return self.__ref_data_to_remove
def get_ref_tables_to_create(self):
return self.__ref_tables_to_create
def get_ref_tables_to_drop(self):
return self.__ref_tables_to_drop
def get_tables_to_create(self):
return self.__tables_to_create
def get_tables_to_drop(self):
return self.__tables_to_drop
def get_unique_keys_to_create(self):
return self.__unique_keys_to_create
def get_unique_keys_to_drop(self):
return self.__unique_keys_to_drop
def __ksort(self, data):
results = {}
for key in sorted(data.keys()):
results[key] = data[key]
return results
def __notice(self, message, indent=None):
if not self.__cli:
return
self.__cli.notice(message, indent)
def __perform_index_check(self):
self.__notice('Performing index check...')
try:
index_check = ConfigManager.value('index check')
        except Exception:
self.__notice('not enabled; skipping', 1)
return False
if not os.path.isfile(index_check['path']):
raise RuntimeError(
'could not find script at "{}"'
.format(index_check['path'])
)
output = \
subprocess.check_output(
[index_check['path'],
'--server={}'.format(index_check['server']),
index_check['database']]
)
self.__index_usage_parser = \
MySQLIndexUsageParser() \
.execute(output)
if len(self.__index_usage_parser.clustered_indexes) > 0:
self.__notice('clustered indexes', 1)
for entry in self.__index_usage_parser.clustered_indexes:
self.__notice(
'(~) {}'
.format(
entry[0][:63] + '..'
if len(entry[0]) >= 66 else
entry[0]
),
2
)
if len(self.__index_usage_parser.redundant_indexes) > 0:
self.__notice('redundant indexes', 1)
for entry in self.__index_usage_parser.redundant_indexes:
self.__notice(
'(!) {}'
.format(
entry[0][:63] + '..'
if len(entry[0]) >= 66 else
entry[0]
),
2
)
def __process_fks(self, data=tuple()):
if not data:
return {}
fks = {}
for fk in data:
if fk['constraint_name'] not in fks.keys():
fks[fk['constraint_name']] = {
'name': fk['constraint_name'],
'table_name': fk['table_name'],
'ref_table_name': fk['referenced_table_name'],
'cols': {},
'ref_cols': {},
'delete_rule': fk['delete_rule']
}
fks[fk['constraint_name']] \
['cols'] \
[int(fk['ordinal_position'])] = \
fk['column_name']
fks[fk['constraint_name']] \
['ref_cols'] \
[int(fk['ordinal_position'])] = \
fk['referenced_column_name']
for constraint_name, fk in fks.items():
fks[constraint_name]['cols'] = \
self.__ksort(fks[constraint_name]['cols'])
fks[constraint_name]['ref_cols'] = \
self.__ksort(fks[constraint_name]['ref_cols'])
return fks
def __process_uks(self, data=tuple()):
uks = {}
for uk in data:
if uk['constraint_name'] not in uks.keys():
uks[uk['constraint_name']] = {
'name': uk['constraint_name'],
'table_name': uk['table_name'],
'cols': {}
}
uks[uk['constraint_name']] \
['cols'] \
[int(uk['ordinal_position'])] = \
uk['column_name']
for name, uk in uks.items():
uks[name]['cols'] = self.__ksort(uks[name]['cols'])
return uks
def __query_source(self, query, binds=tuple()):
return self.__source.query(query, binds)
def __query_target(self, query, binds=tuple()):
return self.__target.query(query, binds)
def set_cli(self, cli):
self.__cli = cli
return self
def there_are_differences(self):
return self.__ref_tables_to_create or \
self.__ref_tables_to_drop or \
self.__tables_to_create or \
self.__tables_to_drop or \
self.__columns_to_create or \
self.__columns_to_drop or \
self.__columns_to_modify or \
self.__column_uniqueness_to_drop or \
self.__foreign_keys_to_create or \
self.__foreign_keys_to_drop or \
self.__ref_data_to_add or \
self.__ref_data_to_remove or \
self.__ref_data_to_modify or \
self.__indexes_to_create or \
self.__indexes_to_drop or \
self.__unique_keys_to_create or \
self.__unique_keys_to_drop
def __verify_schemas(self):
self.__notice('Verifying schemas...')
query = \
'''select 1 as schema_exists
from schemata
where schema_name = %s'''
record = self.__source.query(query, [self.__source_db_name])
if not record:
self.__error('source schema "'
+ self.__source_db_name
+ '" does not exist',
1)
exit(1)
record = self.__target.query(query, [self.__target_db_name])
if not record:
self.__error('target schema "'
+ self.__target_db_name
+ '" does not exist',
1)
exit(1)
def __write_add_foreign_key_constraint_sql(self):
file_name = '55-foreign_keys.sql'
self.__notice(file_name, 1)
contents = ''
for fk in self.__foreign_keys_to_create:
contents += \
('alter table ' + fk['table_name'] + '\n'
+ ' add constraint ' + fk['name'] + '\n'
+ ' foreign key (' + ', '.join(fk['cols'].values()) + ')\n'
+ ' references ' + fk['ref_table_name'] + '\n'
+ ' (' + ', '.join(fk['ref_cols'].values()) + ')\n'
+ ' on delete ' + fk['delete_rule'].lower() + ';\n\n')
self.__write_file(file_name, contents)
def __write_add_indexes_sql(self):
file_name = '65-indexes.sql'
self.__notice(file_name, 1)
contents = ''
for index in self.__indexes_to_create:
contents += 'create index ' + index['index_name'] + '\n' \
+ ' on ' + index['table_name'] + '\n' \
+ ' (' + ', '.join(index['cols']) + ');\n\n'
self.__write_file(file_name, contents)
def __write_add_modify_columns_sql(self):
file_name = '35-columns.sql'
self.__notice(file_name, 1)
contents = ''
for name in self.__columns_to_create:
table_name, column_name = name.split('.')
column = self.__source.query(
'''select table_name,
column_name,
column_default,
is_nullable,
character_set_name,
collation_name,
column_type,
column_key,
extra
from information_schema.columns
where table_name = %s
and column_name = %s''',
[table_name, column_name])
contents += 'alter table ' + column[0]['table_name'] + "\n" \
+ ' add ' + column[0]['column_name'] + "\n" \
+ ' ' + column[0]['column_type']
terms = self.__get_column_terms(column[0])
if terms:
contents += "\n"
for index in range(len(terms)):
terms[index] = " " + terms[index]
contents += "\n".join(terms) + ";\n\n"
for column in self.__columns_to_modify.values():
contents += 'alter table ' + column['table_name'] + "\n" \
+ ' modify ' + column['column_name'] + "\n" \
+ ' ' + column['column_type']
terms = self.__get_column_terms(column)
if terms:
contents += "\n"
for index in range(len(terms)):
terms[index] = " " + terms[index]
contents += "\n".join(terms) + ";\n\n"
for column in self.__columns_to_drop:
table_name, column_name = column.split('.')
contents += 'alter table ' + table_name + "\n" \
+ ' drop ' + column_name + ";\n\n"
for column in self.__column_uniqueness_to_drop:
table_name, column_name = column.split('.')
index = self.__target.query(
'''show index
from ''' + self.__target_db_name + "." + table_name + '''
where column_name = %s''',
[column_name])
contents += 'alter table ' + index[0]['Table'] + "\n" \
+ ' drop index ' + index[0]['Key_name'] + ";\n\n"
self.__write_file(file_name, contents)
def __write_add_ref_tables_sql(self):
file_name = '10-ref_tables.sql'
self.__notice(file_name, 1)
contents = ''
for name in self.__ref_tables_to_create:
record = self.__source.query(
'show create table ' + self.__source_db_name + '.' + name)
contents += record[0]['Create Table'] + ";\n\n"
if contents:
contents += "\n"
self.__write_file(file_name, contents)
def __write_add_tables_sql(self):
file_name = '30-tables.sql'
self.__notice(file_name, 1)
contents = ''
for name in self.__tables_to_create:
record = self.__source.query(
'show create table ' + self.__source_db_name + '.' + name)
contents += record[0]['Create Table'] + ";\n\n\n"
self.__write_file(file_name, contents)
def __write_add_unique_key_constraint_sql(self):
file_name = '60-unique_keys.sql'
self.__notice(file_name, 1)
contents = ''
for uk in self.__unique_keys_to_create:
contents += \
'alter table ' + uk['table_name'] + '\n' \
+ ' add unique key ' + uk['name'] + '\n' \
+ ' (' + ', '.join(uk['cols'].values()) + ');\n\n'
self.__write_file(file_name, contents)
def __write_drop_foreign_key_constraint_sql(self):
file_name = '15-foreign_keys.sql'
self.__notice(file_name, 1)
contents = ''
for fk in self.__foreign_keys_to_drop:
contents += 'alter table ' + fk['table_name'] + "\n" \
+ ' drop foreign key ' + fk['name'] + ';\n\n'
self.__write_file(file_name, contents)
def __write_drop_indexes_sql(self):
file_name = '25-indexes.sql'
self.__notice(file_name, 1)
contents = ''
for index in self.__indexes_to_drop:
contents += 'alter table ' + index['table_name'] + "\n" \
+ ' drop index ' + index['index_name'] + ";\n\n"
self.__write_file(file_name, contents)
def __write_drop_ref_tables_sql(self):
file_name = '45-ref_tables.sql'
self.__notice(file_name, 1)
contents = ''
for name in self.__ref_tables_to_drop:
contents += 'drop table if exists ' + name + ';\n\n'
self.__write_file(file_name, contents)
def __write_drop_tables_sql(self):
file_name = '40-tables.sql'
self.__notice(file_name, 1)
contents = ''
for name in self.__tables_to_drop:
contents += 'drop table if exists ' + name + ';\n\n'
self.__write_file(file_name, contents)
def __write_drop_unique_key_constraint_sql(self):
file_name = '20-unique_keys.sql'
self.__notice(file_name, 1)
contents = ''
for uk in self.__unique_keys_to_drop:
contents += 'alter table ' + uk['table_name'] + "\n" \
+ ' drop key ' + uk['name'] + ";\n\n"
self.__write_file(file_name, contents)
def __write_index_check(self):
if self.__index_usage_parser is None:
return
file_name = '90-index_check.txt'
self.__notice(file_name, 1)
contents = ''
for data in self.__index_usage_parser.redundant_indexes:
contents += \
'{}\n {}\n is duplicate of \n{}\n {}\n\n' \
.format(
data[0],
data[1],
data[2],
data[3]
)
if len(self.__index_usage_parser.redundant_indexes) > 0 and \
len(self.__index_usage_parser.clustered_indexes) > 0:
contents += '-' * 78 + '\n\n'
for data in self.__index_usage_parser.clustered_indexes:
contents += \
'{}\n {}\n{}is clustered and potentially redundant\n\n' \
.format(
data[0],
data[1],
' ' * 8
)
self.__write_file(file_name, contents)
def __write_ref_table_data_sql(self):
file_name = '50-ref_data.sql'
self.__notice(file_name, 1)
contents = ''
for data in self.__ref_data_to_add:
contents += ('insert into ' + data[0] + "\n"
+ '(\n'
+ ' id,\n'
+ ' value,\n'
+ ' display_order\n'
+ ')\n'
+ 'values\n'
+ '(\n'
+ ' ' + str(data[1]) + ',\n'
+ " '" + re.sub("'", "''", data[2]) + "',\n"
+ ' ' + str(data[3]) + '\n'
+ ');\n'
+ 'commit;\n\n')
for data in self.__ref_data_to_modify:
contents += ('update ' + data[0] + '\n'
+ " set value = '" + data[2] + "',\n"
+ " display_order = " + str(data[3]) + "\n"
+ " where id = " + str(data[1]) + ";\n"
+ "commit;\n\n")
for data in self.__ref_data_to_remove:
contents += 'delete from ' + data[0] + '\n' \
+ ' where id = ' + str(data[1]) + ';\n' \
+ 'commit;\n\n'
self.__write_file(file_name, contents)
def __write_file(self, file_name, contents):
f = open(file_name, 'w')
f.write(contents)
f.close()
def __write_upgrade_scripts(self):
if not self.__enable_write_upgrade_scripts:
return
self.__notice('Writing upgrade scripts into current directory...')
self.__write_add_ref_tables_sql()
self.__write_drop_foreign_key_constraint_sql()
self.__write_drop_unique_key_constraint_sql()
self.__write_drop_indexes_sql()
self.__write_add_tables_sql()
self.__write_add_modify_columns_sql()
self.__write_drop_tables_sql()
self.__write_drop_ref_tables_sql()
self.__write_ref_table_data_sql()
self.__write_add_foreign_key_constraint_sql()
self.__write_add_unique_key_constraint_sql()
self.__write_add_indexes_sql()
self.__write_index_check()
```
#### File: services/tests/cli_tests.py
```python
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
from tinyAPI.base.services.cli import CLIOutputRenderer
import tinyAPI
import unittest
# ----- Tests -----------------------------------------------------------------
class ServicesCLITestCase(unittest.TestCase):
def test_CLIOutputRenderer_header(self):
expected = '''# +--------------------------------------------------------------------------+
# | Test |
# +--------------------------------------------------------------------------+
'''
self.assertEqual(expected, CLIOutputRenderer.header('Test'))
# ----- Main ------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
```
#### File: services/tests/ffmpeg_tests.py
```python
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
from tinyAPI.base.services.ffmpeg import Ffmpeg
import tinyAPI
import unittest
# ----- Tests -----------------------------------------------------------------
class FfmpegTestCase(unittest.TestCase):
def test_get_geometry(self):
ffmpeg = Ffmpeg('/opt/tinyAPI/base/services/tests/files/video.mov')
width, height = ffmpeg.get_geometry()
self.assertEqual(160, width)
self.assertEqual(120, height)
self.assertEqual(160, ffmpeg.width)
self.assertEqual(120, ffmpeg.height)
def test_get_duration(self):
ffmpeg = Ffmpeg('/opt/tinyAPI/base/services/tests/files/video.mov')
duration = ffmpeg.get_duration()
self.assertEqual(13, duration)
# ----- Main ------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
```
#### File: base/services/unit_testing.py
```python
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
from tinyAPI.base.config import ConfigManager
from tinyAPI.base.context import Context
import re
import subprocess
import sys
import time
import tinyAPI
import unittest
__all__ = [
'Manager',
'TransactionalDataStoreTestCase'
]
# ----- Public Classes -------------------------------------------------------
class Manager(object):
'''Provides methods for executing and reporting on unit tests.'''
def __init__(self, cli):
self.__cli = cli
self.__enable_stop_on_failure = True
self.__total_run_time = 0
self.__total_tests = 0
def disable_stop_on_failure(self):
self.__enable_stop_on_failure = False
return self
def execute(self, files=tuple()):
for file in files:
if file != '':
self.__cli.notice(file + "\n")
file_run_time_start = time.time()
num_file_tests = 0
output = \
subprocess.check_output(
"export ENV_UNIT_TEST=1 ; "
+ sys.executable + " " + file + " -v ; "
+ "exit 0",
stderr=subprocess.STDOUT,
shell=True
) \
.decode()
failed = flush = False
test_info = None
for line in output.split("\n"):
# Attempt to capture test results
results = re.search(r' \.\.\. (?P<output>.*)(?P<message>ok|fail|error|skipped)$', line, re.IGNORECASE)
if test_info is None:
test_info = re.match(r'^(?P<method>test_[^ ]+) \(.*?\.(?P<class>.*?)\)', line)
if test_info is not None:
flush = False
if re.match(r'FAILED \(', line) or re.search('Error:', line):
flush = True
failed = True
# If the results delimiter is found, we need to validate the test case output
if results is not None:
test_class = test_method = message = ''
if test_info is not None:
test_method = test_info.group('method')
test_class = test_info.group('class')
message = results.group('message')
# Validate that no output occurred
if results.group('output'):
raise RuntimeError(
'\n{}\n{}::{}\n\nproduced\n\n{}\n{}'
.format(
'=' * 75,
test_class,
test_method,
results.group('output'),
'=' * 75
)
)
else:
if test_method is not None and test_method != line:
length = len(test_class) + len(test_method) + 12
if length > 79:
test_method = \
'...' + test_method[(length - 76):]
self.__cli.notice(
'{}::{} .. {}'
.format(
test_class,
test_method,
message.upper()
),
1
)
# We've already caught the end of the current test
# Flush the rest of the incoming lines before the next test
flush = True
test_info = results = None
test_method = test_class = message = ''
elif line == 'OK':
self.__cli.notice('', 1)
elif line != '' and flush:
self.__cli.notice(line, 1)
matches = re.match(r'Ran (\d+) test', line)
if matches is not None:
self.__total_tests += int(matches.group(1))
if failed is True:
sys.exit(1)
self.__total_run_time += time.time() - file_run_time_start
def print_summary(self):
self.__cli.notice(' Total number of tests executed: '
+ str('{0:,}'.format(self.__total_tests)))
self.__cli.notice('Total elapsed time for all tests: '
+ str(self.__total_run_time))
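# A minimal usage sketch (illustrative only; the `cli` object and the test file paths
# are hypothetical -- any object exposing a notice() method in the tinyAPI CLI style
# will do):
#
#   manager = Manager(cli).disable_stop_on_failure()
#   manager.execute(['/opt/app/tests/user_tests.py', '/opt/app/tests/order_tests.py'])
#   manager.print_summary()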
class TransactionalDataStoreTestCase(unittest.TestCase):
'''Provides a test case for transactional data stores that rolls back
changes after each unit test.'''
def setUp(self):
default_schema = ConfigManager.value('default schema')
default_connection = ConfigManager.value('default unit test connection')
if default_schema and default_connection:
tinyAPI.dsh.select_db(default_connection, default_schema)
self.maxDiff = None
self.set_up()
def set_up(self):
pass
def tearDown(self):
self.tear_down()
tinyAPI.dsh().rollback(True)
def tear_down(self):
pass
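# A minimal subclass sketch (illustrative only; the table name and the dsh() query
# call are hypothetical): each test runs inside a transaction that tearDown() rolls
# back automatically.
#
#   class OrderTestCase(TransactionalDataStoreTestCase):
#       def set_up(self):
#           tinyAPI.dsh().query('insert into order_summary (id) values (1)')
#       def test_row_is_visible(self):
#           self.assertEqual(1, len(tinyAPI.dsh().query('select * from order_summary')))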
# ----- Instructions ----------------------------------------------------------
Context().set_unit_test()
```
#### File: tinyAPI/base/singleton.py
```python
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
__all__ = [
'Singleton'
]
# ----- Public Classes --------------------------------------------------------
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args,
**kwargs)
return cls._instances[cls]
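# Usage sketch (illustrative only, Python 3 metaclass syntax): every call to a class
# that uses Singleton as its metaclass returns the same cached instance.
#
#   class Registry(metaclass=Singleton):
#       pass
#
#   assert Registry() is Registry()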
```
#### File: tinyAPI/base/stats_logger.py
```python
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
from tinyAPI.base.config import ConfigManager
import logging
import random
import tinyAPI
__all__ = [
'StatsLogger'
]
# ----- Public Classes --------------------------------------------------------
class StatsLogger(object):
'''Manages writing statistics to the application log file.'''
def hit_ratio(self, name, requests, hits, pid=None):
if tinyAPI.env_unit_test() is False and \
tinyAPI.env_cli() is False and \
random.randint(1, 100000) == 1:
log_file = ConfigManager.value('app log file')
if log_file is not None:
try:
hit_ratio = str((hits / requests) * 100) + '%'
except ZeroDivisionError:
hit_ratio = 'NA'
lines = [
'\n----- ' + name + ' (start) -----'
]
if pid is not None:
lines.append('PID #{}'.format(pid))
lines.extend([
'Requests: ' + '{0:,}'.format(requests),
'Hits: ' + '{0:,}'.format(hits),
'Hit Ratio: ' + hit_ratio,
'----- ' + name + ' (stop) ------'
])
logging.basicConfig(filename = log_file)
logging.critical('\n'.join(lines))
logging.shutdown()
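# Usage sketch (the numbers are illustrative). The write is sampled (roughly 1 in
# 100,000 calls) and skipped entirely for CLI and unit-test contexts, so it is cheap
# to call on every cache lookup:
#
#   StatsLogger().hit_ratio('Memcache', requests=150000, hits=149200, pid=1234)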
``` |
{
"source": "joepound/LS-Sorting",
"score": 4
} |
#### File: src/iterative_sorting/test_iterative.py
```python
import unittest
import random
from iterative_sorting import *
class IterativeSortingTest(unittest.TestCase):
def test_selection_sort(self):
arr1 = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]
arr2 = []
arr3 = [0, 1, 2, 3, 4, 5]
arr4 = random.sample(range(200), 50)
self.assertEqual(selection_sort(arr1), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
self.assertEqual(selection_sort(arr2), [])
self.assertEqual(selection_sort(arr3), [0, 1, 2, 3, 4, 5])
self.assertEqual(selection_sort(arr4), sorted(arr4))
def test_bubble_sort(self):
arr1 = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]
arr2 = []
arr3 = [0, 1, 2, 3, 4, 5]
arr4 = random.sample(range(200), 50)
self.assertEqual(bubble_sort(arr1), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
self.assertEqual(bubble_sort(arr2), [])
self.assertEqual(bubble_sort(arr3), [0, 1, 2, 3, 4, 5])
self.assertEqual(bubble_sort(arr4), sorted(arr4))
# Uncomment this test to test your count_sort implementation
# def test_counting_sort(self):
# arr1 = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]
# arr2 = []
# arr3 = [1, 5, -2, 4, 3]
# arr4 = random.sample(range(200), 50)
# self.assertEqual(count_sort(arr1), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# self.assertEqual(count_sort(arr2), [])
# self.assertEqual(
# count_sort(arr3),
# "Error, negative numbers not allowed in Count Sort"
# )
# self.assertEqual(count_sort(arr4), sorted(arr4))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joepound/Sprint-Challenge--Intro-Python",
"score": 4
} |
#### File: src/cityreader/test_cityreader.py
```python
import unittest
from cityreader import City, cityreader
def check_city(inp, exp):
if inp.name != exp.name:
return False
if inp.lat != exp.lat:
return False
if inp.lon != exp.lon:
return False
return True
class CityreaderTests(unittest.TestCase):
def setUp(self):
self.cities = cityreader()
self.expected = [
City("Seattle", 47.6217, -122.3238),
City("Richmond", 37.5294, -77.4755),
City("Virginia Beach", 36.7335, -76.0435),
City("Washington", 38.9047, -77.0163),
City("Milwaukee", 43.064, -87.9669),
City("Orlando", 28.4801, -81.3448),
City("Miami", 25.784, -80.2102),
City("Tampa", 27.9937, -82.4454),
City("Jacksonville", 30.3322, -81.6749),
City("Albuquerque", 35.1055, -106.6476),
City("Fort Worth", 32.7813, -97.3466),
City("McAllen", 26.2203, -98.2457),
City("El Paso", 31.8478, -106.431),
City("Dallas", 32.7938, -96.7659),
City("Austin", 30.3038, -97.7545),
City("Houston", 29.7871, -95.3936),
City("San Antonio", 29.4722, -98.5247),
City("New Orleans", 30.0687, -89.9288),
City("Charlotte", 35.208, -80.8308),
City("Raleigh", 35.8323, -78.6441),
City("Omaha", 41.2634, -96.0453),
City("Memphis", 35.1047, -89.9773),
City("Nashville", 36.1714, -86.7844),
City("Buffalo", 42.9016, -78.8487),
City("Queens", 40.7498, -73.7976),
City("New York", 40.6943, -73.9249),
City("Bronx", 40.8501, -73.8662),
City("Brooklyn", 40.6501, -73.9496),
City("Manhattan", 40.7834, -73.9662),
City("Philadelphia", 40.0076, -75.134),
City("Pittsburgh", 40.4396, -79.9763),
City("Sacramento", 38.5666, -121.4683),
City("Riverside", 33.9382, -117.3949),
City("San Francisco", 37.7561, -122.4429),
City("San Diego", 32.8312, -117.1225),
City("San Jose", 37.302, -121.8488),
City("Los Angeles", 34.114, -118.4068),
City("Las Vegas", 36.2288, -115.2603),
City("Denver", 39.7621, -104.8759),
City("Chicago", 41.8373, -87.6861),
City("Atlanta", 33.7627, -84.4231),
City("Indianapolis", 39.7771, -86.1458),
City("Oklahoma City", 35.4677, -97.5138),
City("Phoenix", 33.5722, -112.0891),
City("Tucson", 32.1558, -110.8777),
City("Bridgeport", 41.1909, -73.1958),
City("Hartford", 41.7661, -72.6834),
City("Baltimore", 39.3051, -76.6144),
City("Boston", 42.3189, -71.0838),
City("Cleveland", 41.4766, -81.6805),
City("Columbus", 39.9859, -82.9852),
City("Cincinnati", 39.1412, -84.506),
City("Salt Lake City", 40.7774, -111.9301),
City("Saint Louis", 38.6358, -90.2451),
City("Kansas City", 39.1239, -94.5541),
City("Minneapolis", 44.9635, -93.2679),
City("Detroit", 42.3834, -83.1024),
City("Providence", 41.8229, -71.4186),
City("Louisville", 38.1662, -85.6488),
City("Portland", 45.5372, -122.65)
]
def test_cityreader_correctness(self):
for i in range(len(self.cities)):
self.assertTrue(check_city(self.cities[i], self.expected[i]))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joe-p/pyteal",
"score": 2
} |
#### File: pyteal/ast/gtxn.py
```python
from typing import Union, cast, TYPE_CHECKING
from ..types import TealType, require_type
from ..ir import TealOp, Op, TealBlock
from ..errors import TealInputError, verifyFieldVersion, verifyTealVersion
from ..config import MAX_GROUP_SIZE
from .expr import Expr
from .leafexpr import LeafExpr
from .txn import TxnField, TxnExpr, TxnaExpr, TxnObject
if TYPE_CHECKING:
from ..compiler import CompileOptions
class GtxnExpr(TxnExpr):
"""An expression that accesses a transaction field from a transaction in the current group."""
def __init__(self, txnIndex: Union[int, Expr], field: TxnField) -> None:
super().__init__(Op.gtxn, "Gtxn", field)
self.txnIndex = txnIndex
def __str__(self):
return "({} {} {})".format(self.name, self.txnIndex, self.field.arg_name)
def __teal__(self, options: "CompileOptions"):
verifyFieldVersion(self.field.arg_name, self.field.min_version, options.version)
if type(self.txnIndex) is int:
op = TealOp(self, Op.gtxn, self.txnIndex, self.field.arg_name)
return TealBlock.FromOp(options, op)
verifyTealVersion(
Op.gtxns.min_version,
options.version,
"TEAL version too low to index Gtxn with dynamic values",
)
op = TealOp(self, Op.gtxns, self.field.arg_name)
return TealBlock.FromOp(options, op, cast(Expr, self.txnIndex))
GtxnExpr.__module__ = "pyteal"
class GtxnaExpr(TxnaExpr):
"""An expression that accesses a transaction array field from a transaction in the current group."""
def __init__(
self, txnIndex: Union[int, Expr], field: TxnField, index: Union[int, Expr]
) -> None:
super().__init__(Op.gtxna, Op.gtxnas, "Gtxna", field, index)
self.txnIndex = txnIndex
def __str__(self):
return "({} {} {} {})".format(
self.name, self.txnIndex, self.field.arg_name, self.index
)
def __teal__(self, options: "CompileOptions"):
verifyFieldVersion(self.field.arg_name, self.field.min_version, options.version)
if type(self.txnIndex) is int:
if type(self.index) is int:
opToUse = Op.gtxna
else:
opToUse = Op.gtxnas
else:
if type(self.index) is int:
opToUse = Op.gtxnsa
else:
opToUse = Op.gtxnsas
verifyTealVersion(
opToUse.min_version,
options.version,
"TEAL version too low to use op {}".format(opToUse),
)
if type(self.txnIndex) is int:
if type(self.index) is int:
op = TealOp(
self, opToUse, self.txnIndex, self.field.arg_name, self.index
)
return TealBlock.FromOp(options, op)
op = TealOp(self, opToUse, self.txnIndex, self.field.arg_name)
return TealBlock.FromOp(options, op, cast(Expr, self.index))
if type(self.index) is int:
op = TealOp(self, opToUse, self.field.arg_name, self.index)
return TealBlock.FromOp(options, op, cast(Expr, self.txnIndex))
op = TealOp(self, opToUse, self.field.arg_name)
return TealBlock.FromOp(
options, op, cast(Expr, self.txnIndex), cast(Expr, self.index)
)
GtxnaExpr.__module__ = "pyteal"
class TxnGroup:
"""Represents a group of transactions."""
def __getitem__(self, txnIndex: Union[int, Expr]) -> TxnObject:
if type(txnIndex) is int:
if txnIndex < 0 or txnIndex >= MAX_GROUP_SIZE:
raise TealInputError(
"Invalid Gtxn index {}, shoud be in [0, {})".format(
txnIndex, MAX_GROUP_SIZE
)
)
else:
require_type(cast(Expr, txnIndex), TealType.uint64)
return TxnObject(
lambda field: GtxnExpr(txnIndex, field),
lambda field, index: GtxnaExpr(txnIndex, field, index),
)
TxnGroup.__module__ = "pyteal"
Gtxn: TxnGroup = TxnGroup()
Gtxn.__module__ = "pyteal"
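# Usage sketch (illustrative; `Int` comes from pyteal):
#
#   Gtxn[0].sender()       # literal index  -> emits `gtxn 0 Sender`
#   Gtxn[Int(1)].sender()  # dynamic index  -> emits `gtxns Sender` (requires TEAL v3+)
#
# An int index outside [0, MAX_GROUP_SIZE) raises TealInputError; a dynamic index must
# be a uint64 expression.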
```
#### File: pyteal/scripts/generate_init.py
```python
import argparse, os, sys
from pyteal import __all__ as static_all
# Start of the template to be appended to
pyi_template = """## File generated from scripts/generate_init.py.
## DO NOT EDIT DIRECTLY
"""
# Template for __all__ export list
all_template = """__all__ = [
{},
]"""
# Flags to denote the beginning/end of the __all__ exports in __init__.py
begin_flag = "# begin __all__"
end_flag = "# end __all__"
# Make it safe to run from anywhere
curr_dir = os.path.dirname(os.path.abspath(__file__))
orig_dir = os.path.join(curr_dir, os.path.join("..", "pyteal"))
# Path to pyi
pyi_file = "__init__.pyi"
orig_file = os.path.join(orig_dir, pyi_file)
# Path to py
py_file = "__init__.py"
init_file = os.path.join(orig_dir, py_file)
def generate_tmp():
with open(init_file, "r") as f:
init_contents = f.read()
start_idx = init_contents.index(begin_flag)
end_idx = init_contents.index(end_flag)
all_imports = ",\n ".join(['"{}"'.format(s) for s in static_all])
return (
pyi_template
+ init_contents[:start_idx]
+ all_template.format(all_imports)
+ init_contents[end_idx + len(end_flag) :]
)
def is_different(regen):
    # The generated stub is considered different if the existing .pyi file is
    # missing or its contents do not match the freshly generated text.
    if not os.path.exists(orig_file):
        return True
    with open(orig_file, "r") as f:
        orig_contents = f.read()
    return orig_contents != regen
def overwrite(regen):
with open(orig_file, "w") as f:
f.write(regen)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--check",
action="store_true",
help="Only check if the generated file would change",
)
args = parser.parse_args()
regen = generate_tmp()
if args.check:
if is_different(regen):
print(
"The __init__.pyi needs to be regenerated. Please run scripts/generate_init.py"
)
sys.exit(1)
print("No changes in __init__.py")
sys.exit(0)
overwrite(regen)
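# Typical invocations (run from the repository root):
#
#   python scripts/generate_init.py          # rewrite pyteal/__init__.pyi in place
#   python scripts/generate_init.py --check  # exit 1 if the stub is stale (useful in CI)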
``` |
{
"source": "joepusateri/Keys-for-ServiceNow-Extensions",
"score": 2
} |
#### File: joepusateri/Keys-for-ServiceNow-Extensions/snkeys.py
```python
import argparse
import pdpyras
import sys
import csv
# Get all services and store the EP for each
def get_extensions(session):
escalation_policies_by_serviceid = {}
for service in session.iter_all('services'):
escalation_policies_by_serviceid[service['id']]=service['escalation_policy']['id']
# Get all extensions and for the SNOW ones, print out the IDs
with sys.stdout as csvfile:
fieldnames = ['service_name','service_id','escalation_policy_id','webhook_id']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore', dialect='excel', quoting=csv.QUOTE_ALL )
writer.writeheader()
for ext in session.iter_all('extensions'):
# If the extension has a SNOW user defined...
if 'snow_user' in ext['config']:
row = {'service_name': ext['extension_objects'][0]['summary'],
'service_id': ext['extension_objects'][0]['id'],
'escalation_policy_id': escalation_policies_by_serviceid[ext['extension_objects'][0]['id']],
'webhook_id': ext['id']}
writer.writerow(row)
if __name__ == '__main__':
ap = argparse.ArgumentParser(description="Exports service id, escalation policy id and webhook id for all ServiceNow Extensions")
ap.add_argument('-p', '--api-key', required=True, help="REST API key")
args = ap.parse_args()
session = pdpyras.APISession(args.api_key)
get_extensions(session)
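# Example invocation (the API key is a placeholder); the CSV is written to stdout with
# the columns declared in `fieldnames` above:
#
#   python snkeys.py -p <REST_API_KEY> > servicenow_webhooks.csv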
``` |
{
"source": "joepvd/aiida_core",
"score": 2
} |
#### File: db/migrations/0002_db_state_change.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import models, migrations
from aiida.backends.djsite.db.migrations import upgrade_schema_version
REVISION = '1.0.2'
DOWN_REVISION = '1.0.1'
def fix_calc_states(apps, schema_editor):
from aiida.backends.djsite.db.models import DbCalcState
# from aiida.orm import load_node
from aiida.orm.utils import load_node
# These states should never exist in the database but we'll play it safe
# and deal with them if they do
for calc_state in DbCalcState.objects.filter(
state__in=[b'UNDETERMINED', b'NOTFOUND']):
old_state = calc_state.state
calc_state.state = b'FAILED'
calc_state.save()
# Now add a note in the log to say what we've done
calc = load_node(pk=calc_state.dbnode.pk)
calc.logger.warning(
"Job state {} found for calculation {} which should never be in "
"the database. Changed state to FAILED.".format(
old_state, calc_state.dbnode.pk))
class Migration(migrations.Migration):
dependencies = [
('db', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='dbcalcstate',
name='state',
# The UNDETERMINED and NOTFOUND 'states' were removed as these
# don't make sense
field=models.CharField(db_index=True, max_length=25,
choices=[(b'RETRIEVALFAILED', b'RETRIEVALFAILED'), (b'COMPUTED', b'COMPUTED'),
(b'RETRIEVING', b'RETRIEVING'), (b'WITHSCHEDULER', b'WITHSCHEDULER'),
(b'SUBMISSIONFAILED', b'SUBMISSIONFAILED'), (b'PARSING', b'PARSING'),
(b'FAILED', b'FAILED'), (b'FINISHED', b'FINISHED'),
(b'TOSUBMIT', b'TOSUBMIT'), (b'SUBMITTING', b'SUBMITTING'),
(b'IMPORTED', b'IMPORTED'), (b'NEW', b'NEW'),
(b'PARSINGFAILED', b'PARSINGFAILED')]),
preserve_default=True,
),
# Fix up any calculation states that had one of the removed states
migrations.RunPython(fix_calc_states),
upgrade_schema_version(REVISION, DOWN_REVISION)
]
```
#### File: db/subtests/query.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida.backends.testbase import AiidaTestCase
class TestQueryBuilderDjango(AiidaTestCase):
def test_clsf_django(self):
"""
This tests the classification of the QueryBuilder using the django backend.
"""
from aiida.orm.implementation.django.dummy_model import (
DbNode, DbUser, DbComputer,
DbGroup,
)
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.data.structure import StructureData
from aiida.orm import Group, Node, Computer, Data
from aiida.common.exceptions import InputValidationError
qb = QueryBuilder()
with self.assertRaises(InputValidationError):
qb._get_ormclass(None, 'data')
with self.assertRaises(InputValidationError):
qb._get_ormclass(None, 'data.Data')
with self.assertRaises(InputValidationError):
qb._get_ormclass(None, '.')
for cls, clstype, query_type_string in (
qb._get_ormclass(StructureData, None),
qb._get_ormclass(None, 'data.structure.StructureData.'),
):
self.assertEqual(clstype, 'data.structure.StructureData.')
self.assertTrue(issubclass(cls, DbNode))
self.assertEqual(clstype, 'data.structure.StructureData.')
self.assertEqual(query_type_string,
StructureData._query_type_string)
for cls, clstype, query_type_string in (
qb._get_ormclass(Node, None),
qb._get_ormclass(DbNode, None),
qb._get_ormclass(None, '')
):
self.assertEqual(clstype, Node._plugin_type_string)
self.assertEqual(query_type_string, Node._query_type_string)
self.assertTrue(issubclass(cls, DbNode))
for cls, clstype, query_type_string in (
qb._get_ormclass(DbGroup, None),
qb._get_ormclass(Group, None),
qb._get_ormclass(None, 'group'),
qb._get_ormclass(None, 'Group'),
):
self.assertEqual(clstype, 'group')
self.assertEqual(query_type_string, None)
self.assertTrue(issubclass(cls, DbGroup))
for cls, clstype, query_type_string in (
qb._get_ormclass(DbUser, None),
qb._get_ormclass(DbUser, None),
qb._get_ormclass(None, "user"),
qb._get_ormclass(None, "User"),
):
self.assertEqual(clstype, 'user')
self.assertEqual(query_type_string, None)
self.assertTrue(issubclass(cls, DbUser))
for cls, clstype, query_type_string in (
qb._get_ormclass(DbComputer, None),
qb._get_ormclass(Computer, None),
qb._get_ormclass(None, 'computer'),
qb._get_ormclass(None, 'Computer'),
):
self.assertEqual(clstype, 'computer')
self.assertEqual(query_type_string, None)
self.assertTrue(issubclass(cls, DbComputer))
for cls, clstype, query_type_string in (
qb._get_ormclass(Data, None),
qb._get_ormclass(None, 'data.Data.'),
):
self.assertEqual(clstype, Data._plugin_type_string)
self.assertEqual(query_type_string, Data._query_type_string)
self.assertTrue(issubclass(cls, DbNode))
```
#### File: backends/sqlalchemy/globalsettings.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida.backends.sqlalchemy.models.settings import DbSetting
from sqlalchemy.orm.exc import NoResultFound
from aiida.backends.sqlalchemy import get_scoped_session
def set_global_setting(key, value, description=None):
"""
Set a global setting in the DbSetting table (therefore, stored at the DB
level).
"""
DbSetting.set_value(key, value, other_attribs={"description": description})
def del_global_setting(key):
"""
Delete the setting with the given key from the DbSetting table, or raise a
KeyError if the setting is not present in the DB.
:raise KeyError: if the setting does not exist in the DB
"""
try:
setting = get_scoped_session().query(DbSetting).filter_by(key=key).one()
setting.delete()
except NoResultFound:
raise KeyError("No global setting with key={}".format(key))
def get_global_setting(key):
"""
Return the value of the given setting, or raise a KeyError if the
setting is not present in the DB.
:raise KeyError: if the setting does not exist in the DB
"""
from aiida.backends.sqlalchemy.models.utils import get_value_of_sub_field
# Check first that the table exists
table_check_test()
try:
return get_value_of_sub_field(
key, lambda given_key: get_scoped_session().query(DbSetting).filter_by(
key=given_key).one().getvalue())
except NoResultFound:
raise KeyError("No global setting with key={}".format(key))
def get_global_setting_description(key):
"""
Return the description for the given setting variable, as stored in the
DB, or raise a KeyError if the setting is not present in the DB or the
table doesn't exist.
"""
from aiida.backends.sqlalchemy.models.utils import validate_key
# Check first that the table exists
table_check_test()
validate_key(key)
try:
return (get_scoped_session().query(DbSetting).filter_by(key=key).
one().get_description())
except NoResultFound:
raise KeyError("No global setting with key={}".format(key))
def table_check_test():
"""
Checks that the db_dbsetting table exists in the database. If it doesn't
exist, it raises a KeyError.
"""
from sqlalchemy.engine import reflection
from aiida.backends import sqlalchemy as sa
inspector = reflection.Inspector.from_engine(get_scoped_session().bind)
if 'db_dbsetting' not in inspector.get_table_names():
raise KeyError("No table found")
```
#### File: migrations/versions/e15ef2630a1b_initial_schema.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm.session import Session
from aiida.backends.sqlalchemy.utils import install_tc
# revision identifiers, used by Alembic.
revision = 'e15ef2630a1b'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table('db_dbuser',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('email', sa.VARCHAR(length=254), autoincrement=False, nullable=True),
sa.Column('password', sa.VARCHAR(length=128), autoincrement=False, nullable=True),
sa.Column('is_superuser', sa.BOOLEAN(), autoincrement=False, nullable=False),
sa.Column('first_name', sa.VARCHAR(length=254), autoincrement=False, nullable=True),
sa.Column('last_name', sa.VARCHAR(length=254), autoincrement=False, nullable=True),
sa.Column('institution', sa.VARCHAR(length=254), autoincrement=False, nullable=True),
sa.Column('is_staff', sa.BOOLEAN(), autoincrement=False, nullable=True),
sa.Column('is_active', sa.BOOLEAN(), autoincrement=False, nullable=True),
sa.Column('last_login', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('date_joined', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbuser_pkey'),
postgresql_ignore_search_path=False
)
op.create_index('ix_db_dbuser_email', 'db_dbuser', ['email'], unique=True)
op.create_table('db_dbworkflow',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('uuid', postgresql.UUID(), autoincrement=False, nullable=True),
sa.Column('ctime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('mtime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('label', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('nodeversion', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('lastsyncedversion', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('state', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('report', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('module', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('module_class', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('script_path', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('script_md5', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['user_id'], [u'db_dbuser.id'], name=u'db_dbworkflow_user_id_fkey'),
sa.PrimaryKeyConstraint('id', name=u'db_dbworkflow_pkey'),
postgresql_ignore_search_path=False
)
op.create_index('ix_db_dbworkflow_label', 'db_dbworkflow', ['label'])
op.create_table('db_dbworkflowstep',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('parent_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('nextcall', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('state', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['parent_id'], [u'db_dbworkflow.id'], name=u'db_dbworkflowstep_parent_id_fkey'),
sa.ForeignKeyConstraint(['user_id'], [u'db_dbuser.id'], name=u'db_dbworkflowstep_user_id_fkey'),
sa.PrimaryKeyConstraint('id', name=u'db_dbworkflowstep_pkey'),
sa.UniqueConstraint('parent_id', 'name', name=u'db_dbworkflowstep_parent_id_name_key'),
postgresql_ignore_search_path=False
)
op.create_table('db_dbcomputer',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('uuid', postgresql.UUID(), autoincrement=False, nullable=True),
sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column('hostname', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('enabled', sa.BOOLEAN(), autoincrement=False, nullable=True),
sa.Column('transport_type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('scheduler_type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('transport_params', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('metadata', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbcomputer_pkey'),
sa.UniqueConstraint('name', name=u'db_dbcomputer_name_key')
)
op.create_table('db_dbauthinfo',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('aiidauser_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('dbcomputer_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('metadata', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('auth_params', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('enabled', sa.BOOLEAN(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['aiidauser_id'], [u'db_dbuser.id'], name=u'db_dbauthinfo_aiidauser_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['dbcomputer_id'], [u'db_dbcomputer.id'], name=u'db_dbauthinfo_dbcomputer_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbauthinfo_pkey'),
sa.UniqueConstraint('aiidauser_id', 'dbcomputer_id', name=u'db_dbauthinfo_aiidauser_id_dbcomputer_id_key')
)
op.create_table('db_dbgroup',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('uuid', postgresql.UUID(), autoincrement=False, nullable=True),
sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['user_id'], [u'db_dbuser.id'], name=u'db_dbgroup_user_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbgroup_pkey'),
sa.UniqueConstraint('name', 'type', name=u'db_dbgroup_name_type_key')
)
op.create_index('ix_db_dbgroup_name', 'db_dbgroup', ['name'])
op.create_index('ix_db_dbgroup_type', 'db_dbgroup', ['type'])
op.create_table('db_dbnode',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('uuid', postgresql.UUID(), autoincrement=False, nullable=True),
sa.Column('type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('label', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('ctime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('mtime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('nodeversion', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('public', sa.BOOLEAN(), autoincrement=False, nullable=True),
sa.Column('attributes', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('extras', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('dbcomputer_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['dbcomputer_id'], [u'db_dbcomputer.id'], name=u'db_dbnode_dbcomputer_id_fkey', ondelete=u'RESTRICT', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['user_id'], [u'db_dbuser.id'], name=u'db_dbnode_user_id_fkey', ondelete=u'RESTRICT', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbnode_pkey'),postgresql_ignore_search_path=False
)
op.create_index('ix_db_dbnode_label', 'db_dbnode', ['label'])
op.create_index('ix_db_dbnode_type', 'db_dbnode', ['type'])
op.create_table('db_dbgroup_dbnodes',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('dbnode_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('dbgroup_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbgroup_id'], [u'db_dbgroup.id'], name=u'db_dbgroup_dbnodes_dbgroup_id_fkey', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['dbnode_id'], [u'db_dbnode.id'], name=u'db_dbgroup_dbnodes_dbnode_id_fkey', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbgroup_dbnodes_pkey')
)
op.create_table('db_dblock',
sa.Column('key', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column('creation', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('timeout', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('owner', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('key', name=u'db_dblock_pkey')
)
op.create_table('db_dbworkflowdata',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('parent_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('data_type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('value_type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('json_value', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('aiida_obj_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['aiida_obj_id'], [u'db_dbnode.id'], name=u'db_dbworkflowdata_aiida_obj_id_fkey'),
sa.ForeignKeyConstraint(['parent_id'], [u'db_dbworkflow.id'], name=u'db_dbworkflowdata_parent_id_fkey'),
sa.PrimaryKeyConstraint('id', name=u'db_dbworkflowdata_pkey'),
sa.UniqueConstraint('parent_id', 'name', 'data_type', name=u'db_dbworkflowdata_parent_id_name_data_type_key')
)
op.create_table('db_dblink',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('input_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('output_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('label', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column('type', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['input_id'], [u'db_dbnode.id'], name=u'db_dblink_input_id_fkey', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['output_id'], [u'db_dbnode.id'], name=u'db_dblink_output_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dblink_pkey'),
)
op.create_index('ix_db_dblink_label', 'db_dblink', ['label'])
op.create_table('db_dbworkflowstep_calculations',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('dbworkflowstep_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('dbnode_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbnode_id'], [u'db_dbnode.id'], name=u'db_dbworkflowstep_calculations_dbnode_id_fkey'),
sa.ForeignKeyConstraint(['dbworkflowstep_id'], [u'db_dbworkflowstep.id'], name=u'db_dbworkflowstep_calculations_dbworkflowstep_id_fkey'),
sa.PrimaryKeyConstraint('id', name=u'db_dbworkflowstep_calculations_pkey'),
sa.UniqueConstraint('dbworkflowstep_id', 'dbnode_id', name=u'db_dbworkflowstep_calculations_dbworkflowstep_id_dbnode_id_key')
)
op.create_table('db_dbpath',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('parent_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('child_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('depth', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('entry_edge_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('direct_edge_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('exit_edge_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['child_id'], [u'db_dbnode.id'], name=u'db_dbpath_child_id_fkey', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['parent_id'], [u'db_dbnode.id'], name=u'db_dbpath_parent_id_fkey', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbpath_pkey')
)
op.create_table('db_dbcalcstate',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('dbnode_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('state', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbnode_id'], [u'db_dbnode.id'], name=u'db_dbcalcstate_dbnode_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbcalcstate_pkey'),
sa.UniqueConstraint('dbnode_id', 'state', name=u'db_dbcalcstate_dbnode_id_state_key')
)
op.create_index('ix_db_dbcalcstate_state', 'db_dbcalcstate', ['state'])
op.create_table('db_dbsetting',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('key', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column('val', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('description', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbsetting_pkey'),
sa.UniqueConstraint('key', name=u'db_dbsetting_key_key')
)
op.create_index('ix_db_dbsetting_key', 'db_dbsetting', ['key'])
op.create_table('db_dbcomment',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('uuid', postgresql.UUID(), autoincrement=False, nullable=True),
sa.Column('dbnode_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('ctime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('mtime', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('content', sa.TEXT(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbnode_id'], [u'db_dbnode.id'], name=u'db_dbcomment_dbnode_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.ForeignKeyConstraint(['user_id'], [u'db_dbuser.id'], name=u'db_dbcomment_user_id_fkey', ondelete=u'CASCADE', initially=u'DEFERRED', deferrable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dbcomment_pkey')
)
op.create_table('db_dblog',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('time', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True),
sa.Column('loggername', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('levelname', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('objname', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('objpk', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('message', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('metadata', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name=u'db_dblog_pkey')
)
op.create_index('ix_db_dblog_levelname', 'db_dblog', ['levelname'])
op.create_index('ix_db_dblog_loggername', 'db_dblog', ['loggername'])
op.create_index('ix_db_dblog_objname', 'db_dblog', ['objname'])
op.create_index('ix_db_dblog_objpk', 'db_dblog', ['objpk'])
op.create_table('db_dbworkflowstep_sub_workflows',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('dbworkflowstep_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('dbworkflow_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['dbworkflow_id'], [u'db_dbworkflow.id'], name=u'db_dbworkflowstep_sub_workflows_dbworkflow_id_fkey'),
sa.ForeignKeyConstraint(['dbworkflowstep_id'], [u'db_dbworkflowstep.id'], name=u'db_dbworkflowstep_sub_workflows_dbworkflowstep_id_fkey'),
sa.PrimaryKeyConstraint('id', name=u'db_dbworkflowstep_sub_workflows_pkey'),
sa.UniqueConstraint('dbworkflowstep_id', 'dbworkflow_id', name=u'db_dbworkflowstep_sub_workflo_dbworkflowstep_id_dbworkflow__key')
)
# I get the session using the alembic connection
# (Keep in mind that alembic uses the AiiDA SQLA
# session)
session = Session(bind=op.get_bind())
install_tc(session)
def downgrade():
op.drop_table('db_dbworkflowstep_calculations')
op.drop_table('db_dbworkflowstep_sub_workflows')
op.drop_table('db_dbworkflowdata')
op.drop_table('db_dbworkflowstep')
op.drop_table('db_dbworkflow')
op.drop_table('db_dbgroup_dbnodes')
op.drop_table('db_dbgroup')
op.drop_table('db_dblink')
op.drop_table('db_dbpath')
op.drop_table('db_dbcalcstate')
op.drop_table('db_dbcomment')
op.drop_table('db_dbnode')
op.drop_table('db_dbauthinfo')
op.drop_table('db_dbuser')
op.drop_table('db_dbcomputer')
op.drop_table('db_dblog')
op.drop_table('db_dbsetting')
op.drop_table('db_dblock')
```
#### File: params/types/test_workflow.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida.backends.testbase import AiidaTestCase
from aiida.cmdline.params.types import LegacyWorkflowParamType
class TestLegacyWorkflowParamType(AiidaTestCase):
@classmethod
def setUpClass(cls):
from aiida.workflows.test import WFTestEmpty, WFTestSimpleWithSubWF
super(TestLegacyWorkflowParamType, cls).setUpClass()
cls.workflow = WFTestEmpty()
cls.workflow.label = 'Unique Label'
cls.workflow.store()
cls.wf_type = LegacyWorkflowParamType()
def test_get_by_id(self):
identifier = str(self.workflow.pk)
self.assertEqual(self.wf_type.convert(identifier, None, None).uuid, self.workflow.uuid)
def test_get_by_uuid(self):
identifier = str(self.workflow.uuid)
self.assertEqual(self.wf_type.convert(identifier, None, None).uuid, self.workflow.uuid)
def test_get_by_label(self):
identifier = str(self.workflow.label)
self.assertEqual(self.wf_type.convert(identifier, None, None).uuid, self.workflow.uuid)
```
#### File: cmdline/commands/cmd_setup.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida.cmdline.commands.cmd_verdi import verdi
from aiida.cmdline.params import arguments, options
from aiida.control.profile import setup_profile
@verdi.command('setup')
@arguments.PROFILE_NAME()
@options.PROFILE_ONLY_CONFIG()
@options.PROFILE_SET_DEFAULT()
@options.NON_INTERACTIVE()
@options.BACKEND()
@options.DB_HOST()
@options.DB_PORT()
@options.DB_NAME()
@options.DB_USERNAME()
@options.DB_PASSWORD()
@options.REPOSITORY_PATH()
@options.USER_EMAIL()
@options.USER_FIRST_NAME()
@options.USER_LAST_NAME()
@options.USER_INSTITUTION()
@options.FORCE()
def setup(profile_name, only_config, set_default, non_interactive, backend, db_host, db_port, db_name, db_username,
db_password, repository, email, first_name, last_name, institution, force):
"""Setup and configure a new profile."""
kwargs = dict(
profile=profile_name,
only_config=only_config,
set_default=set_default,
non_interactive=non_interactive,
backend=backend,
db_host=db_host,
db_port=db_port,
db_name=db_name,
db_user=db_username,
db_pass=db_password,
repo=repository,
email=email,
first_name=first_name,
last_name=last_name,
institution=institution,
force_overwrite=force)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
setup_profile(**kwargs)
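# Example programmatic call mirroring the CLI options above (all values are
# placeholders):
#
#   setup_profile(
#       profile='quicksetup', non_interactive=True, backend='django',
#       db_host='localhost', db_port=5432, db_name='aiida_dev',
#       db_user='aiida', db_pass='<PASSWORD>',
#       repo='/home/user/.aiida/repository-quicksetup',
#       email='aiida@localhost', first_name='Ada', last_name='Lovelace',
#       institution='EPFL')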
```
#### File: params/options/test_conditional.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import unittest
import click
from click.testing import CliRunner
from aiida.cmdline.params.options.conditional import ConditionalOption
class ConditionalOptionTest(unittest.TestCase):
"""Unit tests for ConditionalOption."""
@classmethod
def setUpClass(cls):
cls.runner = CliRunner()
def simple_cmd(self, pname, required_fn=lambda ctx: ctx.params.get('on'), **kwargs):
"""
returns a command with two options:
* an option created from the args and kwargs
* --opt, ConditionalOption with required_fn from kwargs
"""
# pylint: disable=no-self-use
@click.command()
@click.option(pname, **kwargs)
@click.option('--opt', required_fn=required_fn, cls=ConditionalOption)
def cmd(on, opt):
"""dummy command for testing"""
# pylint: disable=unused-argument, invalid-name
click.echo(opt)
return cmd
def test_switch_off(self):
"""
scenario: switch --on/--off determines if option opt is required
action: invoke with no options
behaviour: flag is off by default -> command runs without complaining
"""
cmd = self.simple_cmd('--on/--off')
runner = CliRunner()
result = runner.invoke(cmd, [])
self.assertIsNone(result.exception)
def test_switch_on(self):
"""
scenario: switch --on/--off determines if option opt is required
action: invoke with --on
behaviour: fails with Missing option message
"""
cmd = self.simple_cmd('--on/--off')
runner = CliRunner()
result = runner.invoke(cmd, ['--on'])
self.assertIsNotNone(result.exception)
self.assertIn('Error: Missing option "--opt".', result.output)
def test_flag_off(self):
"""
scenario: flag "--on" detrmines if option opt is required
action: invoke without options
behaviour: command runs without complaining
"""
cmd = self.simple_cmd('--on', is_flag=True)
runner = CliRunner()
result = runner.invoke(cmd, [])
self.assertIsNone(result.exception)
def test_flag_on(self):
"""
scenario: flag "--on" detrmines if option opt is required
action: invoke with --on
behaviour: fails with Missing option message
"""
cmd = self.simple_cmd('--on', is_flag=True)
runner = CliRunner()
result = runner.invoke(cmd, ['--on'])
self.assertIsNotNone(result.exception)
self.assertIn('Error: Missing option "--opt".', result.output)
def setup_multi_non_eager(self):
"""
scenario a-or-b:
* flag a_or_b (--a/--b)
* opt-a required if a_or_b == True
* opt-b required if a_or_b == False
"""
# pylint: disable=no-self-use
@click.command()
@click.option('--a/--b', 'a_or_b')
@click.option('--opt-a', required_fn=lambda c: c.params.get('a_or_b'), cls=ConditionalOption)
@click.option('--opt-b', required_fn=lambda c: not c.params.get('a_or_b'), cls=ConditionalOption)
def cmd(a_or_b, opt_a, opt_b):
"""test command for scenario a-or-b"""
# pylint: disable=unused-argument
click.echo('{} / {}'.format(opt_a, opt_b))
runner = CliRunner()
return runner, cmd
def test_aa(self):
"""
scenario = a-or-b
action: require a, give a (+ reversed order)
behaviour: command runs
"""
runner, cmd = self.setup_multi_non_eager()
result = runner.invoke(cmd, ['--a', '--opt-a=Bla'])
self.assertIsNone(result.exception)
self.assertEqual(result.output, 'Bla / None\n')
result_rev = runner.invoke(cmd, ['--opt-a=Bla', '--a'])
self.assertIsNone(result_rev.exception)
self.assertEqual(result_rev.output, 'Bla / None\n')
def test_ab(self):
"""
scenario = a-or-b
action: require a, give b (+ reversed order)
behaviour: fail, Missing option
"""
runner, cmd = self.setup_multi_non_eager()
result = runner.invoke(cmd, ['--a', '--opt-b=Bla'])
self.assertIsNotNone(result.exception)
self.assertIn('Error: Missing option "--opt-a".', result.output)
result_rev = runner.invoke(cmd, ['--opt-b=Bla', '--a'])
self.assertIsNotNone(result_rev.exception)
self.assertIn('Error: Missing option "--opt-a".', result_rev.output)
def test_ba(self):
"""
scenario = a-or-b
action: require b, give a (+ reversed order)
behaviour: fail, Missing option
"""
runner, cmd = self.setup_multi_non_eager()
result = runner.invoke(cmd, ['--b', '--opt-a=Bla'])
self.assertIsNotNone(result.exception)
self.assertIn('Error: Missing option "--opt-b".', result.output)
result_rev = runner.invoke(cmd, ['--opt-a=Bla', '--b'])
self.assertIsNotNone(result_rev.exception)
self.assertIn('Error: Missing option "--opt-b".', result_rev.output)
@staticmethod
def user_callback(_ctx, param, value):
"""
Testing callback that does not accept 42 and transforms a missing value to -1
"""
if not value:
return -1
elif value != 42:
raise click.BadParameter('invalid', param=param)
else:
return value
@staticmethod
def setup_flag_cond(**kwargs):
"""Set up a command with a flag and a customizable option that depends on it."""
@click.command()
@click.option('--flag', is_flag=True)
@click.option('--opt-a', required_fn=lambda c: c.params.get('flag'), cls=ConditionalOption, **kwargs)
def cmd(flag, opt_a):
""" A command with a flag and customizable options that dependon it """
# pylint: disable=unused-argument
click.echo('{}'.format(opt_a))
return cmd
def test_default(self):
"""Test that the default still gets passed."""
cmd = self.setup_flag_cond(default='default')
result_noflag = self.runner.invoke(cmd)
self.assertIsNone(result_noflag.exception)
self.assertEqual('default\n', result_noflag.output)
result_flag = self.runner.invoke(cmd, ['--flag'])
self.assertIsNone(result_flag.exception)
self.assertEqual('default\n', result_flag.output)
def test_callback(self):
"""Test that the callback still gets called."""
cmd = self.setup_flag_cond(default=23, type=int, callback=self.user_callback)
result_noflag = self.runner.invoke(cmd)
self.assertIsNotNone(result_noflag.exception)
result_flag = self.runner.invoke(cmd, ['--flag'])
self.assertIsNotNone(result_flag.exception)
def test_prompt_callback(self):
"""Test that the callback gets called on prompt results."""
cmd = self.setup_flag_cond(prompt='A', default=23, type=int, callback=self.user_callback)
result_noflag = self.runner.invoke(cmd, input='\n')
self.assertIsNotNone(result_noflag.exception)
self.assertIn('A [23]: \n', result_noflag.output)
self.assertIn('Invalid', result_noflag.output)
result_flag = self.runner.invoke(cmd, ['--flag'], input='\n')
self.assertIsNotNone(result_flag.exception)
self.assertIn('A [23]: \n', result_flag.output)
self.assertIn('Invalid', result_flag.output)
def test_required(self):
"""Test that required_fn overrides required if it evaluates to False."""
cmd = self.setup_flag_cond(required=True)
result_noflag = self.runner.invoke(cmd)
self.assertIsNone(result_noflag.exception)
result_flag = self.runner.invoke(cmd, ['--flag'])
self.assertIsNotNone(result_flag.exception)
```
#### File: cmdline/utils/daemon.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import click
from tabulate import tabulate
from aiida.cmdline.utils.common import format_local_time
from aiida.daemon.client import DaemonClient
def print_client_response_status(response):
"""
Print the response status of a call to the CircusClient through the DaemonClient
:param response: the response object
"""
if 'status' not in response:
return
if response['status'] == 'active':
click.secho('RUNNING', fg='green', bold=True)
elif response['status'] == 'ok':
click.secho('OK', fg='green', bold=True)
elif response['status'] == DaemonClient.DAEMON_ERROR_NOT_RUNNING:
click.secho('FAILED', fg='red', bold=True)
click.echo('Try to run \'verdi daemon start --foreground\' to potentially see the exception')
elif response['status'] == DaemonClient.DAEMON_ERROR_TIMEOUT:
click.secho('TIMEOUT', fg='red', bold=True)
else:
click.echo(response['status'])
def get_daemon_status(client):
"""
Print the status information of the daemon for a given profile through its DaemonClient
:param client: the DaemonClient
"""
if not client.is_daemon_running:
return 'The daemon is not running'
status_response = client.get_status()
if status_response['status'] == 'stopped':
return 'The daemon is paused'
elif status_response['status'] == 'error':
return 'The daemon is in an unexpected state, try verdi daemon restart --reset'
elif status_response['status'] == 'timeout':
return 'The daemon is running but the call to the circus controller timed out'
worker_response = client.get_worker_info()
daemon_response = client.get_daemon_info()
if 'info' not in worker_response or 'info' not in daemon_response:
return 'Call to the circus controller timed out'
workers = [['PID', 'MEM %', 'CPU %', 'started']]
for worker_pid, worker_info in worker_response['info'].items():
worker_row = [worker_pid, worker_info['mem'], worker_info['cpu'], format_local_time(worker_info['create_time'])]
workers.append(worker_row)
if len(workers) > 1:
workers_info = tabulate(workers, headers='firstrow', tablefmt='simple')
else:
workers_info = '--> No workers are running. Use verdi daemon incr to start some!\n'
info = {
'pid': daemon_response['info']['pid'],
'time': format_local_time(daemon_response['info']['create_time']),
'nworkers': len(workers) - 1,
'workers': workers_info
}
template = ('Daemon is running as PID {pid} since {time}\nActive workers [{nworkers}]:\n{workers}\n'
'Use verdi daemon [incr | decr] [num] to increase / decrease the amount of workers')
return template.format(**info)
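# Usage sketch (construction of the DaemonClient is profile-specific and omitted here;
# treat `client` as an already configured instance and `response` as a circus client
# reply dict):
#
#   click.echo(get_daemon_status(client))
#   print_client_response_status(response)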
```
#### File: utils/query/mapping.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida.cmdline.utils.query import formatting
class ProjectionMapper(object):
"""
Class to map projection names from the CLI to entity labels, attributes and formatters.
The command line interface will often have to display database entities and their attributes. The names of
the attributes exposed on the CLI do not always match one-to-one with the attributes in the ORM and often
they need to be formatted for the screen in some way. Additionally, for commands that display lists of entries,
often a header needs to be printed with a label for each attribute, which also are not necessarily identical.
For any given entity, the CLI typically exposes a set of projections, which are the keywords to reference certain
attributes. This mapper class serves to map these projections onto the corresponding label and attribute names, as
well as formatter functions to format the attribute values into strings, suitable to be printed by the CLI.
"""
_valid_projections = []
def __init__(self, projection_labels=None, projection_attributes=None, projection_formatters=None):
# pylint: disable=unused-variable,undefined-variable
if not self._valid_projections:
raise NotImplementedError('no valid projections were specified by the sub class')
self._projection_labels = {}
self._projection_attributes = {}
self._projection_formatters = {}
if projection_labels is not None:
for projection in self._valid_projections:
try:
self._projection_labels[projection] = projection_labels[projection]
except KeyError:
self._projection_labels[projection] = projection.replace('_', ' ').capitalize()
if projection_attributes is not None:
for projection in self._valid_projections:
try:
self._projection_attributes[projection] = projection_attributes[projection]
except KeyError:
self._projection_attributes[projection] = projection
if projection_formatters is not None:
for projection in self._valid_projections:
try:
self._projection_formatters[projection] = projection_formatters[projection]
except KeyError:
attribute = self._projection_attributes[projection]
self._projection_formatters[projection] = lambda value, attribute=attribute: value[attribute]
@property
def valid_projections(self):
return self._valid_projections
def get_label(self, projection):
return self._projection_labels[projection]
def get_attribute(self, projection):
return self._projection_attributes[projection]
def get_formatter(self, projection):
return self._projection_formatters[projection]
def format(self, projection, value):
return self.get_formatter(projection)(value)
class CalculationProjectionMapper(ProjectionMapper):
"""The CLI projection mapper for Calculation derived entities."""
def __init__(self, projections, projection_labels=None, projection_attributes=None, projection_formatters=None):
# pylint: disable=too-many-locals
from aiida.orm.calculation import Calculation
from aiida.orm.mixins import Sealable
self._valid_projections = projections
sealed_key = 'attributes.{}'.format(Sealable.SEALED_KEY)
process_paused_key = 'attributes.{}'.format(Calculation.PROCESS_PAUSED_KEY)
process_label_key = 'attributes.{}'.format(Calculation.PROCESS_LABEL_KEY)
process_state_key = 'attributes.{}'.format(Calculation.PROCESS_STATE_KEY)
process_status_key = 'attributes.{}'.format(Calculation.PROCESS_STATUS_KEY)
exit_status_key = 'attributes.{}'.format(Calculation.EXIT_STATUS_KEY)
default_labels = {
'pk': 'PK',
'uuid': 'UUID',
'ctime': 'Created',
'mtime': 'Modified',
}
default_attributes = {
'pk': 'id',
'sealed': sealed_key,
'paused': process_paused_key,
'process_label': process_label_key,
'process_state': process_state_key,
'process_status': process_status_key,
'exit_status': exit_status_key,
}
# pylint: disable=line-too-long
default_formatters = {
'ctime':
lambda value: formatting.format_relative_time(value['ctime']),
'mtime':
lambda value: formatting.format_relative_time(value['mtime']),
'state':
lambda value: formatting.format_state(value[process_state_key], value[process_paused_key], value[exit_status_key]),
'process_state':
lambda value: formatting.format_process_state(value[process_state_key]),
'sealed':
lambda value: formatting.format_sealed(value[sealed_key]),
}
if projection_labels is not None:
for projection, label in projection_labels.items():
if projection not in self.valid_projections:
raise ValueError('{} is not a valid projection'.format(projection))
else:
default_labels[projection] = label
if projection_attributes is not None:
for projection, attribute in projection_attributes.items():
if projection not in self.valid_projections:
raise ValueError('{} is not a valid projection'.format(projection))
else:
default_attributes[projection] = attribute
if projection_formatters is not None:
for projection, formatter in projection_formatters.items():
if projection not in self.valid_projections:
raise ValueError('{} is not a valid projection'.format(projection))
else:
default_formatters[projection] = formatter
super(CalculationProjectionMapper, self).__init__(default_labels, default_attributes, default_formatters)
```
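To illustrate how the mapper resolves labels, attributes and formatters, here is a minimal sketch with a hypothetical subclass (not part of AiiDA); it assumes AiiDA is installed so that the module above is importable.
```python
from aiida.cmdline.utils.query.mapping import ProjectionMapper
class GroupProjectionMapper(ProjectionMapper):
    """Hypothetical mapper exposing two projections for some 'group' entity."""
    _valid_projections = ['pk', 'type_string']
mapper = GroupProjectionMapper(
    projection_labels={'type_string': 'Type'},  # 'pk' falls back to the capitalized default 'Pk'
    projection_attributes={},                   # attributes default to the projection names
    projection_formatters={'pk': lambda value: '#{}'.format(value['pk'])},
)
entry = {'pk': 42, 'type_string': 'core'}
print(mapper.get_label('type_string'))      # -> 'Type'
print(mapper.format('pk', entry))           # -> '#42'
print(mapper.format('type_string', entry))  # -> 'core'
```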
#### File: common/orbital/__init__.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import math
from aiida.common.exceptions import ValidationError, MissingPluginError
from aiida.plugins.factory import BaseFactory
class Orbital(object):
"""
Base class for Orbitals. Can handle certain basic fields, their setting
and validation. More complex Orbital objects should then inherit from
this class
:param position: the absolute position (three floats) units in angstrom
:param x_orientation: x,y,z unit vector defining polar angle theta
in spherical coordinates unitless
:param z_orientation: x,y,z unit vector defining azimuthal angle phi
in spherical coordinates unitless
:param orientation_spin: x,y,z unit vector defining the spin orientation
unitless
:param diffusivity: Float controls the radial term in orbital equation
units are reciprocal Angstrom.
:param module_name: internal parameter, stores orbital type
"""
#NOTE x_orientation, z_orientation, spin_orientation, diffusivity might
#all need to be moved to RealHydrogenOrbital
_base_fields = ('position',
'x_orientation',
'z_orientation',
'spin_orientation',
'diffusivity',
'module_name', # Actually, this one is system reserved
)
def __init__(self):
self._orbital_dict = {}
def __repr__(self):
module_name = self.get_orbital_dict()['module_name']
return '<{}: {}>'.format(module_name, str(self))
def __str__(self):
raise NotImplementedError
def _validate_keys(self, input_dict):
"""
Checks all the keys of input_dict and tries to validate them, to ensure
that they have been properly set; raises exceptions indicating any
problems that arise during the validation.
:param input_dict: a dictionary of inputs
:return: input_dict: the original dictionary with all validated keys
now removed
:return: validated_dict: a dictionary containing all the input keys
which have now been validated.
"""
validated_dict = {}
for k in self._base_fields:
v = input_dict.pop(k, None)
if k == "module_name":
if v is None:
raise TypeError
try:
OrbitalFactory(v)
except (MissingPluginError, TypeError):
raise ValidationError("The module name {} was found to "
"be invalid".format(v))
if k == "position":
if v is None:
validated_dict.update({k: v})
continue
try:
v = list(float(i) for i in v)
if len(v) != 3:
raise ValueError
except (ValueError, TypeError):
raise ValueError("Wrong format for position, must be a"
" list of three float numbers.")
if "orientation" in k :
if v is None:
validated_dict.update({k: v})
continue
try:
v = list(float(i) for i in v)
if len(v) != 3:
raise ValueError
except (ValueError, TypeError):
raise ValueError("Wrong format for {}, must be a"
" list of three float numbers.")
# From a spherical coordinate version of orientation
# try:
# v = tuple(float(i) for i in v)
# if len(v) != (2):
# raise ValueError
# if v[0] >= 2*math.pi or v[0] <= 0:
# raise ValueError
# if v[1] >= math.pi or v[1] <= 0:
# raise ValueError
# except(ValueError, TypeError):
# raise ValueError("Wrong format for {}, must be two tuples"
# " each having two floats theta, phi where"
# " 0<=theta<2pi and 0<=phi<=pi.".format(k))
if k == "diffusivity":
if v is None:
validated_dict.update({k: v})
continue
try:
v = float(v)
except ValueError:
raise ValidationError("Diffusivity must always be a float")
validated_dict.update({k: v})
return validated_dict
def set_orbital_dict(self, init_dict):
"""
Sets the orbital_dict, which can vary depending on the particular
implementation of this base class.
:param init_dict: the initialization dictionary
"""
if not isinstance(init_dict, dict):
raise Exception('You must supply a dict as an init')
# Adds the module_name in hard-coded manner
init_dict.update({"module_name": self._get_module_name()})
validated_dict = self._validate_keys(init_dict)
for k, v in validated_dict.items():
self._orbital_dict[k] = v
def get_orbital_dict(self):
"""
returns the internal keys as a dictionary
"""
output = {}
for k in self._default_fields:
try:
output[k] = self._orbital_dict[k]
except KeyError:
pass
return output
def _get_module_name(self):
"""
Return the module name (or label) of the orbital.
"""
return self.__module__.split('.')[-1]
def OrbitalFactory(entry_point):
"""
Return the Orbital plugin class for a given entry point
:param entry_point: the entry point name of the Orbital plugin
"""
return BaseFactory('aiida.common.orbital', entry_point)
```
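The vector checks in ``_validate_keys`` above all follow the same pattern: coerce the value to a list of exactly three floats or raise. A standalone restatement of that pattern (hypothetical helper, not part of the AiiDA API):
```python
def coerce_three_floats(value, name):
    """Coerce value to a list of three floats, mirroring Orbital._validate_keys."""
    try:
        value = [float(i) for i in value]
        if len(value) != 3:
            raise ValueError
    except (ValueError, TypeError):
        raise ValueError("Wrong format for {}, must be a list of three float numbers.".format(name))
    return value
print(coerce_three_floats((1, 2, 3.5), 'position'))  # -> [1.0, 2.0, 3.5]
# coerce_three_floats('not-a-vector', 'position') would raise ValueError
```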
#### File: aiida/common/test_folders.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import io
import unittest
class FoldersTest(unittest.TestCase):
"""
Tests for the Folder class.
"""
@classmethod
def test_unicode(cls):
"""
Check that there are no exceptions raised when
using unicode folders.
"""
from aiida.common.folders import Folder
import os
import tempfile
tmpsource = tempfile.mkdtemp()
tmpdest = tempfile.mkdtemp()
with io.open(os.path.join(tmpsource, "sąžininga"), 'w', encoding='utf8') as fhandle:
fhandle.write(u"test")
with io.open(os.path.join(tmpsource, "žąsis"), 'w', encoding='utf8') as fhandle:
fhandle.write(u"test")
folder = Folder(tmpdest)
folder.insert_path(tmpsource, "destination")
folder.insert_path(tmpsource, u"šaltinis")
folder = Folder(os.path.join(tmpsource, u"šaltinis"))
folder.insert_path(tmpsource, "destination")
folder.insert_path(tmpdest, u"kitas-šaltinis")
def test_get_abs_path_without_limit(self):
"""
Check that the absolute path function can get an absolute path
"""
from aiida.common.folders import Folder
folder = Folder('/tmp')
# Should not raise any exception
self.assertEqual(folder.get_abs_path('test_file.txt'), '/tmp/test_file.txt')
```
#### File: aiida/daemon/workflowmanager.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida.common import aiidalogger
from aiida.common.datastructures import wf_states, wf_exit_call, wf_default_call
logger = aiidalogger.getChild('workflowmanager')
def execute_steps():
"""
This method loops over the RUNNING workflows and handles the execution of their
steps until each workflow reaches an end (or gets stopped for errors).
In the loop, for each RUNNING workflow the method also loops over each of its
RUNNING steps, testing if all the calculations and subworkflows attached to
the step are FINISHED. In this case the step is set as FINISHED and the
workflow is advanced to the step's next method present in the db with
``advance_workflow``; otherwise, if any of the step's JobCalculations is found in
the NEW state, the method will submit it. If none of the previous conditions apply
the step is flagged as ERROR and cannot proceed anymore, blocking the future
execution of the step and, consequently, of the workflow.
Finally, for each workflow the method tests if there are INITIALIZED steps
to be launched, and if so reloads the workflow and executes those specific
steps. In case of error the step is flagged in the ERROR state and the
stack trace is reported in the workflow report.
"""
from aiida.orm import JobCalculation
from aiida.orm.implementation import get_all_running_steps
logger.debug("Querying the worflow DB")
running_steps = get_all_running_steps()
for s in running_steps:
if s.parent.state == wf_states.FINISHED:
s.set_state(wf_states.FINISHED)
continue
w = s.parent.get_aiida_class()
logger.info("[{0}] Found active step: {1}".format(w.pk, s.name))
s_calcs_new = [c.pk for c in s.get_calculations() if c._is_new()]
s_calcs_finished = [c.pk for c in s.get_calculations() if c.is_finished_ok]
s_calcs_failed = [c.pk for c in s.get_calculations() if c.is_failed]
s_calcs_num = len(s.get_calculations())
s_sub_wf_finished = [sw.pk for sw in s.get_sub_workflows() if sw.has_finished_ok()]
s_sub_wf_failed = [sw.pk for sw in s.get_sub_workflows() if sw.has_failed()]
s_sub_wf_num = len(s.get_sub_workflows())
if (s_calcs_num == (len(s_calcs_finished) + len(s_calcs_failed)) and
s_sub_wf_num == (len(s_sub_wf_finished) + len(s_sub_wf_failed))):
logger.info("[{0}] Step: {1} ready to move".format(w.pk, s.name))
s.set_state(wf_states.FINISHED)
advance_workflow(w, s)
elif len(s_calcs_new) > 0:
for pk in s_calcs_new:
obj_calc = JobCalculation.get_subclass_from_pk(pk=pk)
try:
obj_calc.submit()
logger.info("[{0}] Step: {1} launched calculation {2}".format(w.pk, s.name, pk))
except Exception:
logger.error("[{0}] Step: {1} cannot launch calculation {2}".format(w.pk, s.name, pk))
def advance_workflow(w, step):
"""
The method tries to advance a step by running its next method and handling
possible errors.
If the method to advance is the Workflow ``exit`` method and there are no
more steps RUNNING or in ERROR state then the workflow is set to FINISHED,
otherwise an error is added to the report and the Workflow is flagged as
ERROR.
If the method is the ``wf_default_call`` this means the step had no next,
and possibly is part of a branching. In this case the Workflow is not
advanced but the method returns True to let the other steps kick in.
Finally the method tries to load the Workflow and execute the selected step,
reporting the errors and the stack trace in the report in case of problems.
If no errors are reported the method returns True; in all other cases
the Workflow is set to the ERROR state and the method returns False.
:param w: Workflow object to advance
:param step: DbWorkflowStep to execute
:return: True if the step has been executed, False otherwise
"""
if step.nextcall == wf_exit_call:
logger.info("[{0}] Step: {1} has an exit call".format(w.pk, step.name))
if len(w.get_steps(wf_states.RUNNING)) == 0 and len(w.get_steps(wf_states.ERROR)) == 0:
logger.info("[{0}] Step: {1} is really finished, going out".format(w.pk, step.name))
w.set_state(wf_states.FINISHED)
return True
else:
logger.error("[{0}] Step: {1} is NOT finished, stopping workflow "
"with error".format(w.pk, step.name))
w.append_to_report("""Step: {0} is NOT finished, some calculations or workflows
are still running and there is a next call, stopping workflow with error""".format(step.name))
w.set_state(wf_states.ERROR)
return False
elif step.nextcall == wf_default_call:
logger.info("[{0}] Step: {1} is not finished and has no next call, waiting "
"for other methods to kick.".format(w.pk, step.name))
w.append_to_report("Step: {0} is not finished and has no "
"next call, waiting for other methods "
"to kick.".format(step.name))
return True
elif step.nextcall is not None:
logger.info("[{0}] In advance_workflow the step {1} goes to nextcall {2}"
"".format(w.pk, step.name, step.nextcall))
try:
#w = Workflow.get_subclass_from_pk(w_superclass.pk)
getattr(w, step.nextcall)()
return True
except Exception:
import traceback
w.append_to_report("ERROR ! This workflow got an error in the {0} "
"method, we report down the stack trace".format(
step.nextcall))
w.append_to_report("full traceback: {0}".format(traceback.format_exc()))
w.get_step(step.nextcall).set_state(wf_states.ERROR)
w.set_state(wf_states.ERROR)
return False
else:
logger.error("[{0}] Step: {1} ERROR, no nextcall".format(w.pk,
step.name))
w.append_to_report("Step: {0} ERROR, no nextcall".format(step.name))
w.set_state(wf_states.ERROR)
return False
```
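The gating condition in ``execute_steps`` can be summarised as: a step advances only once every attached calculation and sub-workflow is in a terminal state (finished or failed). A standalone restatement with plain lists instead of ORM objects (hypothetical helper, for illustration only):
```python
def step_ready_to_advance(calc_states, subwf_states):
    """True when every calculation and sub-workflow has reached a terminal state."""
    terminal = {'finished', 'failed'}
    return (all(state in terminal for state in calc_states) and
            all(state in terminal for state in subwf_states))
print(step_ready_to_advance(['finished', 'failed'], ['finished']))  # -> True
print(step_ready_to_advance(['finished', 'new'], []))               # -> False
```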
#### File: orm/data/structure.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import itertools
import copy
from functools import reduce
import six
from six.moves import range, zip
from aiida.orm import Data
from aiida.common.exceptions import UnsupportedSpeciesError
from aiida.common.utils import classproperty, xyz_parser_iterator
from aiida.orm.calculation.inline import optional_inline
# Threshold used to check if the mass of two different Site objects is the same.
_mass_threshold = 1.e-3
# Threshold to check if the sum is one or not
_sum_threshold = 1.e-6
# Threshold used to check if the cell volume is not zero.
_volume_threshold = 1.e-6
# Element table
from aiida.common.constants import elements
_valid_symbols = tuple(i['symbol'] for i in elements.values())
_atomic_masses = {el['symbol']: el['mass'] for el in elements.values()}
_atomic_numbers = {data['symbol']: num for num, data in elements.items()}
def _get_valid_cell(inputcell):
"""
Return the cell in a valid format from a generic input.
:raise ValueError: whenever the format is not valid.
"""
try:
the_cell = list(list(float(c) for c in i) for i in inputcell)
if len(the_cell) != 3:
raise ValueError
if any(len(i) != 3 for i in the_cell):
raise ValueError
except (IndexError, ValueError, TypeError):
raise ValueError("Cell must be a list of three vectors, each "
"defined as a list of three coordinates.")
if abs(calc_cell_volume(the_cell)) < _volume_threshold:
raise ValueError("The cell volume is zero. Invalid cell.")
return the_cell
def get_valid_pbc(inputpbc):
"""
Return a list of three booleans for the periodic boundary conditions,
in a valid format from a generic input.
:raise ValueError: if the format is not valid.
"""
if isinstance(inputpbc, bool):
the_pbc = (inputpbc, inputpbc, inputpbc)
elif (hasattr(inputpbc, '__iter__')):
# To manage numpy lists of bools, whose elements are of type numpy.bool_
# and for which isinstance(i,bool) return False...
if hasattr(inputpbc, 'tolist'):
the_value = inputpbc.tolist()
else:
the_value = inputpbc
if all(isinstance(i, bool) for i in the_value):
if len(the_value) == 3:
the_pbc = tuple(i for i in the_value)
elif len(the_value) == 1:
the_pbc = (the_value[0], the_value[0], the_value[0])
else:
raise ValueError("pbc length must be either one or three.")
else:
raise ValueError("pbc elements are not booleans.")
else:
raise ValueError("pbc must be a boolean or a list of three "
"booleans.", inputpbc)
return the_pbc
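# Examples (doctest-style):
# >>> get_valid_pbc(True)
# (True, True, True)
# >>> get_valid_pbc([True, False, True])
# (True, False, True)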
def has_ase():
"""
:return: True if the ase module can be imported, False otherwise.
"""
try:
import ase
except ImportError:
return False
return True
def has_pymatgen():
"""
:return: True if the pymatgen module can be imported, False otherwise.
"""
try:
import pymatgen
except ImportError:
return False
return True
def get_pymatgen_version():
"""
:return: string with pymatgen version, None if can not import.
"""
if not has_pymatgen():
return None
import pymatgen
return pymatgen.__version__
def has_spglib():
"""
:return: True if the spglib module can be imported, False otherwise.
"""
try:
import spglib
except ImportError:
return False
return True
def calc_cell_volume(cell):
"""
Calculates the volume of a cell given the three lattice vectors.
It is calculated as cell[0] . (cell[1] x cell[2]), where . represents
a dot product and x a cross product.
:param cell: the cell vectors; it must be a 3x3 list of lists of floats,
no other checks are done.
:returns: the cell volume.
"""
# returns the volume of the primitive cell: |a1.(a2xa3)|
a1 = cell[0]
a2 = cell[1]
a3 = cell[2]
a_mid_0 = a2[1] * a3[2] - a2[2] * a3[1]
a_mid_1 = a2[2] * a3[0] - a2[0] * a3[2]
a_mid_2 = a2[0] * a3[1] - a2[1] * a3[0]
return abs(a1[0] * a_mid_0 + a1[1] * a_mid_1 + a1[2] * a_mid_2)
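# Example (doctest-style): volume of an orthorhombic 2 x 3 x 4 angstrom cell.
# >>> calc_cell_volume([[2., 0., 0.], [0., 3., 0.], [0., 0., 4.]])
# 24.0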
def _create_symbols_tuple(symbols):
"""
Returns a tuple with the symbols provided. If a string is provided,
this is converted to a tuple with one single element.
"""
if isinstance(symbols, six.string_types):
symbols_list = (symbols,)
else:
symbols_list = tuple(symbols)
return symbols_list
def _create_weights_tuple(weights):
"""
Returns a tuple with the weights provided. If a number is provided,
this is converted to a tuple with one single element.
If None is provided, this is converted to the tuple (1.,)
"""
import numbers
if weights is None:
weights_tuple = (1.,)
elif isinstance(weights, numbers.Number):
weights_tuple = (weights,)
else:
weights_tuple = tuple(float(i) for i in weights)
return weights_tuple
def create_automatic_kind_name(symbols, weights):
"""
Create a string obtained with the symbols appended one
after the other, without spaces, in alphabetical order;
if the site has a vacancy, an X is appended at the end too.
"""
sorted_symbol_list = list(set(symbols))
sorted_symbol_list.sort() # In-place sort
name_string = "".join(sorted_symbol_list)
if has_vacancies(weights):
name_string += "X"
return name_string
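# Examples (doctest-style), relying on has_vacancies() defined further below:
# >>> create_automatic_kind_name(['Ti', 'Ba'], (0.5, 0.5))
# 'BaTi'
# >>> create_automatic_kind_name(['Ba', 'Ti'], (0.5, 0.4))
# 'BaTiX'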
def validate_weights_tuple(weights_tuple, threshold):
"""
Validates the weight of the atomic kinds.
:raise: ValueError if the weights_tuple is not valid.
:param weights_tuple: the tuple to validate. It must be a
tuple of floats (as created by :func:_create_weights_tuple).
:param threshold: a float number used as a threshold to check that the sum
of the weights is <= 1.
If the sum is less than one, it means that there are vacancies.
Each element of the list must be >= 0, and the sum must be <= 1.
"""
w_sum = sum(weights_tuple)
if (any(i < 0. for i in weights_tuple) or
(w_sum - 1. > threshold)):
raise ValueError("The weight list is not valid (each element "
"must be positive, and the sum must be <= 1).")
def is_valid_symbol(symbol):
"""
Validates the chemical symbol name.
:return: True if the symbol is a valid chemical symbol (with correct
capitalization), or the dummy X, False otherwise.
Recognized symbols are for elements from hydrogen (Z=1) to lawrencium
(Z=103). In addition, the dummy element X (Z=0, unknown element) is supported.
"""
return symbol in _valid_symbols
def validate_symbols_tuple(symbols_tuple):
"""
Used to validate whether the chemical species are valid.
:param symbols_tuple: a tuple (or list) with the chemical symbols name.
:raises: UnsupportedSpeciesError if any symbol in the tuple is not a valid chemical
symbol (with correct capitalization).
Refer also to the documentation of :func:is_valid_symbol
"""
if len(symbols_tuple) == 0:
valid = False
else:
valid = all(is_valid_symbol(sym) for sym in symbols_tuple)
if not valid:
raise UnsupportedSpeciesError("At least one element of the symbol list {} has "
"not been recognized.".format(symbols_tuple))
def is_ase_atoms(ase_atoms):
"""
Check if the ase_atoms parameter is actually a ase.Atoms object.
:param ase_atoms: an object, expected to be an ase.Atoms.
:return: a boolean.
Requires the ability to import ase, by doing 'import ase'.
"""
# TODO: Check if we want to try to import ase and do something
# reasonable depending on whether ase is there or not.
import ase
return isinstance(ase_atoms, ase.Atoms)
def group_symbols(_list):
"""
Group a list of symbols to a list containing the number of consecutive
identical symbols, and the symbol itself.
Examples:
* ``['Ba','Ti','O','O','O','Ba']`` will return
``[[1,'Ba'],[1,'Ti'],[3,'O'],[1,'Ba']]``
* ``[ [ [1,'Ba'],[1,'Ti'] ],[ [1,'Ba'],[1,'Ti'] ] ]`` will return
``[[2, [ [1, 'Ba'], [1, 'Ti'] ] ]]``
:param _list: a list of elements representing a chemical formula
:return: a list of length-2 lists of the form [ multiplicity , element ]
"""
the_list = copy.deepcopy(_list)
the_list.reverse()
grouped_list = [[1, the_list.pop()]]
while the_list:
elem = the_list.pop()
if elem == grouped_list[-1][1]:
# same symbol is repeated
grouped_list[-1][0] += 1
else:
grouped_list.append([1, elem])
return grouped_list
def get_formula_from_symbol_list(_list, separator=""):
"""
Return a string with the formula obtained from the list of symbols.
Examples:
* ``[[1,'Ba'],[1,'Ti'],[3,'O']]`` will return ``'BaTiO3'``
* ``[[2, [ [1, 'Ba'], [1, 'Ti'] ] ]]`` will return ``'(BaTi)2'``
:param _list: a list of symbols and multiplicities as obtained from
the function group_symbols
:param separator: a string used to concatenate symbols. Default empty.
:return: a string
"""
list_str = []
for elem in _list:
if elem[0] == 1:
multiplicity_str = ''
else:
multiplicity_str = str(elem[0])
if isinstance(elem[1], six.string_types):
list_str.append("{}{}".format(elem[1], multiplicity_str))
elif elem[0] > 1:
list_str.append(
"({}){}".format(get_formula_from_symbol_list(elem[1],
separator=separator),
multiplicity_str))
else:
list_str.append("{}{}".format(get_formula_from_symbol_list(elem[1],
separator=separator),
multiplicity_str))
return separator.join(list_str)
def get_formula_group(symbol_list, separator=""):
"""
Return a string with the chemical formula from a list of chemical symbols.
The formula is written in a "compact" way, i.e. trying to group parts
of the formula as much as possible.
.. note:: it works for instance very well if structure was obtained
from an ASE supercell.
Example of result:
``['Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'O', 'O', 'O',
'Ba', 'Ti', 'Ti', 'O', 'O', 'O']`` will return ``'(BaTiO3)2BaTi2O3'``.
:param symbol_list: list of symbols
(e.g. ['Ba','Ti','O','O','O'])
:param separator: a string used to concatenate symbols. Default empty.
:returns: a string with the chemical formula for the given structure.
"""
def group_together(_list, group_size, offset):
"""
:param _list: a list
:param group_size: size of the groups
:param offset: beginning grouping after offset elements
:return : a list of lists made of groups of size group_size
obtained by grouping list elements together
The first elements (up to _list[offset-1]) are not grouped
example:
``group_together(['O','Ba','Ti','Ba','Ti'],2,1) =
['O',['Ba','Ti'],['Ba','Ti']]``
"""
the_list = copy.deepcopy(_list)
the_list.reverse()
grouped_list = []
for i in range(offset):
grouped_list.append([the_list.pop()])
while the_list:
l = []
for i in range(group_size):
if the_list:
l.append(the_list.pop())
grouped_list.append(l)
return grouped_list
def cleanout_symbol_list(_list):
"""
:param _list: a list of groups of symbols and multiplicities
:return : a list where all groups with multiplicity 1 have
been reduced to minimum
example: ``[[1,[[1,'Ba']]]]`` will return ``[[1,'Ba']]``
"""
the_list = []
for elem in _list:
if elem[0] == 1 and isinstance(elem[1], list):
the_list.extend(elem[1])
else:
the_list.append(elem)
return the_list
def group_together_symbols(_list, group_size):
"""
Successive application of group_together, group_symbols and
cleanout_symbol_list, in order to group a symbol list, scanning all
possible offsets, for a given group size
:param _list: the symbol list (see function group_symbols)
:param group_size: the size of the groups
:return the_symbol_list: the new grouped symbol list
:return has_grouped: True if we grouped something
"""
the_symbol_list = copy.deepcopy(_list)
has_grouped = False
offset = 0
while (not has_grouped) and (offset < group_size):
grouped_list = group_together(the_symbol_list, group_size, offset)
new_symbol_list = group_symbols(grouped_list)
if (len(new_symbol_list) < len(grouped_list)):
the_symbol_list = copy.deepcopy(new_symbol_list)
the_symbol_list = cleanout_symbol_list(the_symbol_list)
has_grouped = True
# print get_formula_from_symbol_list(the_symbol_list)
offset += 1
return the_symbol_list, has_grouped
def group_all_together_symbols(_list):
"""
Successive application of the function group_together_symbols, to group
a symbol list, scanning all possible offsets and group sizes
:param _list: the symbol list (see function group_symbols)
:return: the new grouped symbol list
"""
has_finished = False
group_size = 2
n = len(_list)
the_symbol_list = copy.deepcopy(_list)
while (not has_finished) and (group_size <= n // 2):
# try to group as much as possible by groups of size group_size
the_symbol_list, has_grouped = group_together_symbols(
the_symbol_list,
group_size)
has_finished = has_grouped
group_size += 1
# stop as soon as we managed to group something
# or when the group_size is too big to get anything
return the_symbol_list
# initial grouping of the chemical symbols
old_symbol_list = [-1]
new_symbol_list = group_symbols(symbol_list)
# successively apply the grouping procedure until the symbol list does not
# change anymore
while new_symbol_list != old_symbol_list:
old_symbol_list = copy.deepcopy(new_symbol_list)
new_symbol_list = group_all_together_symbols(old_symbol_list)
return get_formula_from_symbol_list(new_symbol_list, separator=separator)
def get_formula(symbol_list, mode='hill', separator=""):
"""
Return a string with the chemical formula.
:param symbol_list: a list of symbols, e.g. ``['H','H','O']``
:param mode: a string to specify how to generate the formula, can
assume one of the following values:
* 'hill' (default): count the number of atoms of each species,
then use Hill notation, i.e. alphabetical order with C and H
first if one or several C atom(s) is (are) present, e.g.
``['C','H','H','H','O','C','H','H','H']`` will return ``'C2H6O'``
``['S','O','O','H','O','H','O']`` will return ``'H2O4S'``
From <NAME>, <NAME>. Soc., 22 (8), pp 478–494 (1900)
* 'hill_compact': same as hill but the number of atoms for each
species is divided by the greatest common divisor of all of them, e.g.
``['C','H','H','H','O','C','H','H','H','O','O','O']``
will return ``'CH3O2'``
* 'reduce': group repeated symbols e.g.
``['Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'O', 'O', 'O',
'Ba', 'Ti', 'Ti', 'O', 'O', 'O']`` will return ``'BaTiO3BaTiO3BaTi2O3'``
* 'group': will try to group as much as possible parts of the formula
e.g.
``['Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'O', 'O', 'O',
'Ba', 'Ti', 'Ti', 'O', 'O', 'O']`` will return ``'(BaTiO3)2BaTi2O3'``
* 'count': same as hill (i.e. one just counts the number
of atoms of each species) without the re-ordering (take the
order of the atomic sites), e.g.
``['Ba', 'Ti', 'O', 'O', 'O','Ba', 'Ti', 'O', 'O', 'O']``
will return ``'Ba2Ti2O6'``
* 'count_compact': same as count but the number of atoms
for each species is divided by the greatest common divisor of
all of them, e.g.
``['Ba', 'Ti', 'O', 'O', 'O','Ba', 'Ti', 'O', 'O', 'O']``
will return ``'BaTiO3'``
:param separator: a string used to concatenate symbols. Default empty.
:return: a string with the formula
.. note:: in modes reduce, group, count and count_compact, the
initial order in which the atoms were appended by the user is
used to group and/or order the symbols in the formula
"""
if mode == 'group':
return get_formula_group(symbol_list, separator=separator)
# for hill and count cases, simply count the occurrences of each
# chemical symbol (with some re-ordering in hill)
elif mode in ['hill', 'hill_compact']:
symbol_set = set(symbol_list)
first_symbols = []
if 'C' in symbol_set:
# remove C (and H if present) from list and put them at the
# beginning
symbol_set.remove('C')
first_symbols.append('C')
if 'H' in symbol_set:
symbol_set.remove('H')
first_symbols.append('H')
ordered_symbol_set = first_symbols + list(sorted(symbol_set))
the_symbol_list = [[symbol_list.count(elem), elem]
for elem in ordered_symbol_set]
elif mode in ['count', 'count_compact']:
ordered_symbol_indexes = sorted([symbol_list.index(elem)
for elem in set(symbol_list)])
ordered_symbol_set = [symbol_list[i] for i in ordered_symbol_indexes]
the_symbol_list = [[symbol_list.count(elem), elem]
for elem in ordered_symbol_set]
elif mode == 'reduce':
the_symbol_list = group_symbols(symbol_list)
else:
raise ValueError('Mode should be hill, hill_compact, group, '
'reduce, count or count_compact')
if mode in ['hill_compact', 'count_compact']:
from fractions import gcd
the_gcd = reduce(gcd,[e[0] for e in the_symbol_list])
the_symbol_list = [[e[0]//the_gcd,e[1]] for e in the_symbol_list]
return get_formula_from_symbol_list(the_symbol_list, separator=separator)
def get_symbols_string(symbols, weights):
"""
Return a string that matches the symbols and weights as closely as
possible. If there is only one symbol (no alloy) with 100%
occupancy, just returns the symbol name. Otherwise, groups the full
string in curly brackets, and also tries to write the composition
(with 2-digit precision only).
If (sum of weights<1), we indicate it with the X symbol followed
by 1-sum(weights) (still with 2 digits precision, so it can be 0.00)
:param symbols: the symbols as obtained from <kind>._symbols
:param weights: the weights as obtained from <kind>._weights
.. note:: Note the difference with respect to the symbols and the
symbol properties!
"""
if len(symbols) == 1 and weights[0] == 1.:
return symbols[0]
else:
pieces = []
for s, w in zip(symbols, weights):
pieces.append("{}{:4.2f}".format(s, w))
if has_vacancies(weights):
pieces.append('X{:4.2f}'.format(1. - sum(weights)))
return "{{{}}}".format("".join(sorted(pieces)))
def has_vacancies(weights):
"""
Returns True if the sum of the weights is less than one.
It uses the internal variable _sum_threshold as a threshold.
:param weights: the weights
:return: a boolean
"""
w_sum = sum(weights)
return not (1. - w_sum < _sum_threshold)
def symop_ortho_from_fract(cell):
"""
Creates a matrix for conversion from fractional to orthogonal
coordinates.
Taken from
svn://www.crystallography.net/cod-tools/trunk/lib/perl5/Fractional.pm,
revision 850.
:param cell: array of cell parameters (three lengths and three angles)
"""
import math
import numpy
a, b, c, alpha, beta, gamma = cell
alpha, beta, gamma = [math.pi * x / 180 for x in [alpha, beta, gamma]]
ca, cb, cg = [math.cos(x) for x in [alpha, beta, gamma]]
sg = math.sin(gamma)
return numpy.array([
[a, b * cg, c * cb],
[0, b * sg, c * (ca - cb * cg) / sg],
[0, 0,
c * math.sqrt(sg * sg - ca * ca - cb * cb + 2 * ca * cb * cg) / sg]
])
def symop_fract_from_ortho(cell):
"""
Creates a matrix for conversion from orthogonal to fractional
coordinates.
Taken from
svn://www.crystallography.net/cod-tools/trunk/lib/perl5/Fractional.pm,
revision 850.
:param cell: array of cell parameters (three lengths and three angles)
"""
import math
import numpy
a, b, c, alpha, beta, gamma = cell
alpha, beta, gamma = [math.pi * x / 180 for x in [alpha, beta, gamma]]
ca, cb, cg = [math.cos(x) for x in [alpha, beta, gamma]]
sg = math.sin(gamma)
ctg = cg / sg
D = math.sqrt(sg * sg - cb * cb - ca * ca + 2 * ca * cb * cg)
return numpy.array([
[1.0 / a, -(1.0 / a) * ctg, (ca * cg - cb) / (a * D)],
[0, 1.0 / (b * sg), -(ca - cb * cg) / (b * D * sg)],
[0, 0, sg / (c * D)],
])
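# Example (doctest-style): the two matrices above are inverses of each other,
# e.g. for a cubic cell with lattice parameter 2 angstrom:
# >>> import numpy
# >>> cell = (2., 2., 2., 90., 90., 90.)
# >>> numpy.allclose(numpy.dot(symop_ortho_from_fract(cell), symop_fract_from_ortho(cell)), numpy.eye(3))
# True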
def ase_refine_cell(aseatoms, **kwargs):
"""
Detect the symmetry of the structure, remove symmetric atoms and
refine unit cell.
:param aseatoms: an ase.atoms.Atoms instance
:param symprec: symmetry precision, used by spglib
:return newase: refined cell with reduced set of atoms
:return symmetry: a dictionary describing the symmetry space group
"""
from spglib import refine_cell, get_symmetry_dataset
from ase.atoms import Atoms
cell, positions, numbers = refine_cell(aseatoms, **kwargs)
refined_atoms = Atoms(numbers, scaled_positions=positions, cell=cell,
pbc=True)
sym_dataset = get_symmetry_dataset(refined_atoms, **kwargs)
unique_numbers = []
unique_positions = []
for i in set(sym_dataset['equivalent_atoms']):
unique_numbers.append(refined_atoms.numbers[i])
unique_positions.append(refined_atoms.get_scaled_positions()[i])
unique_atoms = Atoms(unique_numbers,
scaled_positions=unique_positions,
cell=cell, pbc=True)
return unique_atoms, {'hm': sym_dataset['international'],
'hall': sym_dataset['hall'],
'tables': sym_dataset['number'],
'rotations': sym_dataset['rotations'],
'translations': sym_dataset['translations']}
@optional_inline
def _get_cif_ase_inline(struct, parameters):
"""
Creates :py:class:`aiida.orm.data.cif.CifData` using ASE.
.. note:: requires ASE module.
"""
from aiida.orm.data.cif import CifData
kwargs = {}
if parameters is not None:
kwargs = parameters.get_dict()
cif = CifData(ase=struct.get_ase(**kwargs))
formula = struct.get_formula(mode='hill', separator=' ')
for i in cif.values.keys():
cif.values[i]['_symmetry_space_group_name_H-M'] = 'P 1'
cif.values[i]['_symmetry_space_group_name_Hall'] = 'P 1'
cif.values[i]['_symmetry_Int_Tables_number'] = 1
cif.values[i]['_cell_formula_units_Z'] = 1
cif.values[i]['_chemical_formula_sum'] = formula
return {'cif': cif}
def atom_kinds_to_html(atom_kind):
"""
Construct an HTML-formatted representation of an atomic kind; e.g.,
an alloy with 0.5 Ge, 0.4 Si and 0.1 vacancy is represented as
Ge<sub>0.5</sub> + Si<sub>0.4</sub> + vacancy<sub>0.1</sub>
Args:
atom_kind: a string with the name of the atomic kind, as printed by
kind.get_symbols_string(), e.g. Ba0.80Ca0.10X0.10
Returns:
html code for rendered formula
"""
# Parse the formula (TODO can be made more robust though never fails if
# it takes strings generated with kind.get_symbols_string())
import re
elements = re.findall(r'([A-Z][a-z]*)([0-1]\.[0-9]*)?', atom_kind)
# Compose the html string
html_formula_pieces = []
for element in elements:
# replace element X by 'vacancy'
species = element[0] if element[0] != 'X' else 'vacancy'
weight = element[1] if element[1] != '' else None
if weight is not None:
html_formula_pieces.append(species + '<sub>' + weight +
'</sub>')
else:
html_formula_pieces.append(species)
html_formula = ' + '.join(html_formula_pieces)
return html_formula
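# Example (doctest-style), for a kind string as produced by get_symbols_string():
# >>> atom_kinds_to_html('Ba0.80Ca0.10X0.10')
# 'Ba<sub>0.80</sub> + Ca<sub>0.10</sub> + vacancy<sub>0.10</sub>'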
class StructureData(Data):
"""
This class contains the information about a given structure, i.e. a
collection of sites together with a cell, the
boundary conditions (whether they are periodic or not) and other
related useful information.
"""
_set_incompatibilities = [("ase", "cell"), ("ase", "pbc"),
("ase", "pymatgen"), ("ase", "pymatgen_molecule"),
("ase", "pymatgen_structure"),
("cell", "pymatgen"),
("cell", "pymatgen_molecule"),
("cell", "pymatgen_structure"),
("pbc", "pymatgen"), ("pbc", "pymatgen_molecule"),
("pbc", "pymatgen_structure"),
("pymatgen", "pymatgen_molecule"),
("pymatgen", "pymatgen_structure"),
("pymatgen_molecule", "pymatgen_structure")]
_dimensionality_label = {
0: "",
1: "length",
2: "surface",
3: "volume"
}
@property
def _set_defaults(self):
parent_dict = super(StructureData, self)._set_defaults
parent_dict.update({
"pbc": [True, True, True],
"cell": [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]
})
return parent_dict
def get_dimensionality(self):
"""
This function checks the dimensionality of the structure and
calculates its length/surface/volume
:return: returns the dimensionality and length/surface/volume
"""
import numpy as np
retdict = {}
cell = np.array(self.cell)
pbc = np.array(self.pbc)
dim = len(pbc[pbc])
retdict['dim'] = dim
retdict['label'] = self._dimensionality_label[dim]
if dim == 0:
pass
elif dim == 1:
v = cell[pbc]
retdict['value'] = np.linalg.norm(v)
elif dim == 2:
vectors = cell[pbc]
retdict['value'] = np.linalg.norm(np.cross(vectors[0], vectors[1]))
elif dim == 3:
retdict['value'] = np.dot(cell[0], np.cross(cell[1], cell[2]))
else:
raise ValueError("Dimensionality {} must be <= 3".format(dim))
return retdict
def set_ase(self, aseatoms):
"""
Load the structure from an ASE object.
"""
if is_ase_atoms(aseatoms):
# Read the ase structure
self.cell = aseatoms.cell
self.pbc = aseatoms.pbc
self.clear_kinds() # This also calls clear_sites
for atom in aseatoms:
self.append_atom(ase=atom)
else:
raise TypeError("The value is not an ase.Atoms object")
def set_pymatgen(self, obj, **kwargs):
"""
Load the structure from a pymatgen object.
.. note:: Requires the pymatgen module (version >= 3.0.13, usage
of earlier versions may cause errors).
"""
typestr = type(obj).__name__
try:
func = getattr(self, "set_pymatgen_{}".format(typestr.lower()))
except AttributeError:
raise AttributeError("Converter for '{}' to AiiDA structure "
"does not exist".format(typestr))
func(obj, **kwargs)
def set_pymatgen_molecule(self, mol, margin=5):
"""
Load the structure from a pymatgen Molecule object.
:param margin: the margin to be added in all directions of the
bounding box of the molecule.
.. note:: Requires the pymatgen module (version >= 3.0.13, usage
of earlier versions may cause errors).
"""
box = [max([x.coords.tolist()[0] for x in mol.sites]) -
min([x.coords.tolist()[0] for x in mol.sites]) + 2 * margin,
max([x.coords.tolist()[1] for x in mol.sites]) -
min([x.coords.tolist()[1] for x in mol.sites]) + 2 * margin,
max([x.coords.tolist()[2] for x in mol.sites]) -
min([x.coords.tolist()[2] for x in mol.sites]) + 2 * margin]
self.set_pymatgen_structure(mol.get_boxed_structure(*box))
self.pbc = [False, False, False]
def set_pymatgen_structure(self, struct):
"""
Load the structure from a pymatgen Structure object.
.. note:: periodic boundary conditions are set to True in all
three directions.
.. note:: Requires the pymatgen module (version >= 3.3.5, usage
of earlier versions may cause errors).
:raise ValueError: if there are partial occupancies together with spins.
"""
def build_kind_name(species_and_occu):
"""
Build a kind name from a pymatgen Composition, including an additional ordinal if spin is included,
e.g. it returns '<specie>1' for an atom with spin < 0 and '<specie>2' for an atom with spin > 0,
otherwise (no spin) it returns None
:param species_and_occu: a pymatgen species and occupations dictionary
:return: a string representing the kind name or None
"""
species = list(species_and_occu.keys())
occupations = list(species_and_occu.values())
has_spin = any(specie.as_dict().get('properties', {}).get('spin', 0) != 0 for specie in species)
has_partial_occupancies = (len(occupations) != 1 or occupations[0] != 1.0)
if has_partial_occupancies and has_spin:
raise ValueError('Cannot set partial occupancies and spins at the same time')
if has_spin:
symbols = [specie.symbol for specie in species]
kind_name = create_automatic_kind_name(symbols, occupations)
# If there is spin, we can only have a single specie, otherwise we would have raised above
specie = species[0]
spin = specie.as_dict().get('properties', {}).get('spin', 0)
if spin < 0:
kind_name += '1'
else:
kind_name += '2'
return kind_name
else:
return None
self.cell = struct.lattice.matrix.tolist()
self.pbc = [True, True, True]
self.clear_kinds()
for site in struct.sites:
if 'kind_name' in site.properties:
kind_name = site.properties['kind_name']
else:
kind_name = build_kind_name(site.species_and_occu)
inputs = {
'symbols': [x.symbol for x in site.species_and_occu.keys()],
'weights': [x for x in site.species_and_occu.values()],
'position': site.coords.tolist()
}
if kind_name is not None:
inputs['name'] = kind_name
self.append_atom(**inputs)
def _validate(self):
"""
Performs some standard validation tests.
"""
from aiida.common.exceptions import ValidationError
super(StructureData, self)._validate()
try:
_get_valid_cell(self.cell)
except ValueError as exc:
raise ValidationError("Invalid cell: {}".format(exc))
try:
get_valid_pbc(self.pbc)
except ValueError as exc:
raise ValidationError(
"Invalid periodic boundary conditions: {}".format(exc))
try:
# This will try to create the kinds objects
kinds = self.kinds
except ValueError as exc:
raise ValidationError(
"Unable to validate the kinds: {}".format(exc))
from collections import Counter
counts = Counter([k.name for k in kinds])
for c in counts:
if counts[c] != 1:
raise ValidationError("Kind with name '{}' appears {} times "
"instead of only one".format(
c, counts[c]))
try:
# This will try to create the sites objects
sites = self.sites
except ValueError as exc:
raise ValidationError(
"Unable to validate the sites: {}".format(exc))
for site in sites:
if site.kind_name not in [k.name for k in kinds]:
raise ValidationError(
"A site has kind {}, but no specie with that name exists"
"".format(site.kind_name))
kinds_without_sites = (
set(k.name for k in kinds) - set(s.kind_name for s in sites))
if kinds_without_sites:
raise ValidationError("The following kinds are defined, but there "
"are no sites with that kind: {}".format(
list(kinds_without_sites)))
def _prepare_xsf(self, main_file_name=""):
"""
Write the given structure to a string of format XSF (for XCrySDen).
"""
if self.is_alloy() or self.has_vacancies():
raise NotImplementedError("XSF for alloys or systems with "
"vacancies not implemented.")
sites = self.sites
return_string = "CRYSTAL\nPRIMVEC 1\n"
for cell_vector in self.cell:
return_string += " ".join(["%18.10f" % i for i in cell_vector])
return_string += "\n"
return_string += "PRIMCOORD 1\n"
return_string += "%d 1\n" % len(sites)
for site in sites:
# I checked above that it is not an alloy, therefore I take the
# first symbol
return_string += "%s " % _atomic_numbers[
self.get_kind(site.kind_name).symbols[0]]
return_string += "%18.10f %18.10f %18.10f\n" % tuple(site.position)
return return_string.encode('utf-8'), {}
def _prepare_cif(self, main_file_name=""):
"""
Write the given structure to a string of format CIF.
"""
from aiida.orm.data.cif import CifData
cif = CifData(ase=self.get_ase())
return cif._prepare_cif()
def _prepare_tcod(self, main_file_name="", **kwargs):
"""
Write the given structure to a string of format TCOD CIF.
"""
from aiida.tools.dbexporters.tcod import export_cif
return export_cif(self, **kwargs), {}
def _prepare_chemdoodle(self, main_file_name=""):
"""
Write the given structure to a string of format required by ChemDoodle.
"""
import numpy as np
from itertools import product
import aiida.utils.json as json
supercell_factors=[1, 1, 1]
# Get cell vectors and atomic position
lattice_vectors = np.array(self.get_attr('cell'))
base_sites = self.get_attr('sites')
start1 = -int(supercell_factors[0] / 2)
start2 = -int(supercell_factors[1] / 2)
start3 = -int(supercell_factors[2] / 2)
stop1 = start1 + supercell_factors[0]
stop2 = start2 + supercell_factors[1]
stop3 = start3 + supercell_factors[2]
grid1 = range(start1, stop1)
grid2 = range(start2, stop2)
grid3 = range(start3, stop3)
atoms_json = []
# Manual recenter of the structure
center = (lattice_vectors[0] + lattice_vectors[1] +
lattice_vectors[2]) / 2.
for ix, iy, iz in product(grid1, grid2, grid3):
for base_site in base_sites:
shift = (ix * lattice_vectors[0] + iy * lattice_vectors[1] + \
iz * lattice_vectors[2] - center).tolist()
kind_name = base_site['kind_name']
kind_string = self.get_kind(kind_name).get_symbols_string()
atoms_json.append(
{'l': kind_string,
'x': base_site['position'][0] + shift[0],
'y': base_site['position'][1] + shift[1],
'z': base_site['position'][2] + shift[2],
# 'atomic_elements_html': kind_string
'atomic_elements_html': atom_kinds_to_html(kind_string)
})
cell_json = {
"t": "UnitCell",
"i": "s0",
"o": (-center).tolist(),
"x": (lattice_vectors[0] - center).tolist(),
"y": (lattice_vectors[1] - center).tolist(),
"z": (lattice_vectors[2] - center).tolist(),
"xy": (lattice_vectors[0] + lattice_vectors[1]
- center).tolist(),
"xz": (lattice_vectors[0] + lattice_vectors[2]
- center).tolist(),
"yz": (lattice_vectors[1] + lattice_vectors[2]
- center).tolist(),
"xyz": (lattice_vectors[0] + lattice_vectors[1]
+ lattice_vectors[2] - center).tolist(),
}
return_dict = {"s": [cell_json],
"m": [{"a": atoms_json}],
"units": 'Å'
}
return json.dumps(return_dict), {}
def _prepare_xyz(self, main_file_name=""):
"""
Write the given structure to a string of format XYZ.
"""
if self.is_alloy() or self.has_vacancies():
raise NotImplementedError("XYZ for alloys or systems with "
"vacancies not implemented.")
sites = self.sites
cell = self.cell
return_list = ["{}".format(len(sites))]
return_list.append('Lattice="{} {} {} {} {} {} {} {} {}" pbc="{} {} {}"'.format(
cell[0][0], cell[0][1], cell[0][2],
cell[1][0], cell[1][1], cell[1][2],
cell[2][0], cell[2][1], cell[2][2],
self.pbc[0], self.pbc[1], self.pbc[2]
))
for site in sites:
# I checked above that it is not an alloy, therefore I take the
# first symbol
return_list.append("{:6s} {:18.10f} {:18.10f} {:18.10f}".format(
self.get_kind(site.kind_name).symbols[0],
site.position[0], site.position[1], site.position[2]))
return_string = "\n".join(return_list)
return return_string.encode('utf-8'), {}
def _parse_xyz(self, inputstring):
"""
Read the structure from a string of format XYZ.
"""
# idiom to get to the last block
atoms = None
for _, _, atoms in xyz_parser_iterator(inputstring):
pass
if atoms is None:
raise TypeError("The data does not contain any XYZ data")
self.clear_kinds()
self.pbc = (False, False, False)
for sym, position in atoms:
self.append_atom(symbols=sym, position=position)
def _adjust_default_cell(self, vacuum_factor=1.0, vacuum_addition=10.0,
pbc=(False, False, False)):
"""
If the structure was imported from an xyz file, it lacks a defined cell,
and the default cell is taken ([[1,0,0], [0,1,0], [0,0,1]]),
leading to an unphysical definition of the structure.
This method will adjust the cell
"""
import numpy as np
from ase.visualize import view
from aiida.common.utils import get_extremas_from_positions
# First, set PBC
# All the checks are done in get_valid_pbc called by set_pbc, no need to check anything here
self.set_pbc(pbc)
# Calculating the minimal cell:
positions = np.array([site.position for site in self.sites])
position_min, position_max = get_extremas_from_positions(positions)
# Translate the structure to the origin, such that the minimal values in each dimension
# amount to (0,0,0)
positions -= position_min
for index, site in enumerate(self.get_attr('sites')):
site['position'] = list(positions[index])
# The orthorhombic cell that (just) accommodates the whole structure is now given by the
# extremas of position in each dimension:
minimal_orthorhombic_cell_dimensions = np.array(
get_extremas_from_positions(positions)[1])
minimal_orthorhombic_cell_dimensions = np.dot(vacuum_factor,
minimal_orthorhombic_cell_dimensions)
minimal_orthorhombic_cell_dimensions += vacuum_addition
# Transform the vector (a, b, c ) to [[a,0,0], [0,b,0], [0,0,c]]
newcell = np.diag(minimal_orthorhombic_cell_dimensions)
self.set_cell(newcell.tolist())
def get_desc(self):
"""
Returns a string with information retrieved from the StructureData node's
properties.
:param self: the StructureData node
:return: the description string
"""
return self.get_formula(mode='hill_compact')
def get_symbols_set(self):
"""
Return a set containing the names of all elements involved in
this structure (i.e., it joins the lists of symbols of each
kind in the structure).
:returns: a set of strings of element names.
"""
return set(itertools.chain.from_iterable(
kind.symbols for kind in self.kinds))
def get_formula(self, mode='hill', separator=""):
"""
Return a string with the chemical formula.
:param mode: a string to specify how to generate the formula, can
assume one of the following values:
* 'hill' (default): count the number of atoms of each species,
then use Hill notation, i.e. alphabetical order with C and H
first if one or several C atom(s) is (are) present, e.g.
``['C','H','H','H','O','C','H','H','H']`` will return ``'C2H6O'``
``['S','O','O','H','O','H','O']`` will return ``'H2O4S'``
From <NAME>, J. Am. Chem. Soc., 22 (8), pp 478–494 (1900)
* 'hill_compact': same as hill but the number of atoms for each
species is divided by the greatest common divisor of all of them, e.g.
``['C','H','H','H','O','C','H','H','H','O','O','O']``
will return ``'CH3O2'``
* 'reduce': group repeated symbols e.g.
``['Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'O', 'O', 'O',
'Ba', 'Ti', 'Ti', 'O', 'O', 'O']`` will return ``'BaTiO3BaTiO3BaTi2O3'``
* 'group': will try to group as much as possible parts of the formula
e.g.
``['Ba', 'Ti', 'O', 'O', 'O', 'Ba', 'Ti', 'O', 'O', 'O',
'Ba', 'Ti', 'Ti', 'O', 'O', 'O']`` will return ``'(BaTiO3)2BaTi2O3'``
* 'count': same as hill (i.e. one just counts the number
of atoms of each species) without the re-ordering (take the
order of the atomic sites), e.g.
``['Ba', 'Ti', 'O', 'O', 'O','Ba', 'Ti', 'O', 'O', 'O']``
will return ``'Ba2Ti2O6'``
* 'count_compact': same as count but the number of atoms
for each species is divided by the greatest common divisor of
all of them, e.g.
``['Ba', 'Ti', 'O', 'O', 'O','Ba', 'Ti', 'O', 'O', 'O']``
will return ``'BaTiO3'``
:param separator: a string used to concatenate symbols. Default empty.
:return: a string with the formula
.. note:: in modes reduce, group, count and count_compact, the
initial order in which the atoms were appended by the user is
used to group and/or order the symbols in the formula
"""
symbol_list = [self.get_kind(s.kind_name).get_symbols_string()
for s in self.sites]
return get_formula(symbol_list, mode=mode, separator=separator)
def get_site_kindnames(self):
"""
Return a list with length equal to the number of sites of this structure,
where each element of the list is the kind name of the corresponding site.
.. note:: This is NOT necessarily a list of chemical symbols! Use
``[ self.get_kind(s.kind_name).get_symbols_string() for s in self.sites]``
for chemical symbols
:return: a list of strings
"""
return [this_site.kind_name for this_site in self.sites]
def get_composition(self):
"""
Returns the chemical composition of this structure as a dictionary,
where each key is the kind symbol (e.g. H, Li, Ba),
and each value is the number of occurrences of that element in this
structure. For BaZrO3 it would return {'Ba':1, 'Zr':1, 'O':3}.
No reduction with smallest common divisor!
:returns: a dictionary with the composition
"""
symbols_list = [self.get_kind(s.kind_name).get_symbols_string()
for s in self.sites]
composition = {
symbol: symbols_list.count(symbol)
for symbol
in set(symbols_list)
}
return composition
def get_ase(self):
"""
Get the ASE object.
Requires to be able to import ase.
:return: an ASE object corresponding to this
:py:class:`StructureData <aiida.orm.data.structure.StructureData>`
object.
.. note:: If any site is an alloy or has vacancies, a ValueError
is raised (from the site.get_ase() routine).
"""
return self._get_object_ase()
def get_pymatgen(self,**kwargs):
"""
Get pymatgen object. Returns Structure for structures with
periodic boundary conditions (in three dimensions) and Molecule
otherwise.
:param add_spin: True to add the spins to the pymatgen structure.
Default is False (no spin added).
.. note:: The spins are set according to the following rule:
* if the kind name ends with 1 -> spin=+1
* if the kind name ends with 2 -> spin=-1
.. note:: Requires the pymatgen module (version >= 3.0.13, usage
of earlier versions may cause errors).
"""
return self._get_object_pymatgen(**kwargs)
def get_pymatgen_structure(self,**kwargs):
"""
Get the pymatgen Structure object.
:param add_spin: True to add the spins to the pymatgen structure.
Default is False (no spin added).
.. note:: The spins are set according to the following rule:
* if the kind name ends with 1 -> spin=+1
* if the kind name ends with 2 -> spin=-1
.. note:: Requires the pymatgen module (version >= 3.0.13, usage
of earlier versions may cause errors).
:return: a pymatgen Structure object corresponding to this
:py:class:`StructureData <aiida.orm.data.structure.StructureData>`
object.
:raise ValueError: if periodic boundary conditions do not hold
in at least one dimension of real space.
"""
return self._get_object_pymatgen_structure(**kwargs)
def get_pymatgen_molecule(self):
"""
Get the pymatgen Molecule object.
.. note:: Requires the pymatgen module (version >= 3.0.13, usage
of earlier versions may cause errors).
:return: a pymatgen Molecule object corresponding to this
:py:class:`StructureData <aiida.orm.data.structure.StructureData>`
object.
"""
return self._get_object_pymatgen_molecule()
def append_kind(self, kind):
"""
Append a kind to the
:py:class:`StructureData <aiida.orm.data.structure.StructureData>`.
It makes a copy of the kind.
:param kind: the site to append, must be a Kind object.
"""
from aiida.common.exceptions import ModificationNotAllowed
if self.is_stored:
raise ModificationNotAllowed(
"The StructureData object cannot be modified, "
"it has already been stored")
new_kind = Kind(kind=kind) # So we make a copy
if kind.name in [k.name for k in self.kinds]:
raise ValueError("A kind with the same name ({}) already exists."
"".format(kind.name))
# If here, no exceptions have been raised, so I add the site.
self._append_to_attr('kinds', new_kind.get_raw())
# Note, this is a dict (with integer keys) so it allows for empty
# spots!
if not hasattr(self, '_internal_kind_tags'):
self._internal_kind_tags = {}
self._internal_kind_tags[len(
self.get_attr('kinds')) - 1] = kind._internal_tag
def append_site(self, site):
"""
Append a site to the
:py:class:`StructureData <aiida.orm.data.structure.StructureData>`.
It makes a copy of the site.
:param site: the site to append. It must be a Site object.
"""
from aiida.common.exceptions import ModificationNotAllowed
if self.is_stored:
raise ModificationNotAllowed(
"The StructureData object cannot be modified, "
"it has already been stored")
new_site = Site(site=site) # So we make a copy
if site.kind_name not in [k.name for k in self.kinds]:
raise ValueError("No kind with name '{}', available kinds are: "
"{}".format(site.kind_name,
[k.name for k in self.kinds]))
# If here, no exceptions have been raised, so I add the site.
self._append_to_attr('sites', new_site.get_raw())
def append_atom(self, **kwargs):
"""
Append an atom to the Structure, taking care of creating the
corresponding kind.
:param ase: the ase Atom object from which we want to create a new atom
(if present, this must be the only parameter)
:param position: the position of the atom (three numbers in angstrom)
:param symbols: passed to the constructor of the Kind object.
:param weights: passed to the constructor of the Kind object.
:param name: passed to the constructor of the Kind object. See also the note below.
.. note :: Note on the 'name' parameter (that is, the name of the kind):
* if specified, no checks are done on existing species. Simply,
a new kind with that name is created. If there is a name
clash, a check is done: if the kinds are identical, no error
is issued; otherwise, an error is issued because you are trying
to store two different kinds with the same name.
* if not specified, the name is automatically generated. Before
adding the kind, a check is done. If other species with the
same properties already exist, no new kinds are created, but
the site is added to the existing (identical) kind.
(Actually, the first kind that is encountered).
              Otherwise, the name is made unique first, by appending to the
              string containing the list of chemical symbols a number starting
              from 1, until a unique name is found.
.. note :: checks of equality of species are done using
the :py:meth:`~aiida.orm.data.structure.Kind.compare_with` method.
"""
aseatom = kwargs.pop('ase', None)
if aseatom is not None:
if kwargs:
raise ValueError("If you pass 'ase' as a parameter to "
"append_atom, you cannot pass any further"
"parameter")
position = aseatom.position
kind = Kind(ase=aseatom)
else:
position = kwargs.pop('position', None)
if position is None:
raise ValueError("You have to specify the position of the "
"new atom")
# all remaining parameters
kind = Kind(**kwargs)
# I look for identical species only if the name is not specified
_kinds = self.kinds
if 'name' not in kwargs:
            # If the kind is identical to an existing one, I reuse the existing
            # one; otherwise I add it as a new kind with a unique name
exists_already = False
for idx, existing_kind in enumerate(_kinds):
try:
existing_kind._internal_tag = self._internal_kind_tags[idx]
except KeyError:
# self._internal_kind_tags does not contain any info for
# the kind in position idx: I don't have to add anything
# then, and I continue
pass
if (kind.compare_with(existing_kind)[0]):
kind = existing_kind
exists_already = True
break
if not exists_already:
# There is not an identical kind.
# By default, the name of 'kind' just contains the elements.
# I then check that the name of 'kind' does not already exist,
# and if it exists I add a number (starting from 1) until I
# find a non-used name.
existing_names = [k.name for k in _kinds]
simplename = kind.name
counter = 1
while kind.name in existing_names:
kind.name = "{}{}".format(simplename, counter)
counter += 1
self.append_kind(kind)
else: # 'name' was specified
old_kind = None
for existing_kind in _kinds:
if existing_kind.name == kwargs['name']:
old_kind = existing_kind
break
if old_kind is None:
self.append_kind(kind)
else:
is_the_same, firstdiff = kind.compare_with(old_kind)
if is_the_same:
kind = old_kind
else:
raise ValueError("You are explicitly setting the name "
"of the kind to '{}', that already "
"exists, but the two kinds are different!"
" (first difference: {})".format(
kind.name, firstdiff))
site = Site(kind_name=kind.name, position=position)
self.append_site(site)
# def _set_site_type(self, new_site, reset_type_if_needed):
# """
# Check if the site can be added (i.e., if no other sites with the same type exist, or if
# they exist, then they are equal) and possibly sets its type.
#
# Args:
# new_site: the new site to check, must be a Site object.
# reset_type_if_needed: if False, an exception is raised if a site with same type but different
# properties (mass, symbols, weights, ...) is found.
# If True, and an atom with same type but different properties is found, all the sites
# already present in self.sites are checked to see if there is a site with the same properties.
# Then, the same type is set. Otherwise, a new type name is chosen adding a number to the site
# name such that the type is different from the existing ones.
# """
# from aiida.common.exceptions import ModificationNotAllowed
#
# if not self._to_be_stored:
# raise ModificationNotAllowed("The StructureData object cannot be modified, "
# "it has already been stored")
#
# type_list = self.get_types()
# if type_list:
# types, positions = zip(*type_list)
# else:
# types = []
# positions = []
#
# if new_site.type not in types:
# # There is no element with this type, OK to insert
# return
#
# # I get the index of the type, and the
# # first atom of this type (there should always be at least one!)
# type_idx = types.index(new_site.type)
# site_idx = positions[type_idx][0]
#
# # If it is of the same type, I am happy
# is_same_type, differences_str = new_site.compare_type(self.sites[site_idx])
# if is_same_type:
# return
#
# # If I am here, the type string is the same, but they are actually of different type!
#
# if not reset_type_if_needed:
# errstr = ("The site you are trying to insert is of type '{}'. However, another site already "
# "exists with same type, but with different properties! ({})".format(
# new_site.type, differences_str))
# raise ValueError(errstr)
#
# # I check if there is a atom of the same type
# for site in self.sites:
# is_same_type, _ = new_site.compare_type(site)
# if is_same_type:
# new_site.type = site.type
# return
#
# # If I am here, I didn't find any existing site which is of the same type
# existing_type_names = [the_type for the_type in types if the_type.startswith(new_site.type)]
#
# append_int = 1
# while True:
# new_typename = "{:s}{:d}".format(new_site.type, append_int)
# if new_typename not in existing_type_names:
# break
# append_int += 1
# new_site.type = new_typename
def clear_kinds(self):
"""
Removes all kinds for the StructureData object.
.. note:: Also clear all sites!
"""
from aiida.common.exceptions import ModificationNotAllowed
if self.is_stored:
raise ModificationNotAllowed(
"The StructureData object cannot be modified, "
"it has already been stored")
self._set_attr('kinds', [])
self._internal_kind_tags = {}
self.clear_sites()
def clear_sites(self):
"""
Removes all sites for the StructureData object.
"""
from aiida.common.exceptions import ModificationNotAllowed
if self.is_stored:
raise ModificationNotAllowed(
"The StructureData object cannot be modified, "
"it has already been stored")
self._set_attr('sites', [])
@property
def sites(self):
"""
Returns a list of sites.
"""
try:
raw_sites = self.get_attr('sites')
except AttributeError:
raw_sites = []
return [Site(raw=i) for i in raw_sites]
@property
def kinds(self):
"""
Returns a list of kinds.
"""
try:
raw_kinds = self.get_attr('kinds')
except AttributeError:
raw_kinds = []
return [Kind(raw=i) for i in raw_kinds]
def get_kind(self, kind_name):
"""
Return the kind object associated with the given kind name.
:param kind_name: String, the name of the kind you want to get
:return: The Kind object associated with the given kind_name, if
a Kind with the given name is present in the structure.
:raise: ValueError if the kind_name is not present.
"""
# Cache the kinds, if stored, for efficiency
if self.is_stored:
try:
kinds_dict = self._kinds_cache
except AttributeError:
self._kinds_cache = {_.name: _ for _ in self.kinds}
kinds_dict = self._kinds_cache
else:
kinds_dict = {_.name: _ for _ in self.kinds}
# Will raise ValueError if the kind is not present
try:
return kinds_dict[kind_name]
except KeyError:
raise ValueError("Kind name '{}' unknown".format(kind_name))
def get_kind_names(self):
"""
Return a list of kind names (in the same order of the ``self.kinds``
property, but return the names rather than Kind objects)
.. note:: This is NOT necessarily a list of chemical symbols! Use
get_symbols_set for chemical symbols
:return: a list of strings.
"""
return [k.name for k in self.kinds]
@property
def cell(self):
"""
Returns the cell shape.
:return: a 3x3 list of lists.
"""
return copy.deepcopy(self.get_attr('cell'))
@cell.setter
def cell(self, value):
self.set_cell(value)
def set_cell(self, value):
from aiida.common.exceptions import ModificationNotAllowed
if self.is_stored:
raise ModificationNotAllowed(
"The StructureData object cannot be modified, "
"it has already been stored")
the_cell = _get_valid_cell(value)
self._set_attr('cell', the_cell)
def reset_cell(self, new_cell):
"""
Reset the cell of a structure not yet stored to a new value.
:param new_cell: list specifying the cell vectors
:raises:
ModificationNotAllowed: if object is already stored
"""
from aiida.common.exceptions import ModificationNotAllowed
if self.is_stored:
raise ModificationNotAllowed()
self._set_attr('cell', new_cell)
def reset_sites_positions(self, new_positions, conserve_particle=True):
"""
Replace all the Site positions attached to the Structure
:param new_positions: list of (3D) positions for every sites.
:param conserve_particle: if True, allows the possibility of removing a site.
currently not implemented.
:raises ModificationNotAllowed: if object is stored already
:raises ValueError: if positions are invalid
.. note:: it is assumed that the order of the new_positions is
given in the same order of the one it's substituting, i.e. the
kind of the site will not be checked.
"""
from aiida.common.exceptions import ModificationNotAllowed
if self.is_stored:
raise ModificationNotAllowed()
if not conserve_particle:
# TODO:
raise NotImplementedError
else:
            # test consistency of the new input
n_sites = len(self.sites)
if n_sites != len(new_positions) and conserve_particle:
                raise ValueError(
                    "the new positions should be as many as the sites of "
                    "the previous structure.")
new_sites = []
for i in range(n_sites):
try:
this_pos = [float(j) for j in new_positions[i]]
except ValueError:
raise ValueError(
"Expecting a list of floats. Found instead {}"
.format(new_positions[i]))
if len(this_pos) != 3:
raise ValueError("Expecting a list of lists of length 3. "
"found instead {}".format(len(this_pos)))
# now append this Site to the new_site list.
new_site = Site(site=self.sites[i]) # So we make a copy
new_site.position = copy.deepcopy(this_pos)
new_sites.append(new_site)
# now clear the old sites, and substitute with the new ones
self.clear_sites()
for this_new_site in new_sites:
self.append_site(this_new_site)
@property
def pbc(self):
"""
Get the periodic boundary conditions.
:return: a tuple of three booleans, each one tells if there are periodic
boundary conditions for the i-th real-space direction (i=1,2,3)
"""
# return copy.deepcopy(self._pbc)
return (
self.get_attr('pbc1'), self.get_attr('pbc2'), self.get_attr('pbc3'))
@pbc.setter
def pbc(self, value):
self.set_pbc(value)
def set_pbc(self, value):
from aiida.common.exceptions import ModificationNotAllowed
if self.is_stored:
raise ModificationNotAllowed(
"The StructureData object cannot be modified, "
"it has already been stored")
the_pbc = get_valid_pbc(value)
# self._pbc = the_pbc
self._set_attr('pbc1', the_pbc[0])
self._set_attr('pbc2', the_pbc[1])
self._set_attr('pbc3', the_pbc[2])
@property
def cell_lengths(self):
"""
Get the lengths of cell lattice vectors in angstroms.
"""
import numpy
cell = self.cell
return [
numpy.linalg.norm(cell[0]),
numpy.linalg.norm(cell[1]),
numpy.linalg.norm(cell[2]),
]
@cell_lengths.setter
def cell_lengths(self, value):
self.set_cell_lengths(value)
def set_cell_lengths(self, value):
raise NotImplementedError("Modification is not implemented yet")
@property
def cell_angles(self):
"""
Get the angles between the cell lattice vectors in degrees.
"""
import numpy
cell = self.cell
lengths = self.cell_lengths
return [float(numpy.arccos(x) / numpy.pi * 180) for x in [
numpy.vdot(cell[1], cell[2]) / lengths[1] / lengths[2],
numpy.vdot(cell[0], cell[2]) / lengths[0] / lengths[2],
numpy.vdot(cell[0], cell[1]) / lengths[0] / lengths[1],
]]
@cell_angles.setter
def cell_angles(self, value):
self.set_cell_angles(value)
def set_cell_angles(self, value):
raise NotImplementedError("Modification is not implemented yet")
def is_alloy(self):
"""
        Check whether the structure contains at least one alloy kind.
:return: a boolean, True if at least one kind is an alloy
"""
return any(s.is_alloy() for s in self.kinds)
def has_vacancies(self):
"""
        Check whether the structure contains at least one kind with vacancies.
:return: a boolean, True if at least one kind has a vacancy
"""
return any(s.has_vacancies() for s in self.kinds)
def get_cell_volume(self):
"""
Returns the cell volume in Angstrom^3.
:return: a float.
"""
return calc_cell_volume(self.cell)
def _get_cif(self, converter='ase', store=False, **kwargs):
"""
Creates :py:class:`aiida.orm.data.cif.CifData`.
:param converter: specify the converter. Default 'ase'.
:param store: If True, intermediate calculation gets stored in the
AiiDA database for record. Default False.
:return: :py:class:`aiida.orm.data.cif.CifData` node.
"""
from aiida.orm.data.parameter import ParameterData
from . import structure # This same module
param = ParameterData(dict=kwargs)
try:
conv_f = getattr(structure, '_get_cif_{}_inline'.format(converter))
except AttributeError:
raise ValueError(
"No such converter '{}' available".format(converter))
ret_dict = conv_f(struct=self, parameters=param, store=store)
return ret_dict['cif']
def _get_object_phonopyatoms(self):
"""
Converts StructureData to PhonopyAtoms
:return: a PhonopyAtoms object
"""
from phonopy.structure.atoms import Atoms as PhonopyAtoms
atoms = PhonopyAtoms(symbols=[_.kind_name for _ in self.sites])
# Phonopy internally uses scaled positions, so you must store cell first!
atoms.set_cell(self.cell)
atoms.set_positions([_.position for _ in self.sites])
return atoms
def _get_object_ase(self):
"""
Converts
:py:class:`StructureData <aiida.orm.data.structure.StructureData>`
to ase.Atoms
:return: an ase.Atoms object
"""
import ase
asecell = ase.Atoms(cell=self.cell, pbc=self.pbc)
_kinds = self.kinds
for site in self.sites:
asecell.append(site.get_ase(kinds=_kinds))
return asecell
    def _get_object_pymatgen(self, **kwargs):
"""
Converts
:py:class:`StructureData <aiida.orm.data.structure.StructureData>`
to pymatgen object
:return: a pymatgen Structure for structures with periodic boundary
conditions (in three dimensions) and Molecule otherwise
.. note:: Requires the pymatgen module (version >= 3.0.13, usage
of earlier versions may cause errors).
"""
if self.pbc == (True, True, True):
return self._get_object_pymatgen_structure(**kwargs)
else:
return self._get_object_pymatgen_molecule(**kwargs)
    def _get_object_pymatgen_structure(self, **kwargs):
"""
Converts
:py:class:`StructureData <aiida.orm.data.structure.StructureData>`
to pymatgen Structure object
:param add_spin: True to add the spins to the pymatgen structure.
Default is False (no spin added).
.. note:: The spins are set according to the following rule:
* if the kind name ends with 1 -> spin=+1
* if the kind name ends with 2 -> spin=-1
:return: a pymatgen Structure object corresponding to this
:py:class:`StructureData <aiida.orm.data.structure.StructureData>`
object
        :raise ValueError: if periodic boundary conditions do not hold
            in at least one dimension of real space, or if there are partial
            occupancies together with spins (defined by kind names ending with '1' or '2').
.. note:: Requires the pymatgen module (version >= 3.0.13, usage
of earlier versions may cause errors)
"""
from pymatgen.core.structure import Structure
if self.pbc != (True, True, True):
raise ValueError("Periodic boundary conditions must apply in "
"all three dimensions of real space")
species = []
additional_kwargs = {}
        if (kwargs.pop('add_spin', False) and
                any(n.endswith('1') or n.endswith('2') for n in self.get_kind_names())):
# case when spins are defined -> no partial occupancy allowed
from pymatgen.core.structure import Specie
oxidation_state = 0 # now I always set the oxidation_state to zero
for s in self.sites:
k = self.get_kind(s.kind_name)
                if len(k.symbols) != 1 or (len(k.weights) != 1 or sum(k.weights) < 1.):
raise ValueError("Cannot set partial occupancies and spins "
"at the same time")
                species.append(Specie(k.symbols[0], oxidation_state,
                                      properties={'spin': -1 if k.name.endswith('1')
                                                  else 1 if k.name.endswith('2') else 0}))
else:
# case when no spin are defined
for s in self.sites:
k = self.get_kind(s.kind_name)
species.append({s: w for s, w in zip(k.symbols, k.weights)})
        if any(create_automatic_kind_name(self.get_kind(name).symbols, self.get_kind(name).weights) != name
               for name in self.get_site_kindnames()):
# add "kind_name" as a properties to each site, whenever
# the kind_name cannot be automatically obtained from the symbols
additional_kwargs['site_properties'] = {'kind_name': self.get_site_kindnames()}
if kwargs:
raise ValueError("Unrecognized parameters passed to pymatgen "
"converter: {}".format(kwargs.keys()))
positions = [list(x.position) for x in self.sites]
        return Structure(self.cell, species, positions,
                         coords_are_cartesian=True, **additional_kwargs)
    def _get_object_pymatgen_molecule(self, **kwargs):
"""
Converts
:py:class:`StructureData <aiida.orm.data.structure.StructureData>`
to pymatgen Molecule object
:return: a pymatgen Molecule object corresponding to this
:py:class:`StructureData <aiida.orm.data.structure.StructureData>`
object.
.. note:: Requires the pymatgen module (version >= 3.0.13, usage
of earlier versions may cause errors)
"""
from pymatgen.core.structure import Molecule
if kwargs:
raise ValueError("Unrecognized parameters passed to pymatgen "
"converter: {}".format(kwargs.keys()))
species = []
for s in self.sites:
k = self.get_kind(s.kind_name)
species.append({s: w for s, w in zip(k.symbols, k.weights)})
positions = [list(x.position) for x in self.sites]
return Molecule(species, positions)
class Kind(object):
"""
This class contains the information about the species (kinds) of the system.
It can be a single atom, or an alloy, or even contain vacancies.
"""
def __init__(self, **kwargs):
"""
        Create a kind.
One can either pass:
:param raw: the raw python dictionary that will be converted to a
Kind object.
:param ase: an ase Atom object
:param kind: a Kind object (to get a copy)
Or alternatively the following parameters:
:param symbols: a single string for the symbol of this site, or a list
of symbol strings
:param weights: (optional) the weights for each atomic species of
this site.
If only a single symbol is provided, then this value is
optional and the weight is set to 1.
:param mass: (optional) the mass for this site in atomic mass units.
If not provided, the mass is set by the
self.reset_mass() function.
:param name: a string that uniquely identifies the kind, and that
is used to identify the sites.
"""
# Internal variables
self._mass = None
self._symbols = None
self._weights = None
self._name = None
        # It will generally remain None; it is used to further
# identify this species. At the moment, it is used only when importing
# from ASE, if the species had a tag (different from zero).
## NOTE! This is not persisted on DB but only used while the class
# is loaded in memory (i.e., it is not output with the get_raw() method)
self._internal_tag = None
# Logic to create the site from the raw format
if 'raw' in kwargs:
if len(kwargs) != 1:
raise ValueError("If you pass 'raw', then you cannot pass "
"any other parameter.")
raw = kwargs['raw']
try:
self.set_symbols_and_weights(raw['symbols'], raw['weights'])
except KeyError:
raise ValueError("You didn't specify either 'symbols' or "
"'weights' in the raw site data.")
try:
self.mass = raw['mass']
except KeyError:
raise ValueError("You didn't specify the site mass in the "
"raw site data.")
try:
self.name = raw['name']
except KeyError:
raise ValueError("You didn't specify the name in the "
"raw site data.")
elif 'kind' in kwargs:
if len(kwargs) != 1:
raise ValueError("If you pass 'kind', then you cannot pass "
"any other parameter.")
oldkind = kwargs['kind']
try:
self.set_symbols_and_weights(oldkind.symbols, oldkind.weights)
self.mass = oldkind.mass
self.name = oldkind.name
self._internal_tag = oldkind._internal_tag
except AttributeError:
raise ValueError("Error using the Kind object. Are you sure "
"it is a Kind object? [Introspection says it is "
"{}]".format(str(type(oldkind))))
elif 'ase' in kwargs:
aseatom = kwargs['ase']
if len(kwargs) != 1:
raise ValueError("If you pass 'ase', then you cannot pass "
"any other parameter.")
try:
import numpy
self.set_symbols_and_weights([aseatom.symbol], [1.])
# ASE sets mass to numpy.nan for unstable species
if not numpy.isnan(aseatom.mass):
self.mass = aseatom.mass
else:
self.reset_mass()
except AttributeError:
raise ValueError("Error using the aseatom object. Are you sure "
"it is a ase.atom.Atom object? [Introspection says it is "
"{}]".format(str(type(aseatom))))
if aseatom.tag != 0:
self.set_automatic_kind_name(tag=aseatom.tag)
self._internal_tag = aseatom.tag
else:
self.set_automatic_kind_name()
else:
if 'symbols' not in kwargs:
raise ValueError("'symbols' need to be "
"specified (at least) to create a Site object. Otherwise, "
"pass a raw site using the 'raw' parameter.")
weights = kwargs.pop('weights', None)
self.set_symbols_and_weights(kwargs.pop('symbols'), weights)
try:
self.mass = kwargs.pop('mass')
except KeyError:
self.reset_mass()
try:
self.name = kwargs.pop('name')
except KeyError:
self.set_automatic_kind_name()
if kwargs:
raise ValueError("Unrecognized parameters passed to Kind "
"constructor: {}".format(kwargs.keys()))
def get_raw(self):
"""
Return the raw version of the site, mapped to a suitable dictionary.
This is the format that is actually used to store each kind of the
structure in the DB.
:return: a python dictionary with the kind.
"""
return {
'symbols': self.symbols,
'weights': self.weights,
'mass': self.mass,
'name': self.name,
}
# def get_ase(self):
# """
# Return a ase.Atom object for this kind, setting the position to
# the origin.
#
# Note: If any site is an alloy or has vacancies, a ValueError is
# raised (from the site.get_ase() routine).
# """
# import ase
# if self.is_alloy() or self.has_vacancies():
# raise ValueError("Cannot convert to ASE if the site is an alloy "
# "or has vacancies.")
# aseatom = ase.Atom(position=[0.,0.,0.], symbol=self.symbols[0],
# mass=self.mass)
# return aseatom
def reset_mass(self):
"""
Reset the mass to the automatic calculated value.
The mass can be set manually; by default, if not provided,
it is the mass of the constituent atoms, weighted with their
weight (after the weight has been normalized to one to take
correctly into account vacancies).
This function uses the internal _symbols and _weights values and
thus assumes that the values are validated.
It sets the mass to None if the sum of weights is zero.
"""
w_sum = sum(self._weights)
if abs(w_sum) < _sum_threshold:
self._mass = None
return
normalized_weights = (i / w_sum for i in self._weights)
element_masses = (_atomic_masses[sym] for sym in self._symbols)
# Weighted mass
self._mass = sum(i * j for i, j in zip(normalized_weights, element_masses))
@property
def name(self):
"""
Return the name of this kind.
The name of a kind is used to identify the species of a site.
:return: a string
"""
return self._name
@name.setter
def name(self, value):
"""
Set the name of this site (a string).
"""
self._name = six.text_type(value)
def set_automatic_kind_name(self, tag=None):
"""
        Set the kind name to a string obtained by appending the symbols one
        after the other, without spaces, in alphabetical order;
        if the kind has a vacancy, an X is appended at the end too.
"""
name_string = create_automatic_kind_name(self.symbols,self.weights)
if tag is None:
self.name = name_string
else:
self.name = "{}{}".format(name_string, tag)
def compare_with(self, other_kind):
"""
Compare with another Kind object to check if they are different.
.. note:: This does NOT check the 'type' attribute. Instead, it compares
(with reasonable thresholds, where applicable): the mass, and the list
of symbols and of weights. Moreover, it compares the
``_internal_tag``, if defined (at the moment, defined automatically
only when importing the Kind from ASE, if the atom has a non-zero tag).
Note that the _internal_tag is only used while the class is loaded,
but is not persisted on the database.
:return: A tuple with two elements. The first one is True if the two sites
are 'equivalent' (same mass, symbols and weights), False otherwise.
The second element of the tuple is a string,
which is either None (if the first element was True), or contains
a 'human-readable' description of the first difference encountered
between the two sites.
"""
# Check length of symbols
if len(self.symbols) != len(other_kind.symbols):
return (False, "Different length of symbols list")
# Check list of symbols
for i in range(len(self.symbols)):
if self.symbols[i] != other_kind.symbols[i]:
return (False, "Symbol at position {:d} are different "
"({} vs. {})".format(
i + 1, self.symbols[i], other_kind.symbols[i]))
        # Check weights (assuming that the lists of weights and of symbols
        # have the same length, which should always be true)
for i in range(len(self.weights)):
if self.weights[i] != other_kind.weights[i]:
return (False, "Weight at position {:d} are different "
"({} vs. {})".format(
i + 1, self.weights[i], other_kind.weights[i]))
# Check masses
if abs(self.mass - other_kind.mass) > _mass_threshold:
return (False, "Masses are different ({} vs. {})"
"".format(self.mass, other_kind.mass))
if self._internal_tag != other_kind._internal_tag:
return (False, "Internal tags are different ({} vs. {})"
"".format(self._internal_tag,
other_kind._internal_tag))
# If we got here, the two Site objects are similar enough
# to be considered of the same kind
return (True, "")
@property
def mass(self):
"""
The mass of this species kind.
:return: a float
"""
return self._mass
@mass.setter
def mass(self, value):
the_mass = float(value)
if the_mass <= 0:
raise ValueError("The mass must be positive.")
self._mass = the_mass
@property
def weights(self):
"""
Weights for this species kind. Refer also to
        :func:validate_weights_tuple for the validation rules on the weights.
"""
return copy.deepcopy(self._weights)
@weights.setter
def weights(self, value):
"""
If value is a number, a single weight is used. Otherwise, a list or
tuple of numbers is expected.
None is also accepted, corresponding to the list [1.].
"""
weights_tuple = _create_weights_tuple(value)
if len(weights_tuple) != len(self._symbols):
raise ValueError("Cannot change the number of weights. Use the "
"set_symbols_and_weights function instead.")
validate_weights_tuple(weights_tuple, _sum_threshold)
self._weights = weights_tuple
def get_symbols_string(self):
"""
        Return a string that tries to match as closely as possible the symbols
        of this kind. If there is only one symbol (no alloy) with 100%
        occupancy, just return the symbol name. Otherwise, group the full
        string in curly brackets, and also try to write the composition
        (with 2-digit precision only).
        .. note:: If there is a vacancy (sum of weights<1), it is indicated
            with the X symbol followed by 1-sum(weights) (still with 2-digit
            precision, so it can be 0.00)
.. note:: Note the difference with respect to the symbols and the
symbol properties!
"""
return get_symbols_string(self._symbols, self._weights)
@property
def symbol(self):
"""
If the kind has only one symbol, return it; otherwise, raise a
ValueError.
"""
if len(self._symbols) == 1:
return self._symbols[0]
else:
raise ValueError("This kind has more than one symbol (it is an "
"alloy): {}".format(self._symbols))
@property
def symbols(self):
"""
        List of symbols for this kind. If the kind is a single atom,
        pass a list of one element only, or simply the string for that atom.
        For alloys, pass a list of elements.
.. note:: Note that if you change the list of symbols, the kind
name remains unchanged.
"""
return copy.deepcopy(self._symbols)
@symbols.setter
def symbols(self, value):
"""
If value is a string, a single symbol is used. Otherwise, a list or
tuple of strings is expected.
I set a copy of the list, so to avoid that the content changes
after the value is set.
"""
symbols_tuple = _create_symbols_tuple(value)
if len(symbols_tuple) != len(self._weights):
raise ValueError("Cannot change the number of symbols. Use the "
"set_symbols_and_weights function instead.")
validate_symbols_tuple(symbols_tuple)
self._symbols = symbols_tuple
def set_symbols_and_weights(self, symbols, weights):
"""
Set the chemical symbols and the weights for the site.
.. note:: Note that the kind name remains unchanged.
"""
symbols_tuple = _create_symbols_tuple(symbols)
weights_tuple = _create_weights_tuple(weights)
if len(symbols_tuple) != len(weights_tuple):
raise ValueError("The number of symbols and weights must coincide.")
validate_symbols_tuple(symbols_tuple)
validate_weights_tuple(weights_tuple, _sum_threshold)
self._symbols = symbols_tuple
self._weights = weights_tuple
def is_alloy(self):
"""
        Check whether the kind is an alloy.
:return: True if the kind has more than one element (i.e.,
len(self.symbols) != 1), False otherwise.
"""
return len(self._symbols) != 1
def has_vacancies(self):
"""
Returns True if the sum of the weights is less than one.
It uses the internal variable _sum_threshold as a threshold.
:return: a boolean
"""
return has_vacancies(self._weights)
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, str(self))
def __str__(self):
symbol = self.get_symbols_string()
return "name '{}', symbol '{}'".format(self.name, symbol)
class Site(object):
"""
This class contains the information about a given site of the system.
It can be a single atom, or an alloy, or even contain vacancies.
"""
def __init__(self, **kwargs):
"""
Create a site.
:param kind_name: a string that identifies the kind (species) of this site.
This has to be found in the list of kinds of the StructureData
object.
Validation will be done at the StructureData level.
:param position: the absolute position (three floats) in angstrom
"""
self._kind_name = None
self._position = None
if 'site' in kwargs:
site = kwargs.pop('site')
if kwargs:
raise ValueError("If you pass 'site', you cannot pass any "
"further parameter to the Site constructor")
if not isinstance(site, Site):
raise ValueError("'site' must be of type Site")
self.kind_name = site.kind_name
self.position = site.position
elif 'raw' in kwargs:
raw = kwargs.pop('raw')
if kwargs:
raise ValueError("If you pass 'raw', you cannot pass any "
"further parameter to the Site constructor")
try:
self.kind_name = raw['kind_name']
self.position = raw['position']
except KeyError as exc:
raise ValueError("Invalid raw object, it does not contain any "
"key {}".format(exc.args[0]))
except TypeError:
raise ValueError("Invalid raw object, it is not a dictionary")
else:
try:
self.kind_name = kwargs.pop('kind_name')
self.position = kwargs.pop('position')
except KeyError as exc:
raise ValueError("You need to specify {}".format(exc.args[0]))
if kwargs:
raise ValueError("Unrecognized parameters: {}".format(
kwargs.keys))
def get_raw(self):
"""
Return the raw version of the site, mapped to a suitable dictionary.
This is the format that is actually used to store each site of the
structure in the DB.
:return: a python dictionary with the site.
"""
return {
'position': self.position,
'kind_name': self.kind_name,
}
def get_ase(self, kinds):
"""
Return a ase.Atom object for this site.
:param kinds: the list of kinds from the StructureData object.
        .. note:: If the kind of this site is an alloy or has vacancies,
            a ValueError is raised.
"""
from collections import defaultdict
import ase
# I create the list of tags
tag_list = []
used_tags = defaultdict(list)
for k in kinds:
# Skip alloys and vacancies
if k.is_alloy() or k.has_vacancies():
tag_list.append(None)
# If the kind name is equal to the specie name,
# then no tag should be set
elif six.text_type(k.name) == six.text_type(k.symbols[0]):
tag_list.append(None)
else:
# Name is not the specie name
if k.name.startswith(k.symbols[0]):
try:
new_tag = int(k.name[len(k.symbols[0])])
tag_list.append(new_tag)
used_tags[k.symbols[0]].append(new_tag)
continue
except ValueError:
pass
tag_list.append(k.symbols[0]) # I use a string as a placeholder
for i in range(len(tag_list)):
# If it is a string, it is the name of the element,
# and I have to generate a new integer for this element
# and replace tag_list[i] with this new integer
if isinstance(tag_list[i], six.string_types):
# I get a list of used tags for this element
existing_tags = used_tags[tag_list[i]]
if existing_tags:
new_tag = max(existing_tags) + 1
else: # empty list
new_tag = 1
# I store it also as a used tag!
used_tags[tag_list[i]].append(new_tag)
# I update the tag
tag_list[i] = new_tag
found = False
for k, t in zip(kinds, tag_list):
if k.name == self.kind_name:
kind = k
tag = t
found = True
break
if not found:
raise ValueError("No kind '{}' has been found in the list of kinds"
"".format(self.kind_name))
if kind.is_alloy() or kind.has_vacancies():
raise ValueError("Cannot convert to ASE if the kind represents "
"an alloy or it has vacancies.")
aseatom = ase.Atom(position=self.position,
symbol=str(kind.symbols[0]),
mass=kind.mass)
if tag is not None:
aseatom.tag = tag
return aseatom
@property
def kind_name(self):
"""
Return the kind name of this site (a string).
The type of a site is used to decide whether two sites are identical
(same mass, symbols, weights, ...) or not.
"""
return self._kind_name
@kind_name.setter
def kind_name(self, value):
"""
Set the type of this site (a string).
"""
self._kind_name = six.text_type(value)
@property
def position(self):
"""
Return the position of this site in absolute coordinates,
in angstrom.
"""
return copy.deepcopy(self._position)
@position.setter
def position(self, value):
"""
Set the position of this site in absolute coordinates,
in angstrom.
"""
try:
internal_pos = tuple(float(i) for i in value)
if len(internal_pos) != 3:
raise ValueError
# value is not iterable or elements are not floats or len != 3
except (ValueError, TypeError):
raise ValueError("Wrong format for position, must be a list of "
"three float numbers.")
self._position = internal_pos
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, str(self))
def __str__(self):
return "kind name '{}' @ {},{},{}".format(self.kind_name,
self.position[0],
self.position[1],
self.position[2])
# get_structuredata_from_qeinput has been moved to:
# aiida.tools.codespecific.quantumespresso.qeinputparser
```
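A minimal usage sketch of the `StructureData` API shown above (an illustration, not part of the original file): it assumes a configured AiiDA profile is loaded and, for the last line, that pymatgen >= 3.0.13 is installed.
```python
from aiida.orm.data.structure import StructureData

alat = 5.43  # lattice parameter in angstrom (illustrative value)
structure = StructureData(cell=[[alat, 0., 0.], [0., alat, 0.], [0., 0., alat]])
structure.pbc = (True, True, True)
# append_atom creates the kinds automatically; the explicit name 'Si1'
# forces a second kind even though the element is the same.
structure.append_atom(position=(0., 0., 0.), symbols='Si')
structure.append_atom(position=(alat / 4., alat / 4., alat / 4.), symbols='Si', name='Si1')
print(structure.get_kind_names())   # ['Si', 'Si1']
print(structure.get_cell_volume())  # cell volume in Angstrom^3
pymatgen_structure = structure.get_pymatgen_structure()
```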
#### File: implementation/sqlalchemy/computer.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from copy import copy
import six
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.session import make_transient
from aiida.common.utils import type_check
from aiida.backends.sqlalchemy.models.computer import DbComputer
from aiida.common.exceptions import InvalidOperation
from aiida.orm.implementation.computers import BackendComputerCollection, BackendComputer
from . import utils
import aiida.utils.json as json
class SqlaComputerCollection(BackendComputerCollection):
def create(self, **attributes):
return SqlaComputer(self.backend, attributes)
def list_names(cls):
from aiida.backends.sqlalchemy import get_scoped_session
session = get_scoped_session()
return session.query(DbComputer.name).all()
def delete(self, id):
import aiida.backends.sqlalchemy
try:
session = aiida.backends.sqlalchemy.get_scoped_session()
session.query(DbComputer).get(id).delete()
session.commit()
except SQLAlchemyError as exc:
raise InvalidOperation("Unable to delete the requested computer: it is possible that there "
"is at least one node using this computer (original message: {})".format(exc))
def from_dbmodel(self, computer):
"""
Construct a SqlaComputer instance from the corresponding database entry
:param computer: The DbComputer instance
:return: The Computer instance
:rtype: :class:`aiida.orm.implementation.sqlalchemy.computer.SqlaComputer`
"""
return SqlaComputer.from_dbmodel(computer, self.backend)
class SqlaComputer(BackendComputer):
@classmethod
def from_dbmodel(cls, dbmodel, backend):
type_check(dbmodel, DbComputer)
computer = SqlaComputer.__new__(cls)
super(SqlaComputer, computer).__init__(backend)
computer._dbcomputer = utils.ModelWrapper(dbmodel)
return computer
@property
def uuid(self):
return six.text_type(self._dbcomputer.uuid)
@property
def pk(self):
return self._dbcomputer.id
@property
def id(self):
return self._dbcomputer.id
def __init__(self, backend, attributes):
super(SqlaComputer, self).__init__(backend)
self._dbcomputer = utils.ModelWrapper(DbComputer(**attributes))
def set(self, **kwargs):
for key, val in kwargs.items():
if hasattr(self._dbcomputer, key):
setattr(self._dbcomputer, key, val)
else:
self._dbcomputer._metadata[key] = val
@property
def is_stored(self):
return self._dbcomputer.id is not None
def copy(self):
from aiida.backends.sqlalchemy import get_scoped_session
session = get_scoped_session()
if not self.is_stored:
raise InvalidOperation("You can copy a computer only after having stored it")
newdbcomputer = copy(self._dbcomputer)
make_transient(newdbcomputer)
session.add(newdbcomputer)
newobject = self.__class__(dbcomputer=newdbcomputer)
return newobject
@property
def dbcomputer(self):
return self._dbcomputer._model
def store(self):
try:
self._dbcomputer.save()
except SQLAlchemyError:
raise ValueError(
"Integrity error, probably the hostname already exists in the"
" DB")
return self
@property
def name(self):
return self._dbcomputer.name
@property
def description(self):
return self._dbcomputer.description
@property
def hostname(self):
return self._dbcomputer.hostname
def get_metadata(self):
return self._dbcomputer._metadata
def set_metadata(self, metadata_dict):
self._dbcomputer._metadata = metadata_dict
def get_transport_params(self):
"""
Return transport params stored in dbcomputer instance
"""
return self._dbcomputer.transport_params
def set_transport_params(self, val):
try:
json.dumps(val) # Check if json compatible
self._dbcomputer.transport_params = val
except ValueError:
raise ValueError("The set of transport_params are not JSON-able")
def get_name(self):
return self._dbcomputer.name
def set_name(self, val):
self._dbcomputer.name = val
def get_hostname(self):
return self._dbcomputer.hostname
def set_hostname(self, val):
self._dbcomputer.hostname = val
def get_description(self):
return self._dbcomputer.description
def set_description(self, val):
self._dbcomputer.description = val
def is_enabled(self):
return self._dbcomputer.enabled
def set_enabled_state(self, enabled):
self._dbcomputer.enabled = enabled
def get_scheduler_type(self):
return self._dbcomputer.scheduler_type
def set_scheduler_type(self, scheduler_type):
self._dbcomputer.scheduler_type = scheduler_type
def get_transport_type(self):
return self._dbcomputer.transport_type
def set_transport_type(self, val):
self._dbcomputer.transport_type = val
```
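A hedged sketch of driving the SQLAlchemy computer classes above; how the `backend` object is obtained, and that it exposes the collection as `backend.computers`, are assumptions not shown in this file.
```python
# Assumed: `backend` is a loaded SQLAlchemy backend instance that exposes
# the SqlaComputerCollection above as `backend.computers`.
computer = backend.computers.create(
    name='localhost',
    hostname='localhost',
    description='local workstation',
    transport_type='local',
    scheduler_type='direct',
)
computer.set_metadata({'workdir': '/scratch/{username}/aiida_run'})  # illustrative path
computer.store()
print(computer.pk, computer.is_stored)
```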
#### File: orm/implementation/users.py
```python
from __future__ import absolute_import
import abc
import six
from . import backends
__all__ = ('BackendUser', 'BackendUserCollection')
@six.add_metaclass(abc.ABCMeta)
class BackendUser(backends.BackendEntity):
"""
This is the base class for User information in AiiDA. An implementing
backend needs to provide a concrete version.
"""
# pylint: disable=invalid-name
REQUIRED_FIELDS = ['first_name', 'last_name', 'institution']
@property
def uuid(self):
"""
        For now users do not have UUIDs, so always return None.
:return: None
"""
return None
@abc.abstractproperty
def email(self):
pass
@abc.abstractmethod
@email.setter
def email(self, val):
pass
@abc.abstractmethod
def get_password(self):
pass
@abc.abstractmethod
def set_password(self, new_pass):
pass
@abc.abstractproperty
def first_name(self):
pass
@abc.abstractmethod
@first_name.setter
def first_name(self, val):
pass
@abc.abstractproperty
def last_name(self):
pass
@abc.abstractmethod
@last_name.setter
def last_name(self, val):
pass
@abc.abstractproperty
def institution(self):
pass
@abc.abstractmethod
@institution.setter
def institution(self, val):
pass
@abc.abstractproperty
def is_active(self):
pass
@abc.abstractmethod
@is_active.setter
def is_active(self, val):
pass
@abc.abstractproperty
def last_login(self):
pass
@abc.abstractmethod
@last_login.setter
def last_login(self, val):
pass
@abc.abstractproperty
def date_joined(self):
pass
@abc.abstractmethod
@date_joined.setter
def date_joined(self, val):
pass
class BackendUserCollection(backends.BackendCollection):
# pylint: disable=too-few-public-methods
ENTRY_TYPE = BackendUser
```
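The non-obvious part of this file is the abstract property/setter pattern (an `abstractproperty` re-decorated with `@<name>.setter` and `@abc.abstractmethod`). Below is a standalone sketch, independent of AiiDA, of how a concrete class satisfies such an interface.
```python
from __future__ import absolute_import
import abc
import six

@six.add_metaclass(abc.ABCMeta)
class UserBase(object):
    """Toy stand-in for BackendUser: one abstract read/write field."""
    @abc.abstractproperty
    def email(self):
        pass

    @abc.abstractmethod
    @email.setter
    def email(self, val):
        pass

class MemoryUser(UserBase):
    """Concrete implementation keeping the value in memory."""
    def __init__(self, email):
        self._email = email

    @property
    def email(self):
        return self._email

    @email.setter
    def email(self, val):
        self._email = val

user = MemoryUser('[email protected]')  # illustrative address
user.email = '[email protected]'
print(user.email)
```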
#### File: translator/data/structure.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from aiida.restapi.translator.data import DataTranslator
from aiida.restapi.common.exceptions import RestInputValidationError
from aiida.common.exceptions import LicensingException
class StructureDataTranslator(DataTranslator):
"""
Translator relative to resource 'structures' and aiida class StructureData
"""
# A label associated to the present class (coincides with the resource name)
__label__ = "structures"
# The AiiDA class one-to-one associated to the present class
from aiida.orm.data.structure import StructureData
_aiida_class = StructureData
# The string name of the AiiDA class
_aiida_type = "data.structure.StructureData"
# The string associated to the AiiDA class in the query builder lexicon
_qb_type = _aiida_type + '.'
_result_type = __label__
def __init__(self, **kwargs):
"""
Initialise the parameters.
Create the basic query_help
"""
super(StructureDataTranslator, self).__init__(Class=self.__class__, **kwargs)
@staticmethod
def get_visualization_data(node, visformat='xsf'):
"""
        Returns data in the specified format. If visformat is not specified,
        the data is returned in xsf format so that the structure can be visualized with JSmol.
"""
response = {}
response["str_viz_info"] = {}
if visformat in node.get_export_formats():
try:
response["str_viz_info"]["data"] = node._exportcontent(visformat)[0].decode('utf-8') # pylint: disable=protected-access
response["str_viz_info"]["format"] = visformat
except LicensingException as exc:
response = str(exc)
else:
raise RestInputValidationError("The format {} is not supported.".format(visformat))
# Add extra information
response["dimensionality"] = node.get_dimensionality()
response["pbc"] = node.pbc
response["formula"] = node.get_formula()
return response
@staticmethod
def get_downloadable_data(node, download_format="cif"):
"""
        Generic function extended for structure data
:param node: node object that has to be visualized
:param download_format: file extension format
:returns: data in selected format to download
"""
response = {}
if download_format in node.get_export_formats():
try:
response["data"] = node._exportcontent(download_format)[0] # pylint: disable=protected-access
response["status"] = 200
response["filename"] = node.uuid + "_structure." + download_format
except LicensingException as exc:
response["status"] = 500
response["data"] = str(exc)
return response
```
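A hedged sketch of calling the two static helpers above directly, outside the REST layer; the node pk is hypothetical and a configured AiiDA profile is assumed.
```python
from aiida.orm import load_node
from aiida.restapi.translator.data.structure import StructureDataTranslator

node = load_node(1234)  # hypothetical pk of a stored StructureData node

viz = StructureDataTranslator.get_visualization_data(node, visformat='xsf')
print(viz['formula'], viz['dimensionality'])

download = StructureDataTranslator.get_downloadable_data(node, download_format='cif')
if download.get('status') == 200:
    with open(download['filename'], 'wb') as handle:
        handle.write(download['data'])
```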
#### File: scheduler/plugins/test_sge.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import unittest
import logging
from aiida.scheduler.plugins.sge import *
text_qstat_ext_urg_xml_test = """<?xml version='1.0'?>
<job_info xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<queue_info>
<job_list state="running">
<JB_job_number>1212299</JB_job_number>
<JAT_prio>10.05000</JAT_prio>
<JAT_ntix>1.00000</JAT_ntix>
<JB_nurg>0.00000</JB_nurg>
<JB_urg>1000</JB_urg>
<JB_rrcontr>1000</JB_rrcontr>
<JB_wtcontr>0</JB_wtcontr>
<JB_dlcontr>0</JB_dlcontr>
<JB_name>Heusler</JB_name>
<JB_owner>dorigm7s</JB_owner>
<JB_project>ams.p</JB_project>
<JB_department>defaultdepartment</JB_department>
<state>r</state>
<JAT_start_time>2013-06-18T12:08:23</JAT_start_time>
<cpu_usage>81.00000</cpu_usage>
<mem_usage>15.96530</mem_usage>
<io_usage>0.00667</io_usage>
<tickets>126559</tickets>
<JB_override_tickets>0</JB_override_tickets>
<JB_jobshare>0</JB_jobshare>
<otickets>0</otickets>
<ftickets>0</ftickets>
<stickets>126559</stickets>
<JAT_share>0.27043</JAT_share>
<queue_name>serial.q@node080</queue_name>
<slots>1</slots>
</job_list>
</queue_info>
<job_info>
<job_list state="pending">
<JB_job_number>1212263</JB_job_number>
<JAT_prio>0.16272</JAT_prio>
<JAT_ntix>0.01127</JAT_ntix>
<JB_nurg>0.07368</JB_nurg>
<JB_urg>8000</JB_urg>
<JB_rrcontr>8000</JB_rrcontr>
<JB_wtcontr>0</JB_wtcontr>
<JB_dlcontr>0</JB_dlcontr>
<JB_name>Heusler</JB_name>
<JB_owner>dorigm7s</JB_owner>
<JB_project>ams.p</JB_project>
<JB_department>defaultdepartment</JB_department>
<state>qw</state>
<JB_submission_time>2013-06-18T12:00:57</JB_submission_time>
<tickets>1426</tickets>
<JB_override_tickets>0</JB_override_tickets>
<JB_jobshare>0</JB_jobshare>
<otickets>0</otickets>
<ftickets>0</ftickets>
<stickets>1426</stickets>
<JAT_share>0.00419</JAT_share>
<queue_name></queue_name>
<slots>8</slots>
</job_list>
<job_list state="pending">
<JB_job_number>1212322</JB_job_number>
<JAT_prio>0.00000</JAT_prio>
<JAT_ntix>0.00000</JAT_ntix>
<JB_nurg>0.00000</JB_nurg>
<JB_urg>0</JB_urg>
<JB_rrcontr>0</JB_rrcontr>
<JB_wtcontr>0</JB_wtcontr>
<JB_dlcontr>0</JB_dlcontr>
<JB_name>Heusler</JB_name>
<JB_owner>dorigm7s</JB_owner>
<JB_project>ams.p</JB_project>
<JB_department>defaultdepartment</JB_department>
<state>hqw</state>
<JB_submission_time>2013-06-18T12:09:47</JB_submission_time>
<tickets>0</tickets>
<JB_override_tickets>0</JB_override_tickets>
<JB_jobshare>0</JB_jobshare>
<otickets>0</otickets>
<ftickets>0</ftickets>
<stickets>0</stickets>
<JAT_share>0.00000</JAT_share>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
</job_info>
</job_info>"""
text_qstat_ext_urg_xml_test_raise = """<?xml version='1.0'?>
<job_info xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<queue_info>
</queue_info>
<job_info>
<job_list state="running">
<JB_job_number></JB_job_number>
<JB_owner>dorigm7s</JB_owner>
<state>qw</state>
</job_list>
</job_info>
</job_info>
"""
text_xml_parsing_fails_raise = """<?xml version='1.0'?>
<job_info xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<queue_infoXXXXXXXX>
</queue_info>
<job_info>
<job_list state="running">
<JB_job_number></JB_job_number>
<JB_owner>dorigm7s</JB_owner>
<state>qw</state>
</job_list>
</job_info>
</job_info>
"""
text_check_queue_job_info = """<?xml version='1.0'?>
<job_info xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<job_info>
<job_list state="running">
<JB_job_number>99</JB_job_number>
<JB_owner>dorigm7s</JB_owner>
<state>qw</state>
</job_list>
</job_info>
</job_info>
"""
test_raw_data = """<job_list state="running">
<JB_job_number>1212299</JB_job_number>
<JAT_prio>10.05000</JAT_prio>
<JAT_ntix>1.00000</JAT_ntix>
<JB_nurg>0.00000</JB_nurg>
<JB_urg>1000</JB_urg>
<JB_rrcontr>1000</JB_rrcontr>
<JB_wtcontr>0</JB_wtcontr>
<JB_dlcontr>0</JB_dlcontr>
<JB_name>Heusler</JB_name>
<JB_owner>dorigm7s</JB_owner>
<JB_project>ams.p</JB_project>
<JB_department>defaultdepartment</JB_department>
<state>r</state>
<JAT_start_time>2013-06-18T12:08:23</JAT_start_time>
<cpu_usage>81.00000</cpu_usage>
<mem_usage>15.96530</mem_usage>
<io_usage>0.00667</io_usage>
<tickets>126559</tickets>
<JB_override_tickets>0</JB_override_tickets>
<JB_jobshare>0</JB_jobshare>
<otickets>0</otickets>
<ftickets>0</ftickets>
<stickets>126559</stickets>
<JAT_share>0.27043</JAT_share>
<queue_name>serial.q@node080</queue_name>
<slots>1</slots>
</job_list>"""
class TestCommand(unittest.TestCase):
def test_get_joblist_command(self):
sge = SgeScheduler()
# TEST 1:
sge_get_joblist_command = sge._get_joblist_command(user='ExamplUsr')
self.assertTrue('qstat' in sge_get_joblist_command)
self.assertTrue('-xml' in sge_get_joblist_command)
self.assertTrue('-ext' in sge_get_joblist_command)
self.assertTrue('-u' in sge_get_joblist_command)
self.assertTrue('-urg' in sge_get_joblist_command)
self.assertTrue('ExamplUsr' in sge_get_joblist_command)
# TEST 2:
sge_get_joblist_command = sge._get_joblist_command()
self.assertTrue('qstat' in sge_get_joblist_command)
self.assertTrue('-xml' in sge_get_joblist_command)
self.assertTrue('-ext' in sge_get_joblist_command)
self.assertTrue('-u' in sge_get_joblist_command)
self.assertTrue('-urg' in sge_get_joblist_command)
self.assertTrue('*' in sge_get_joblist_command)
def test_detailed_jobinfo_command(self):
sge = SgeScheduler()
sge_get_djobinfo_command = sge._get_detailed_jobinfo_command('123456')
self.assertTrue('123456' in sge_get_djobinfo_command)
self.assertTrue('qacct' in sge_get_djobinfo_command)
self.assertTrue('-j' in sge_get_djobinfo_command)
def test_get_submit_command(self):
sge = SgeScheduler()
sge_get_submit_command = sge._get_submit_command('script.sh')
self.assertTrue('qsub' in sge_get_submit_command)
self.assertTrue('terse' in sge_get_submit_command)
self.assertTrue('script.sh' in sge_get_submit_command)
def test_parse_submit_output(self):
sge = SgeScheduler()
# TEST 1:
sge_parse_submit_output = sge._parse_submit_output(0, ' 1176936', '')
self.assertTrue('1176936' in sge_parse_submit_output)
# TEST 2:
logging.disable(logging.ERROR)
with self.assertRaisesRegexp(SchedulerError, '^Error during submission, retval=1'):
sge_parse_submit_output = sge._parse_submit_output(1, '', '')
logging.disable(logging.NOTSET)
def test_parse_joblist_output(self):
sge = SgeScheduler()
retval = 0
stdout = text_qstat_ext_urg_xml_test
stderr = ''
job_list = sge._parse_joblist_output(retval, stdout, stderr)
# Is job_list parsed correctly?:
job_on_cluster = 3
job_parsed = len(job_list)
self.assertEquals(job_parsed, job_on_cluster)
# Check if different job states are realized:
job_running = 1
job_running_parsed = len([j for j in job_list if j.job_state \
and j.job_state == JOB_STATES.RUNNING])
self.assertEquals(job_running, job_running_parsed)
job_held = 1
job_held_parsed = len([j for j in job_list if j.job_state \
and j.job_state == JOB_STATES.QUEUED_HELD])
self.assertEquals(job_held, job_held_parsed)
job_queued = 1
job_queued_parsed = len([j for j in job_list if j.job_state \
and j.job_state == JOB_STATES.QUEUED])
self.assertEquals(job_queued, job_queued_parsed)
# check if job id is recognized:
running_jobs = ['1212299']
parsed_running_jobs = [j.job_id for j in job_list if j.job_state \
and j.job_state == JOB_STATES.RUNNING]
self.assertEquals(set(running_jobs), set(parsed_running_jobs))
dispatch_time = [self._parse_time_string('2013-06-18T12:08:23')]
parsed_dispatch_time = [j.dispatch_time for j in job_list if j.dispatch_time]
self.assertEquals(set(dispatch_time), set(parsed_dispatch_time))
submission_times = [
self._parse_time_string('2013-06-18T12:00:57'),
self._parse_time_string('2013-06-18T12:09:47')
]
parsed_submission_times = [j.submission_time for j in job_list if j.submission_time]
self.assertEquals(set(submission_times), set(parsed_submission_times))
running_jobs = [test_raw_data]
parsed_running_jobs = [j.raw_data for j in job_list if j.job_state \
and j.job_state == JOB_STATES.RUNNING]
self.assertEquals(set(running_jobs), set(parsed_running_jobs))
# job_list_raise=sge._parse_joblist_output(retval, \
# text_qstat_ext_urg_xml_test_raise, stderr)
logging.disable(logging.ERROR)
stdout = text_xml_parsing_fails_raise
with self.assertRaises(SchedulerParsingError):
sge._parse_joblist_output(retval, stdout, stderr)
stdout = text_check_queue_job_info
with self.assertRaises(SchedulerError):
sge._parse_joblist_output(retval, stdout, stderr)
# Test: Is the except of IndexErrors raised correctly?
stdout = text_qstat_ext_urg_xml_test_raise
with self.assertRaises(IndexError):
sge._parse_joblist_output(retval, stdout, stderr)
logging.disable(logging.NOTSET)
def test_submit_script(self):
from aiida.scheduler.datastructures import JobTemplate
sge = SgeScheduler()
job_tmpl = JobTemplate()
job_tmpl.job_resource = sge.create_job_resource(parallel_env="mpi8", tot_num_mpiprocs=16)
job_tmpl.working_directory = "/home/users/dorigm7s/test"
job_tmpl.submit_as_hold = None
job_tmpl.rerunnable = None
job_tmpl.email = None
job_tmpl.email_on_started = None
job_tmpl.email_on_terminated = None
job_tmpl.job_name = "BestJobEver"
job_tmpl.sched_output_path = None
job_tmpl.sched_join_files = None
job_tmpl.queue_name = "FavQ.q"
job_tmpl.priority = None
job_tmpl.max_wallclock_seconds = "3600" # "23:59:59"
job_tmpl.job_environment = {"HOME": "/home/users/dorigm7s/", "WIENROOT": "$HOME:/WIEN2k"}
submit_script_text = sge._get_submit_script_header(job_tmpl)
self.assertTrue('#$ -wd /home/users/dorigm7s/test' in submit_script_text)
self.assertTrue('#$ -N BestJobEver' in submit_script_text)
self.assertTrue('#$ -q FavQ.q' in submit_script_text)
self.assertTrue('#$ -l h_rt=01:00:00' in submit_script_text)
# self.assertTrue( 'export HOME=/home/users/dorigm7s/'
# in submit_script_text )
self.assertTrue("# ENVIRONMENT VARIABLES BEGIN ###" in submit_script_text)
self.assertTrue("export HOME='/home/users/dorigm7s/'" in submit_script_text)
self.assertTrue("export WIENROOT='$HOME:/WIEN2k'" in submit_script_text)
@staticmethod
def _parse_time_string(string, fmt='%Y-%m-%dT%H:%M:%S'):
"""
Parse a time string in the format returned from qstat -xml -ext and
returns a datetime object.
Example format: 2013-06-13T11:53:11
"""
import time
import datetime
try:
time_struct = time.strptime(string, fmt)
except Exception as exc:
raise ValueError("Unable to parse time string {}, the message " "was {}".format(string, exc))
# I convert from a time_struct to a datetime object going through
# the seconds since epoch, as suggested on stackoverflow:
# http://stackoverflow.com/questions/1697815
return datetime.datetime.fromtimestamp(time.mktime(time_struct))
```
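Outside the unittest harness, the same parsing path can be exercised directly. A small sketch that reuses the sample `qstat -xml -ext -urg` output defined in the module above (so it is not self-contained on its own):
```python
from aiida.scheduler.plugins.sge import SgeScheduler

sge = SgeScheduler()
# text_qstat_ext_urg_xml_test is the sample XML string defined above.
jobs = sge._parse_joblist_output(0, text_qstat_ext_urg_xml_test, '')
for job in jobs:
    print(job.job_id, job.job_state)
```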
#### File: aiida_core/.ci/test_setup.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import unittest
import os
from os.path import abspath
from click.testing import CliRunner
from pgtest.pgtest import PGTest
from aiida.cmdline.commands.cmd_setup import setup
from aiida.cmdline.commands.cmd_quicksetup import quicksetup
from aiida.control.postgres import Postgres
from aiida.backends import settings as backend_settings
class QuicksetupTestCase(unittest.TestCase):
"""Test `verdi quicksetup`."""
def setUp(self):
self.runner = CliRunner()
self.backend = os.environ.get('TEST_AIIDA_BACKEND', 'django')
def test_user_setup(self):
"""
Test `verdi quicksetup` non-interactively
"""
backend_settings.AIIDADB_PROFILE = None
result = self.runner.invoke(quicksetup, [
'--backend={}'.format(self.backend), '--email=<EMAIL>', '--first-name=Giuseppe',
'--last-name=Verdi', '--institution=Scala', '--db-name=aiida_giuseppe_{}'.format(
self.backend), '--repository=aiida_giuseppe_{}'.format(self.backend), 'giuseppe-{}'.format(self.backend)
])
self.assertFalse(result.exception, msg=get_debug_msg(result))
def test_postgres_failure(self):
"""
        Test `verdi quicksetup` recovery when the initial database connection fails
"""
backend_settings.AIIDADB_PROFILE = None
result = self.runner.invoke(
quicksetup, [
'--backend={}'.format(self.backend), '--email=<EMAIL>', '--first-name=Giuseppe',
'--last-name=Verdi', '--institution=Scala', '--db-port=1111', '--db-name=aiida_giuseppe2_{}'.format(
self.backend), '--repository=aiida_giuseppe2_{}'.format(
self.backend), '--non-interactive', 'giuseppe2-{}'.format(self.backend)
],
input='nohost\n1111\naiida_giuseppe2_{}\npostgres\n\n'.format(self.backend),
catch_exceptions=False)
self.assertFalse(result.exception, msg=get_debug_msg(result))
class SetupTestCase(unittest.TestCase):
"""Test `verdi setup`."""
def setUp(self):
self.runner = CliRunner()
backend = os.environ.get('TEST_AIIDA_BACKEND', 'django')
self.backend = 'django' if backend == 'django' else 'sqlalchemy'
self.pg_test = PGTest()
self.postgres = Postgres(port=self.pg_test.port, interactive=False, quiet=True)
self.postgres.dbinfo = self.pg_test.dsn
self.postgres.determine_setup()
self.dbuser = 'aiida_SetupTestCase'
self.dbpass = '<PASSWORD>'
self.dbname = 'aiida_test_setup_{}'.format(self.backend)
self.postgres.create_dbuser(self.dbuser, self.dbpass)
self.postgres.create_db(self.dbuser, self.dbname)
self.repository = abspath('./aiida_radames_{}'.format(self.backend))
def tearDown(self):
self.postgres.drop_db(self.dbname)
self.postgres.drop_dbuser(self.dbuser)
self.pg_test.close()
def test_user_setup(self):
"""
Test `verdi setup` non-interactively
"""
backend_settings.AIIDADB_PROFILE = None
result = self.runner.invoke(setup, [
'--non-interactive', '--backend={}'.format(self.backend), '--email=<EMAIL>',
'--first-name=Radames', '--last-name=Verdi', '--institution=Scala', '--repository={}'.format(
self.repository), '--db-host=localhost', '--db-port={}'.format(
self.pg_test.port), '--db-name={}'.format(self.dbname), '--db-username={}'.format(
self.dbuser), '--db-password={}'.format(self.dbpass), 'radames_{}'.format(self.backend)
])
self.assertFalse(result.exception, msg=get_debug_msg(result))
def test_user_configure(self):
"""
Test `verdi setup` configure user
"""
backend_settings.AIIDADB_PROFILE = None
self.runner.invoke(setup, [
'--non-interactive', '--backend={}'.format(self.backend), '--email=<EMAIL>',
'--first-name=Radames', '--last-name=Verdi', '--institution=Scala', '--repository={}'.format(
self.repository), '--db-host=localhost', '--db-port={}'.format(
self.pg_test.port), '--db-name={}'.format(self.dbname), '--db-username={}'.format(
self.dbuser), '--db-password={}'.format(self.dbpass), 'radames2_{}'.format(self.backend)
])
tpl = '{email}\n{first_name}\n{last_name}\n{institution}\nyes\n{email}\n{engine}\n\n\n\n\n\n{repo}\nno\n\n'
backend_settings.AIIDADB_PROFILE = None
result = self.runner.invoke(
setup, ['radames2_{}'.format(self.backend), '--only-config'],
input=tpl.format(
email='<EMAIL>',
first_name='Radames2',
last_name='Verdi2',
institution='Scala2',
engine='postgresql_psycopg2',
repo=self.repository),
catch_exceptions=False)
self.assertFalse(result.exception, msg=get_debug_msg(result))
def get_debug_msg(result):
msg = '{}\n---\nOutput:\n{}'
return msg.format(result.exception, result.output)
if __name__ == '__main__':
unittest.main()
```
#### File: workflows/workfunctions/example_problem_plain_python.py
```python
a = 1
b = 2
c = 3
def add(a, b):
return a + b
def multiply(a, b):
return a * b
result = multiply(add(a, b), c)
```
#### File: workflows/workfunctions/example_problem_workfunction_data_types.py
```python
from aiida.orm.data.int import Int
from aiida.work.workfunctions import workfunction
a = Int(1)
b = Int(2)
c = Int(3)
@workfunction
def add(a, b):
return Int(a + b)
@workfunction
def multiply(a, b):
return Int(a * b)
result = multiply(add(a, b), c)
```
#### File: workflows/workfunctions/example_problem_workfunction_decorator.py
```python
from aiida.work.workfunctions import workfunction
a = 1
b = 2
c = 3
@workfunction
def add(a, b):
return a + b
@workfunction
def multiply(a, b):
return a * b
result = multiply(add(a, b), c)
``` |
{
"source": "joepvd/art-bot",
"score": 2
} |
#### File: art-bot/artbotlib/buildinfo.py
```python
import json
import re
from . import util
def buildinfo_for_release(so, name, release_img):
img_name = "machine-os-content" if name == "rhcos" else name # rhcos shortcut...
if ".ci." in re.sub(".*:", "", release_img):
so.say("Sorry, no ART build info for a CI image.")
return
if ":" in release_img:
# assume it's a pullspec already; make sure it's a known domain
if not re.match(r'(quay.io|registry.svc.ci.openshift.org)/', release_img):
so.say("Sorry, I can only look up pullspecs for quay.io or registry.svc.ci.")
return
elif "nightly" in release_img:
suffix = "-s390x" if "s390x" in release_img else "-ppc64le" if "ppc64le" in release_img else ""
release_img = f"registry.svc.ci.openshift.org/ocp{suffix}/release{suffix}:{release_img}"
else:
# assume public release name
release_img = f"quay.io/openshift-release-dev/ocp-release:{release_img}"
if not re.search(r'-(s390x|ppc64le|x86_64)$', release_img):
# assume x86_64 if not specified; TODO: handle older images released without -x86_64 in pullspec
release_img = f"{release_img}-x86_64"
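# Illustrative example (values are hypothetical, not from this repo): a plain
# public release name such as "4.5.13" falls through to the branch above and is
# expanded to "quay.io/openshift-release-dev/ocp-release:4.5.13-x86_64" before
# the `oc adm release info` lookup below.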
rc, stdout, stderr = util.cmd_gather(f"oc adm release info {release_img} --image-for {img_name}")
if rc:
so.say(f"Sorry, I wasn't able to query the release image pullspec {release_img}.")
util.please_notify_art_team_of_error(so, stderr)
return
pullspec = stdout.strip()
rc, stdout, stderr = util.cmd_gather(f"oc image info {pullspec} -o json")
if rc:
so.say(f"Sorry, I wasn't able to query the component image pullspec {pullspec}.")
util.please_notify_art_team_of_error(so, stderr)
return
try:
data = json.loads(stdout)
except Exception as exc:
so.say(f"Sorry, I wasn't able to decode the JSON info for pullspec {pullspec}.")
util.please_notify_art_team_of_error(so, str(exc))
return
if img_name == "machine-os-content":
# always a special case... not a brew build
try:
rhcos_build = data["config"]["config"]["Labels"]["version"]
except Exception as exc:
so.say(f"Sorry, I expected a 'version' label for pullspec {pullspec} but didn't see one. Weird huh?")
return
so.say(f"image {pullspec} came from RHCOS build {rhcos_build}")
return
try:
labels = data["config"]["config"]["Labels"]
name = labels["com.redhat.component"]
version = labels["version"]
release = labels["release"]
except Exception as exc:
so.say(f"Sorry, one of the component, version, or release labels is missing for pullspec {pullspec}. Weird huh?")
return
nvr = f"{name}-{version}-{release}"
so.say(f"{img_name} image {pullspec} came from brew build {nvr}")
return
``` |
{
"source": "JoepWeijers/jenkins-failed-build-warning-light",
"score": 3
} |
#### File: JoepWeijers/jenkins-failed-build-warning-light/jenkins-failed-build-warning-light.py
```python
import datetime
import requests
import time
import sys
import CHIP_IO.GPIO as GPIO
POLL_INTERVAL_SECONDS = 3
FAILED_JOB_COLORS = ['yellow', 'red']
PIN = "XIO-P4"
ON = GPIO.LOW
OFF = GPIO.HIGH
WORKDAY_START_TIME = datetime.time(8, 45)
WORKDAY_END_TIME = datetime.time(18, 0)
def during_working_hours():
now = datetime.datetime.today()
return now.weekday() in range(0,5) and WORKDAY_START_TIME < now.time() and now.time() < WORKDAY_END_TIME
def get_number_of_failed_jenkins_jobs(view_url):
req = requests.get(view_url + '/api/json?tree=jobs[color]')
if req.status_code != 200:
print "Failed to get status of Jenkins jobs, returned status: {}, content:\n{}".format(req.status_code, req.text)
job_results = [job for job in req.json()['jobs'] if job['color'] in FAILED_JOB_COLORS]
return len(job_results)
if __name__ == '__main__':
if len(sys.argv) == 1:
raise Exception("No Jenkins view URL passed. Usage: python jenkins-failed-build-warning-light.py http://jenkins.example.com/view/Main")
view_url = sys.argv[1]
print "Start monitoring Jenkins view {}".format(view_url)
GPIO.cleanup()
GPIO.setup(PIN, GPIO.OUT)
current_state = OFF
GPIO.output(PIN, current_state)
try:
while True:
if during_working_hours():
try:
number_of_failed_jobs = get_number_of_failed_jenkins_jobs(view_url)
if number_of_failed_jobs == 0:
print "Everything is OK"
current_state = OFF
else:
print "There are {} failing jobs".format(number_of_failed_jobs)
current_state = ON
GPIO.output(PIN, current_state)
except Exception as e:
print "Failed to get update status of Jenkins jobs: {}".format(str(e))
else:
print "Nobody is in the office"
if current_state == ON:
current_state = OFF
GPIO.output(PIN, current_state)
time.sleep(POLL_INTERVAL_SECONDS)
except (SystemExit, KeyboardInterrupt):
GPIO.output(PIN, OFF)
GPIO.cleanup()
``` |
{
"source": "joeqiao12/Paddle",
"score": 2
} |
#### File: utils/code_gen/api_gen.py
```python
import os
import yaml
import argparse
from api_base import BaseAPI
class ForwardAPI(BaseAPI):
prefix_tensor_name = 'dense_'
def __init__(self, api_item_yaml):
super(ForwardAPI, self).__init__(api_item_yaml)
def get_return_type(self, out_type_list):
return out_type_list[0] if len(
out_type_list) == 1 else "std::tuple<" + ",".join(
out_type_list) + ">"
def gene_output(self, output_type_list):
kernel_output = ""
output_names = []
output_create = ""
if len(output_type_list) == 1:
kernel_output = 'dense_out'
output_names.append('dense_out')
output_create = f"""
{self.outputs['return_type']} out;
auto dense_out = SetKernelOutput(kernel_backend, &out);"""
elif len(output_type_list) > 1:
output_create = f"""
{self.outputs['return_type']} out;"""
for i in range(len(output_type_list)):
kernel_output = kernel_output + f'dense_out_{i}, '
output_names.append(f'dense_out_{i}')
output_create = output_create + f"""
auto dense_out_{i} = SetKernelOutput(kernel_backend, &std::get<{i}>(out));"""
kernel_output = kernel_output[:-2]
else:
raise ValueError(
"{} : Output error: the output should not be empty.".format(
self.api))
return kernel_output, output_names, output_create
def header_include():
return """
#include <tuple>
#include "paddle/pten/api/include/tensor.h"
#include "paddle/pten/common/scalar.h"
#include "paddle/pten/common/scalar_array.h"
"""
def source_include(header_file_path):
return f"""
#include "{header_file_path}"
#include <memory>
#include "glog/logging.h"
#include "paddle/pten/api/lib/api_registry.h"
#include "paddle/pten/api/lib/api_utils.h"
#include "paddle/pten/api/lib/data_transform.h"
#include "paddle/pten/api/lib/kernel_dispatch.h"
#include "paddle/pten/api/lib/utils/storage.h"
#include "paddle/pten/core/kernel_registry.h"
#include "paddle/pten/infermeta/binary.h"
#include "paddle/pten/infermeta/multiary.h"
#include "paddle/pten/infermeta/nullary.h"
#include "paddle/pten/infermeta/unary.h"
#include "paddle/pten/kernels/declarations.h"
"""
def api_register():
return """
PT_REGISTER_API(Math);
"""
def api_namespace():
return ("""
namespace paddle {
namespace experimental {
""", """
} // namespace experimental
} // namespace paddle
""")
def generate_api(api_yaml_path, header_file_path, source_file_path):
with open(api_yaml_path, 'r') as f:
apis = yaml.load(f, Loader=yaml.FullLoader)
header_file = open(header_file_path, 'w')
source_file = open(source_file_path, 'w')
namespace = api_namespace()
header_file.write("#pragma once\n")
header_file.write(header_include())
header_file.write(namespace[0])
include_header_file = "paddle/pten/api/include/api.h"
source_file.write(source_include(include_header_file))
source_file.write(namespace[0])
for api in apis:
api_code = ForwardAPI(api)
print(api_code.gene_api_declaration())
header_file.write(api_code.gene_api_declaration())
source_file.write(api_code.gene_api_code())
header_file.write(namespace[1])
source_file.write(namespace[1])
source_file.write(api_register())
header_file.close()
source_file.close()
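# Illustrative direct call (paths are simply the argparse defaults declared in
# main() below, shown here only as an example):
#   generate_api('python/paddle/utils/code_gen/api.yaml',
#                'paddle/pten/api/include/api.h',
#                'paddle/pten/api/lib/api.cc')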
def main():
parser = argparse.ArgumentParser(
description='Generate PaddlePaddle C++ API files')
parser.add_argument(
'--api_yaml_path',
help='path to api yaml file',
default='python/paddle/utils/code_gen/api.yaml')
parser.add_argument(
'--api_header_path',
help='output of generated api header code file',
default='paddle/pten/api/include/api.h')
parser.add_argument(
'--api_source_path',
help='output of generated api source code file',
default='paddle/pten/api/lib/api.cc')
options = parser.parse_args()
api_yaml_path = options.api_yaml_path
header_file_path = options.api_header_path
source_file_path = options.api_source_path
generate_api(api_yaml_path, header_file_path, source_file_path)
if __name__ == '__main__':
main()
``` |
{
"source": "joequant/algobroker",
"score": 2
} |
#### File: algobroker/algobroker/broker_bitmex.py
```python
import my_path
import pprint
import algobroker
from algobroker import Broker
from cryptoexchange import bitmex
import sys
import traceback
class BrokerBitmex(Broker):
def __init__(self):
Broker.__init__(self, "broker_bitmex")
self.api = None
def process_data(self, data):
if self.api == None:
self.error("keys not initialized")
self.error(pprint.pformat(data))
return
cmd = data.get('cmd', "None")
self.debug("processing data command %s" % cmd)
if cmd == 'order':
self.api.place_order(data.get('quantity', None),
data.get('symbol', None),
data.get('price', None))
elif cmd == 'cancel':
self.debug("cancelling order")
self.api.cancel(data.get('orderID', None))
self.debug("orders cancelled")
elif cmd == 'cancel_all':
self.debug("getting order list")
orders = self.api.open_orders()
self.debug("cancelling orders")
for i in orders:
self.api.cancel(i.get('orderID', None))
self.debug("orders cancelled")
elif cmd == "report_all":
orders = self.api.open_orders()
self.info(pprint.pformat(data))
elif cmd == 'position':
self.info(pprint.pformat(self.api.position()))
else:
raise RuntimeError("unknown data %s" % cmd)
def process_control(self, data):
if algobroker.Broker.process_control(self, data):
return True
cmd = data.get("cmd", "None")
if cmd == "auth":
self.info("received auth message")
base_url = data.get('base_url', None)
login = data.get('login', None)
password = data.get('password', None)
otpToken = data.get('otpToken', None)
apiKey = data.get('apiKey', None)
apiSecret = data.get('apiSecret', None)
orderIDPrefix = data.get('orderIDPrefix', 'algo_bitmex_')
self.api = bitmex.BitMEX(base_url,
login,
password,
otpToken,
apiKey,
apiSecret,
orderIDPrefix)
try:
self.api.authenticate()
self.debug("get positions")
self.debug(pprint.pformat(self.api.position()))
except:
self.error("Authentication error")
self.api = None
else:
raise RuntimeError("unknown command %s" % cmd)
if __name__ == "__main__":
bp = BrokerBitmex()
bp.run()
```
#### File: algobroker/algobroker/broker_twilio.py
```python
import my_path
import pprint
import algobroker
from algobroker import Broker
import twilio
class BrokerTwilio(Broker):
def __init__(self):
Broker.__init__(self, "broker_twilio")
self.api = None
self.src_number = None
self.dst_number = None
def process_data(self, data):
if self.api == None or \
self.src_number == None or \
self.dst_number == None:
self.error("keys not initialized")
if (data['cmd'] == "alert" and
data['type'] == 'sms'):
params = {
'from': self.src_number,
'to': self.dst_number[data['to']],
'body': data['text'],
}
self.debug(pprint.pformat(params))
response = self.api.messages.create(**params)
self.debug(pprint.pformat(str(response)))
else:
self.error("unknown item")
self.error(pprint.pformat(data))
def process_control(self, data):
algobroker.Broker.process_control(self, data)
if data.get('cmd', None) == "auth":
self.auth_id = data['TWILIO_AUTH_ID']
self.auth_token = data['TWILIO_AUTH_TOKEN']
self.api = twilio.rest.TwilioRestClient(self.auth_id,
self.auth_token)
self.src_number = data['src_number']
self.dst_number = data['dst_number']
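# Illustrative auth control message (keys mirror the lookups above; the values
# shown are placeholders, not real credentials):
# {"cmd": "auth", "TWILIO_AUTH_ID": "AC...", "TWILIO_AUTH_TOKEN": "...",
#  "src_number": "+15550001111", "dst_number": {"ops": "+15552223333"}}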
if __name__ == "__main__":
bp = BrokerTwilio()
bp.run()
``` |
{
"source": "joequant/bitquant-local",
"score": 3
} |
#### File: ipython/examples/blackscholes.py
```python
from scipy import stats
import math
def date_fraction(start, to):
import calendar
from datetime import datetime
(year, month) = to.split("-")
(weekday, lastday) = calendar.monthrange(int(year), int(month))
return abs((datetime(int(year), int(month), lastday)-datetime.strptime(start, "%Y-%m-%d")).days) / 365.0
def black_scholes (cp, s, k, t, v, rf, div):
""" Price an option using the Black-Scholes model.
s: initial stock price
k: strike price
t: expiration time
v: volatility
rf: risk-free rate
div: dividend
cp: +1/-1 for call/put
"""
if (s <= 0.0):
s = 0.0
if (t == 0.0 or v==0.0 or s==0):
return max(0.0, cp * (s-k))
d1 = (math.log(s/k)+(rf-div+0.5*math.pow(v,2))*t)/(v*math.sqrt(t))
d2 = d1 - v*math.sqrt(t)
optprice = (cp*s*math.exp(-div*t)*stats.norm.cdf(cp*d1)) - (cp*k*math.exp(-rf*t)*stats.norm.cdf(cp*d2))
return optprice
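# Worked example (illustrative numbers, not from the original script): an
# at-the-money call with s=100, k=100, t=0.5 years, v=0.2, rf=0.01 and no
# dividend prices at roughly 5.9:
#   black_scholes(1, 100.0, 100.0, 0.5, 0.2, 0.01, 0.0)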
if __name__ == "__main__":
print(date_fraction("2015-07-02", "2015-09"))
``` |
{
"source": "joequant/Fudge-Python",
"score": 2
} |
#### File: Fudge-Python/fudgemsg/codecs.py
```python
import struct
from fudgemsg.types import INDICATOR
from fudgemsg import utils
def enc_indicator(val=None):
"""Encode a Fudge Indicator Type.
This is a null-length type."""
return ''
def enc_bool(val):
"""Encode a boolean as either a \0x00 or \0x01"""
# No '?' decode format in py2.4
if val:
return '\x01'
return '\x00'
def enc_byte(val):
"""encode a single unsignd byte"""
return struct.pack('!B', val)
def enc_short(val):
"""Encode a single signed int16"""
return struct.pack("!h", val)
def enc_int(val):
"""Encode a single signed int32"""
assert val >= utils.MIN_INT or val <= utils.MAX_INT
return struct.pack("!l", val)
def enc_long(val):
"""Encode a single signed int64"""
return struct.pack("!q", val)
def enc_float(val):
"""Encode a single float"""
return struct.pack("!f", val)
def enc_double(val):
"""Encode a single double"""
return struct.pack("!d", val)
def enc_unicode(val):
"""encode a single unicode string"""
utf8 = val.encode("utf-8")
fmt = "!%ss"% len(utf8)
return struct.pack(fmt, utf8)
def enc_str(val):
"""Encode a non-unicode string i.e a byte[]."""
return str(val)
def _unpack(fmt, encoded):
"""A simple wrapper around struct.unpack
Arguments:
fmt: The format string (see struct docs for details)
encoded: The encoded array"""
length = struct.calcsize(fmt)
return struct.unpack(fmt, encoded[:length])[0]
def dec_indicator(encoded):
"""decode a Fudge Indicator.
Returns:
A Singleton INDICATOR object"""
return INDICATOR
def dec_bool(encoded):
"""Decode a single boolean"""
# No '?' decode format in py2.4
if encoded[0] != '\x00':
return True
else:
return False
def dec_byte(encoded):
"""Decode a single unsigned byte"""
i = _unpack('!B', encoded)
return i % 2**8
def dec_short(encoded):
"""Decode a single signed short"""
i = _unpack('!h', encoded)
return (i + 2**15) % 2**16 - 2**15
def dec_int(encoded):
"""Decode a single signed int"""
i = _unpack('!l', encoded)
return (i + 2**31) % 2**32 - 2**31
def dec_long(encoded):
"""Decode a single signed long"""
i = _unpack('!q', encoded)
return (i + 2**63) % 2**64 - 2**63
def dec_float(encoded):
"""Decode a single signed float"""
return _unpack('!f', encoded)
def dec_double(encoded):
"""Decode a single signed double"""
return _unpack('!d', encoded)
def dec_unicode(encoded):
"""Decode a single unicode string"""
fmt = '!%ss'% len(encoded)
utf8 = struct.unpack(fmt, encoded)[0]
return unicode(utf8, "utf-8")
def dec_str(encoded):
"""Decode a non-unicode string i.e. byte[].
"""
return str(encoded)
# Header helpers
def enc_name(encoded):
"""encode a single name string"""
return struct.pack("!B", len(encoded)) + encoded
def dec_name(encoded):
"""Decode a name from field prefix string"""
length = ord(encoded[0])
return unicode(encoded[1:length+1])
# Arrays
def enc_array(encode_fn, encoded):
"""Encode an array, usually of numbers. We use a type \
specific encode function"""
# TODO(jamesc) - Slow but correct...
out = ''
for val in encoded:
out = out + encode_fn(val)
return out
def dec_array(decode_fn, width, encoded):
assert len(encoded)%width == 0
out = []
num_elements = len(encoded)/width
for val in range(0, num_elements):
out.append(decode_fn(encoded[val*width:val*width+width]))
return out
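# Round-trip sketch (illustrative, not part of the original module):
#   dec_int(enc_int(42)) -> 42
#   dec_array(dec_short, 2, enc_array(enc_short, [1, 2, 3])) -> [1, 2, 3]
# where 2 is the fixed byte width of an encoded short.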
```
#### File: tests/func_tests/test_simpletest.py
```python
import unittest
import cStringIO
from fudgemsg.message import Message, Envelope
from fudgemsg import registry
from fudgemsg import types
from fudgemsg import utils
from nose.plugins.skip import SkipTest
MY_NAME = u"Random Person"
ADDRESS = [u"123 Fake Street", u"Some City",
u"P0S T4L", u"Country"]
class TestSimpleTest(unittest.TestCase):
def test_simpletest(self):
"""Equivalent to the examples/simpletest"""
MSG_TYPE = registry.DEFAULT_REGISTRY[types.FUDGEMSG_TYPE_ID]
message = Message()
message.add(MY_NAME, name=u"name")
message.add(19801231L, ordinal=4, name=u"dob" )
submsg = Message()
for line, ordinal in zip(ADDRESS, range(len(ADDRESS))):
submsg.add(line, ordinal=ordinal)
message.add(submsg, name=u"address")
e = Envelope(message)
writer = cStringIO.StringIO()
e.encode(writer)
bytes = writer.getvalue()
self.assertEquals(110, len(bytes))
returned = Envelope.decode(bytes)
self.assertEquals(0, returned.schema_version)
self.assertEquals(0, returned.directives)
returned_message = returned.message
self.assertEquals(3, len(returned_message.fields))
f0 = returned_message.fields[0]
self.assertEquals(u'name', f0.name)
self.assertEquals(None, f0.ordinal)
self.assertEquals(MY_NAME, f0.value)
f1 = returned_message.fields[1]
self.assertEquals(u'dob', f1.name)
self.assertEquals(4, f1.ordinal)
self.assertEquals(19801231L, f1.value)
submsg = returned_message.fields[2]
self.assertEquals(u'address', submsg.name)
self.assertEquals(None, submsg.ordinal)
self.assertEquals(4, len(submsg.value.fields))
```
#### File: fudgemsg/tests/test_message.py
```python
import unittest
import cStringIO
from fudgemsg.message import Envelope, Message
from fudgemsg.types import INDICATOR
from nose.plugins.skip import SkipTest
class messageTests(unittest.TestCase):
def setUp(self):
self._output = cStringIO.StringIO()
def assertOutput(self, encoded):
self.assertEquals(encoded, self._output.getvalue())
def test_empty_envelope(self):
empty = Envelope(Message())
empty.encode(self._output)
self.assertOutput('\x00\x00\x00\x00\x00\x00\x00\x08')
def test_simple_message(self):
"""A Very simple message - a single indicator field"""
message = Message()
message.encode(self._output)
self.assertOutput('')
self._output.reset()
message.add(INDICATOR)
message.encode(self._output)
self.assertOutput('\x80\x00')
m = Message.decode('\x80\x00')
self.assertEquals(1, len(m.fields))
f = m.fields[0]
self.assertEquals(INDICATOR, f.value)
def test_message_with_multi_fields(self):
"""Check the encoding of a message with a few fields as
a message .
"""
encoded_mess = '\x80\x00\x90\x00\x00\x02\x80\x01\x01'
encoded_env = '\x00\x00\x00\x00\x00\x00\x00\x11' + encoded_mess
message = Message()
message.add(INDICATOR)
message.add(INDICATOR, ordinal=2)
message.add(True, classname='bool')
message.encode(self._output)
self.assertOutput(encoded_mess)
m = Message.decode(encoded_mess)
self.assertEquals(3, len(m.fields))
def test_envelope_with_multi_fields(self):
"""Check the encoding of a message with a few fields both as
an envelope.
"""
encoded_mess = '\x80\x00\x90\x00\x00\x02\x80\x01\x01'
encoded_env = '\x00\x00\x00\x00\x00\x00\x00\x11' + encoded_mess
message = Message()
message.add(INDICATOR)
message.add(INDICATOR, ordinal=2)
message.add(True, classname='bool')
e = Envelope(message)
e.encode(self._output)
self.assertOutput(encoded_env)
def test_simpletest_strings_submsg(self):
"""Test we can/encode the address submsg bit
of simpletest"""
address = [u'123 Fake Street', u'Some City',
u'P0S T4L', u'Country']
message = Message()
for line, ordinal in zip(address, range(len(address))):
message.add(line, ordinal=ordinal)
writer = cStringIO.StringIO()
message.encode(writer)
bytes = writer.getvalue()
m = Message.decode(bytes)
```
#### File: fudgemsg/tests/test_prefix.py
```python
import unittest
from fudgemsg.prefix import *
from fudgemsg import utils
class FieldPrefixTests(unittest.TestCase):
def setUp(self):
pass
def testCreate(self):
fixedwidth, variablewidth, has_ordinal, has_name = \
decode_prefix(0x98) # fixed, ordinal, name
self.assertTrue(fixedwidth)
self.assertTrue(has_ordinal)
self.assertTrue(has_name)
self.assertEquals(0, variablewidth)
byte = encode_prefix(fixedwidth, variablewidth, has_ordinal, has_name)
self.assertEquals(0x98, byte)
def testVariableWidth(self):
"""There are 4 varwidth options"""
ZERO = 0x00 # 0000 0000
ONE = 0x20 # 0010 0000
TWO = 0x40 # 0100 0000
FOUR = 0x60 # 0110 0000
fixedwidth, variablewidth, has_ordinal, has_name = \
decode_prefix(ZERO)
self.assertFalse(fixedwidth)
self.assertEquals(0, variablewidth)
byte = encode_prefix(fixedwidth, variablewidth, has_ordinal, has_name)
self.assertEquals(ZERO, byte)
fixedwidth, variablewidth, has_ordinal, has_name = \
decode_prefix(ONE)
self.assertFalse(fixedwidth)
self.assertEquals(1, variablewidth)
byte = encode_prefix(fixedwidth, variablewidth, has_ordinal, has_name)
self.assertEquals(ONE, byte)
fixedwidth, variablewidth, has_ordinal, has_name = \
decode_prefix(TWO)
self.assertFalse(fixedwidth)
self.assertEquals(2, variablewidth)
byte = encode_prefix(fixedwidth, variablewidth, has_ordinal, has_name)
self.assertEquals(TWO, byte)
fixedwidth, variablewidth, has_ordinal, has_name = \
decode_prefix(FOUR)
self.assertFalse(fixedwidth)
self.assertEquals(4, variablewidth)
byte = encode_prefix(fixedwidth, variablewidth, has_ordinal, has_name)
self.assertEquals(FOUR, byte)
```
#### File: Fudge-Python/fudgemsg/types.py
```python
INDICATOR_TYPE_ID = 0
BOOLEAN_TYPE_ID = 1
BYTE_TYPE_ID = 2
SHORT_TYPE_ID = 3
INT_TYPE_ID = 4
LONG_TYPE_ID = 5
BYTEARRAY_TYPE_ID = 6
SHORTARRAY_TYPE_ID = 7
INTARRAY_TYPE_ID = 8
LONGARRAY_TYPE_ID = 9
FLOAT_TYPE_ID = 10
DOUBLE_TYPE_ID = 11
FLOATARRAY_TYPE_ID = 12
DOUBLEARRAY_TYPE_ID = 13
STRING_TYPE_ID = 14
FUDGEMSG_TYPE_ID = 15
# No 16
BYTEARRAY4_TYPE_ID = 17
BYTEARRAY8_TYPE_ID = 18
BYTEARRAY16_TYPE_ID = 19
BYTEARRAY20_TYPE_ID = 20
BYTEARRAY32_TYPE_ID = 21
BYTEARRAY64_TYPE_ID = 22
BYTEARRAY128_TYPE_ID = 23
BYTEARRAY256_TYPE_ID = 24
BYTEARRAY512_TYPE_ID = 25
FUDGE_TYPE_NAMES = {
0 : "indicator",
1 : "boolean",
2 : "byte",
3 : "short",
4 : "int",
5 : "long",
6 : "byte[]",
SHORTARRAY_TYPE_ID : "short[]",
INTARRAY_TYPE_ID : "int[]",
LONGARRAY_TYPE_ID : "long[]",
FLOAT_TYPE_ID : "float",
DOUBLE_TYPE_ID : "double",
FLOATARRAY_TYPE_ID : "float[]",
DOUBLEARRAY_TYPE_ID :"double[]",
STRING_TYPE_ID : "string",
FUDGEMSG_TYPE_ID : "message",
BYTEARRAY4_TYPE_ID : "byte[4]",
BYTEARRAY8_TYPE_ID : "byte[8]",
BYTEARRAY16_TYPE_ID : "byte[16]",
BYTEARRAY20_TYPE_ID : "byte[20]",
BYTEARRAY32_TYPE_ID : "byte[32]",
BYTEARRAY64_TYPE_ID : "byte[64]",
BYTEARRAY128_TYPE_ID : "byte[128]",
BYTEARRAY256_TYPE_ID : "byte[256]",
BYTEARRAY512_TYPE_ID : "byte[512]",
}
def size_unicode(arg):
"""Calculate the size of a unicode string"""
return len(arg.encode('utf-8'))
def size_str(arg):
"""Return the size of a bytestring"""
return len(arg)
class Indicator(object):
"""A instance of a Fudge Indicator object.
This is a zero-length type, and we normally just return
this singleton instance."""
def __repr__(self):
return "Indicator()"
INDICATOR = Indicator()
```
#### File: fudgemsg/utils/hexdump.py
```python
import sys
def ascii(val):
if 32 <= val <= 126:
return chr(val)
else:
return '.'
ASC_MAP = [ ascii(x) for x in range(256) ]
class HexPrinter(object):
"""Print a Fudge Message as a byte array.
This basically isn't aware of any Fudge Structure, just
does a hexdump"""
def __init__(self, writer=sys.stdout, width=16):
"""Create a new HexPrinter
Arguments:
writer: the writer stream to output to
width: how many bytes per line (Default:16)
"""
self._writer = writer
self._width = width
def format(self, message):
"""Output a formatted message to the underlying writer
Arguments:
message: the message to write
"""
hexwidth = 3 * self._width + 1
start = 0
while start < len(message):
end = start + self._width
if end > len(message):
end = len(message)
line = message[start:end]
hex_line = [ "%02x" % ord(val)+' ' for val in line]
# Add middle space
hex_line.insert(self._width/2, ' ')
asc_line = [ ASC_MAP[ord(val)] for val in line]
self._writer.write("%08x %-*s |%s|\n" %
(start, hexwidth, ''.join(hex_line), ''.join(asc_line)))
start += self._width
self._writer.write("%08x\n" % len(message))
```
#### File: fudgemsg/utils/render.py
```python
import sys
from cStringIO import StringIO
from fudgemsg import types
def max_len(fields):
mlen = 0
for field in fields:
if len(field) > mlen:
mlen = len(field)
return mlen
class PrettyPrinter(object):
"""A PrettyPrinter for Fudge messages.
Based on the Java FudgeMsgFormatter.
"""
DEFAULT_INDENT = 2
def __init__(self, writer = sys.stdout, indent = DEFAULT_INDENT):
"""Create a new PrettyPrinter
Arguments:
writer: the writer stream to output to
indent: how much to indent a sub-message (Default:2)
"""
self._writer = writer
self._indent = indent
def format(self, message, depth=0):
"""Output a formatted message to the underlying writer
Arguments:
message: the message to write
depth: The depth of this message/sub-message (Default:0)
"""
if not message.fields:
return
num_fields = len(message.fields)
fieldspecs = []
for field, index in zip(message.fields, range(num_fields)):
fieldspec = self._get_fieldspec(field, index, depth)
fieldspecs.append(fieldspec)
max_fieldspec_width = max_len(fieldspecs)
typenames = [x.type_.name() for x in message.fields]
max_typename_width = max_len(typenames)
for field, fieldspec in zip(message.fields, fieldspecs) :
self._format_field(field, fieldspec, depth, \
max_fieldspec_width, max_typename_width)
def _format_field(self, field, fieldspec, depth, max_fs, max_tn):
"""Format a single field on a line"""
typename = field.type_.name()
self._writer.write("%-*s %-*s "%(max_fs, fieldspec, max_tn, typename) )
if field.is_type(types.FUDGEMSG_TYPE_ID):
self._writer.write('\n')
self.format(field.value, depth + 1)
else:
self._write_typed_value(field.type_, field.value)
self._writer.write('\n')
self._writer.flush()
def _get_fieldspec(self, field, index, depth):
"""Create a string representation of a Field specification header.
Arguments:
field : The field
index : Index within the current message of this field
depth : Depth of current message
Return:
Formatted string representation of the Field header
"""
buf = StringIO()
buf.write(' ' * self._indent * depth)
buf.write(str(index))
buf.write('-')
if field.ordinal is not None:
buf.write('(%s)'%field.ordinal)
if field.name:
buf.write(' ')
if field.name:
buf.write(field.name)
return buf.getvalue()
def _output_array(self, value, truncate=8):
num_elements = len(value)
if truncate > num_elements:
truncate = num_elements
self._writer.write('[')
self._writer.write(', '.join(str(x) for x in value[:truncate]))
if truncate < num_elements:
self._writer.write(" ... %d more"%(num_elements - truncate))
self._writer.write(']')
def _write_typed_value(self, type_, value):
renderers = {
types.SHORTARRAY_TYPE_ID : self._output_array,
types.FLOATARRAY_TYPE_ID : self._output_array,
types.DOUBLEARRAY_TYPE_ID : self._output_array,
types.INTARRAY_TYPE_ID : self._output_array,
types.LONGARRAY_TYPE_ID : self._output_array,
types.BYTEARRAY_TYPE_ID : self._output_array,
types.BYTEARRAY4_TYPE_ID : self._output_array,
types.BYTEARRAY8_TYPE_ID : self._output_array,
types.BYTEARRAY16_TYPE_ID : self._output_array,
types.BYTEARRAY20_TYPE_ID : self._output_array,
types.BYTEARRAY32_TYPE_ID : self._output_array,
types.BYTEARRAY64_TYPE_ID : self._output_array,
types.BYTEARRAY128_TYPE_ID : self._output_array,
types.BYTEARRAY256_TYPE_ID : self._output_array,
types.BYTEARRAY512_TYPE_ID : self._output_array,
}
try :
renderers[type_.type_id](value)
except KeyError:
self._writer.write(str(value))
``` |
{
"source": "joequant/libact",
"score": 3
} |
#### File: multilabel/tests/test_binary_relevance.py
```python
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from sklearn import datasets
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
import sklearn.linear_model
from libact.base.dataset import Dataset
from libact.models import LogisticRegression
from libact.models.multilabel import BinaryRelevance
class BinaryRelevanceTestCase(unittest.TestCase):
def setUp(self):
X, Y = datasets.make_multilabel_classification(random_state=1126)
self.X_train, self.X_test, self.Y_train, self.Y_test = \
train_test_split(X, Y, test_size=0.3, random_state=1126)
def test_binary_relevance_lr(self):
br = BinaryRelevance(base_clf=LogisticRegression(random_state=1126))
br.train(Dataset(self.X_train, self.Y_train))
br_pred_train = br.predict(self.X_train).astype(int)
br_pred_test = br.predict(self.X_test).astype(int)
br_pred_proba_train = br.predict_proba(self.X_train).astype(float)
br_pred_proba_test = br.predict_proba(self.X_test).astype(float)
for i in range(np.shape(self.Y_train)[1]):
clf = sklearn.linear_model.LogisticRegression(random_state=1126)
clf.fit(self.X_train, self.Y_train[:, i])
assert_array_equal(clf.predict(self.X_train).astype(int),
br_pred_train[:, i])
assert_array_equal(clf.predict(self.X_test).astype(int),
br_pred_test[:, i])
assert_array_equal(clf.predict_proba(self.X_train)[:, 1].astype(float),
br_pred_proba_train[:, i])
assert_array_equal(clf.predict_proba(self.X_test)[:, 1].astype(float),
br_pred_proba_test[:, i])
self.assertEqual(
np.mean(np.abs(self.Y_test - br_pred_test).mean(axis=1)),
br.score(Dataset(self.X_test, self.Y_test), 'hamming'))
self.assertRaises(NotImplementedError,
lambda: br.score(Dataset(self.X_test, self.Y_test),
criterion='not_exist'))
def test_binary_relevance_parallel(self):
br = BinaryRelevance(base_clf=LogisticRegression(random_state=1126),
n_jobs=1)
br.train(Dataset(self.X_train, self.Y_train))
br_par = BinaryRelevance(
base_clf=LogisticRegression(random_state=1126), n_jobs=2)
br_par.train(Dataset(self.X_train, self.Y_train))
assert_array_equal(br.predict(self.X_test).astype(int),
br_par.predict(self.X_test).astype(int))
if __name__ == '__main__':
unittest.main()
```
#### File: query_strategies/tests/test_variance_reduction.py
```python
import unittest
from numpy.testing import assert_array_equal
import numpy as np
from libact.base.dataset import Dataset
from libact.models import LogisticRegression
from libact.query_strategies import VarianceReduction
from .utils import run_qs
class VarianceReductionTestCase(unittest.TestCase):
"""Variance reduction test case using artifitial dataset"""
def setUp(self):
self.X = [[-2, -1], [1, 1], [-1, -2], [-1, -1], [1, 2], [2, 1]]
self.y = [0, 1, 0, 1, 0, 1]
self.quota = 4
def test_variance_reduction(self):
trn_ds = Dataset(self.X,
np.concatenate([self.y[:2],
[None] * (len(self.y) - 2)]))
qs = VarianceReduction(trn_ds, model=LogisticRegression(), sigma=0.1)
qseq = run_qs(trn_ds, qs, self.y, self.quota)
assert_array_equal(qseq, np.array([4, 5, 2, 3]))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joequant/metakernel",
"score": 3
} |
#### File: metakernel/magics/edit_magic.py
```python
from metakernel import Magic, option
import os
class EditMagic(Magic):
def line_edit(self, filename):
"""
%edit FILENAME - load code from filename into next cell for editing
This line magic will open the file in the next cell, and allow
you to edit it.
This is a shortcut for %load, and appending a "%%file" as first line.
Example:
%edit myprogram.ss
"""
orig_filename = filename
if filename.startswith("~"):
filename = os.path.expanduser(filename)
filename = os.path.abspath(filename)
text = open(filename).read()
self.kernel.payload.append({"source": "set_next_input",
"text": "%%file " + orig_filename + "\n" + text})
def register_magics(kernel):
kernel.register_magics(EditMagic)
``` |
{
"source": "joequant/sptrader",
"score": 2
} |
#### File: sptrader/scripts/webui.py
```python
import os
import sys
import time
import threading
import json
import errno
import datetime
import pkg_resources
import tendo.singleton
from queue import Queue
from flask import Flask, Response, jsonify, request, abort
#me = tendo.singleton.SingleInstance()
location = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(location, "..", "sptrader"))
sys.path.insert(0, os.path.join(location, ".."))
data_dir = os.path.join(location, "..", "data")
#blacklist backtrader version
btversion = pkg_resources.get_distribution("backtrader").version
if btversion == "172.16.17.32":
print("WARNING!!!!")
print("BACKTRADER 172.16.17.32 is BROKEN with sptrader")
from sse import ServerSentEvent
import sptrader
import strategy
import strategy.strategylist
try:
os.makedirs(data_dir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
ticker_file = os.path.join(data_dir, "ticker-%s.txt")
VERSION="0.11.1"
def get_ticker(s):
'''Get ticker file'''
if "/" in s:
raise ValueError
return ticker_file % s
sp = sptrader.SPTrader()
log_subscriptions = []
empty_cache = {"connected": {},
"account_info": None}
info_cache = empty_cache
ticker_products = set()
app = Flask(__name__,
static_url_path="/static",
static_folder=os.path.join(location, "..",
"static"))
@app.route("/")
def hello():
'''Main application file'''
return app.send_static_file("sptrader.html")
def send_dict(event_id, msg):
'''Send dictionary as event'''
for sub in log_subscriptions[:]:
sub.put((event_id, msg))
class Config(object):
'''Load and save config'''
def __init__(self):
self.config = {}
self.config['strategy_data'] = {}
self.config['backtest_data'] = {}
try:
with open(os.path.join(data_dir, "config.json")) as fp:
self.config.update(json.load(fp))
except IOError as error:
pass
for r in ['strategy_data']:
for k in self.config[r]:
self.config[r][k]['status'] = "stopped"
self.config[r][k]['comment'] = ""
send_dict("LocalStrategyStatus",
{"strategy": self.config[r][k]['strategy'],
"id": self.config[r][k]['id'],
"status": "stopped",
"comment": ""})
def save(self):
with open(os.path.join(data_dir, "config.json"), 'w') as fp:
json.dump(self.config, fp, indent=4, sort_keys=True)
def get(self, s):
return self.config.get(s, None)
def set(self, s, v, save=True):
self.config[s] = v
if save:
self.save()
def save_stratdata(self, root, kwargs, save=True):
sid = kwargs['id']
self.config[root][sid] = kwargs
if save:
self.save()
def get_stratdata_by_strategy(self, root):
d = {}
for k, v in self.config[root].items():
if v['strategy'] not in d:
d[v['strategy']] = []
d[v['strategy']].append(v)
for k in strategy.strategy_list():
if k not in d:
d[k] = []
return d
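# Schematic shape of the persisted config.json (inferred from the accessors
# above, not a documented format): top-level keys such as "logininfo",
# "strategy_data" and "backtest_data", the latter two being dicts keyed by the
# strategy/backtest id.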
my_config = Config()
def send_cdata(event_id, data):
send_dict(event_id, {"data": sp.cdata_to_py(data[0])})
@app.route("/login-info")
def logininfo():
d = {"status": "%d" % sp.get_login_status(80)}
if my_config.get('logininfo') is not None:
d['info'] = my_config.get('logininfo')
if info_cache['connected'] is not None:
d['connected'] = info_cache['connected']
if info_cache['account_info'] is not None:
d['account_info'] = info_cache['account_info']
d['account_fields'] = sp.fields("SPApiAccInfo")
d['strategy_list'] = strategy.strategy_list()
d['strategy_headers'] = {}
for i in strategy.strategy_list():
d['strategy_headers'][i] = strategy.headers(i)
d['strategy_data'] = my_config.get_stratdata_by_strategy('strategy_data')
d['backtest_data'] = my_config.get_stratdata_by_strategy('backtest_data')
d['versions'] = [
["web-backend", VERSION],
["backtrader", btversion]
]
return jsonify(d)
@sp.ffi.callback("LoginReplyAddr")
def login_reply(ret_code, ret_msg):
if ret_code == 0:
ret_msg = ''
else:
ret_msg = sp.ffi.string(ret_msg).decode('utf-8')
send_dict("LoginReply", {
"ret_code": ret_code,
"ret_msg": ret_msg
})
sp.register_login_reply(login_reply)
@sp.ffi.callback("ConnectedReplyAddr")
def connected_reply(host_type, con_status):
send_dict("ConnectedReply", {
"host_type": host_type,
"con_status": con_status
})
info_cache['connected'][host_type] = con_status
if host_type == 83 and con_status == 2:
sp.register_ticker_update(ticker_update)
for p in ticker_products:
sp.subscribe_ticker(p, 1)
sp.register_connecting_reply(connected_reply)
@sp.ffi.callback("ApiOrderRequestFailedAddr")
def order_request_failed(action, order, error_code, error_msg):
send_dict("OrderRequestFailed", {
"action": ord(action),
"data": sp.cdata_to_py(order[0]),
"error_code": error_code,
"error_msg": sp.ffi.string(error_msg).decode('utf-8')})
sp.register_order_request_failed(order_request_failed)
@sp.ffi.callback("ApiOrderReportAddr")
def order_report(rec_no, data):
send_dict("OrderReport", {
"rec_no": rec_no,
"data": sp.cdata_to_py(data[0])
})
sp.register_order_report(order_report)
@sp.ffi.callback("ApiOrderBeforeSendReportAddr")
def api_order_before_send_report(data):
send_cdata("OrderBeforeSendReport", data)
sp.register_order_before_send_report(api_order_before_send_report)
@sp.ffi.callback("AccountLoginReplyAddr")
def account_login_reply(accNo, ret_code, ret_msg):
send_dict("AccountLoginReply", {
"accNo": accNo,
"ret_code": ret_code,
"ret_msg": ret_msg
})
sp.register_account_login_reply(account_login_reply)
@sp.ffi.callback("AccountLogoutReplyAddr")
def account_logout_reply(ret_code, ret_msg):
send_dict("AccountLogoutReply", {
"ret_code": ret_code,
"ret_msg": ret_msg
})
sp.register_account_logout_reply(account_logout_reply)
@sp.ffi.callback("AccountInfoPushAddr")
def account_info_push(data):
info_cache['account_info'] = sp.cdata_to_py(data[0])
send_cdata("AccountInfoPush", data)
sp.register_account_info_push(account_info_push)
@sp.ffi.callback("AccountPositionPushAddr")
def account_position_push(data):
send_cdata("AccountPositionPush", data)
sp.register_account_position_push(account_position_push)
@sp.ffi.callback("UpdatedAccountPositionPushAddr")
def updated_account_position_push(data):
send_cdata("UpdatedAccountPositionPush", data)
sp.register_updated_account_position_push(updated_account_position_push)
@sp.ffi.callback("UpdatedAccountBalancePushAddr")
def updated_account_balance_push(data):
send_cdata("UpdatedAccountBalancePush", data)
sp.register_updated_account_balance_push(updated_account_balance_push)
@sp.ffi.callback("ApiTradeReportAddr")
def trade_report(rec_no, data):
send_cdata("TradeReport", data)
sp.register_trade_report(trade_report)
@sp.ffi.callback("ApiPriceUpdateAddr")
def api_price_update(data):
send_cdata("PriceUpdate", data)
sp.register_price_update(api_price_update)
@sp.ffi.callback("ApiTickerUpdateAddr")
def api_ticker_update(data):
send_cdata("TickerUpdate", data)
sp.register_ticker_update(api_ticker_update)
@sp.ffi.callback("PswChangeReplyAddr")
def psw_change_reply(ret_code, ret_msg):
send_cdata("PswChangeReply", {"ret_code": ret_code,
"ret_msg": ret_msg})
sp.register_psw_change_reply(psw_change_reply)
@sp.ffi.callback("ProductListByCodeReplyAddr")
def product_list_by_code(inst_code, is_ready, ret_msg):
data = {
"inst_code": inst_code,
"is_ready": is_ready,
"ret_msg": ret_msg,
"data": sp.get_product()}
send_dict("ProductListByCodeReply", data)
sp.register_product_list_by_code_reply(product_list_by_code)
@sp.ffi.callback("InstrumentListReplyAddr")
def instrument_list_reply(is_ready, ret_msg):
data = {"is_ready": is_ready,
"ret_msg": ret_msg,
"data": sp.get_instrument()}
send_dict("InstrumentListReply", data)
sp.register_instrument_list_reply(instrument_list_reply)
@sp.ffi.callback("BusinessDateReplyAddr")
def business_date_reply(business_date):
send_dict("BusinessDateReply", {
"business_date": business_date
})
sp.register_business_date_reply(business_date_reply)
@sp.ffi.callback("ApiMMOrderBeforeSendReportAddr")
def api_mm_order_before_send_report(mm_order):
send_cdata("MMOrderBeforeSendReport", mm_order)
sp.register_mm_order_before_send_report(api_mm_order_before_send_report)
@sp.ffi.callback("ApiMMOrderRequestFailedAddr")
def api_mm_order_request_failed(mm_order, err_code, err_msg):
send_dict("MMOrderRequestFailed",
{"data": sp.cdata_to_py(mm_order[0]),
"err_code": sp.cdata_to_py(err_code),
"err_msg": sp.cdata_to_py(err_msg)})
sp.register_mm_order_request_failed(api_mm_order_request_failed)
@sp.ffi.callback("ApiQuoteRequestReceivedAddr")
def quote_request_received(product_code, buy_sell, qty):
send_dict("QuoteRequestReceived",
{"product_code": product_code,
"buy_sell": buy_sell,
"qty": qty})
sp.register_quote_request_received_report(quote_request_received)
def monitor_file(filename, newdata=False):
try:
tickerfile = open(filename)
except FileNotFoundError:
open(filename, 'a').close()
tickerfile = open(filename)
if newdata:
tickerfile.seek(0, 2)
def gen():
try:
while True:
line = tickerfile.readline()
if not line:
time.sleep(0.1)
continue
yield line
except GeneratorExit: # Or maybe use flask signals
tickerfile.close()
return Response(gen(), mimetype="text/plain")
# -------
@app.route("/login", methods=['POST'])
def login():
if not request.form:
abort(400)
sp.set_login_info(request.form["host"],
int(request.form["port"]),
request.form["license"],
request.form["app_id"],
request.form["user_id"],
request.form["password"])
my_config.set('logininfo', {
"host": request.form['host'],
"port": int(request.form['port']),
"license": request.form['license'],
"app_id": request.form['app_id'],
"user_id": request.form['user_id']
})
return jsonify({"retval": sp.login()})
@app.route("/ping")
def ping():
msg = {
"msg": "Ping"
}
for sub in log_subscriptions[:]:
sub.put(("ping", msg))
return "OK"
@app.route("/logout")
def logout():
global info_cache
info_cache = empty_cache
sp.logout()
return "OK"
# ----------- Ticker code------
@sp.ffi.callback("ApiTickerUpdateAddr")
def ticker_update(data):
send_cdata("ApiTickerUpdate", data)
t = sp.cdata_to_py(data[0])
tickerfile = open(get_ticker(t['ProdCode']), "a")
dt = datetime.datetime.fromtimestamp(float(t['TickerTime']))
price = "%.2f" % float(t['Price'])
tickerfile.write("%s; %s; %s; %s; %s; %d\n" %
(dt.strftime("%Y/%m/%d/%H/%M/%S"),
price,
price,
price,
price,
int(t['Qty'])))
tickerfile.close()
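# Each ticker line written above looks like (illustrative values):
# 2016/09/01/10/30/00; 23456.00; 23456.00; 23456.00; 23456.00; 5
# i.e. a timestamp, the tick price repeated four times (presumably standing in
# for open/high/low/close) and the traded quantity.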
sp.register_ticker_update(ticker_update)
@app.route("/ticker/subscribe/<string:products>")
def subscribe_ticker(products):
for p in products.split(","):
ticker_products.add(p)
if sp.ready() == 0:
sp.subscribe_ticker(p, 1)
if sp.ready() != 0:
return "NOLOGIN"
else:
send_dict("TickerUpdate",
{"data": list(ticker_products)})
return "OK"
@app.route("/ticker/unsubscribe/<string:products>")
def unsubscribe_ticker(products):
for p in products.split(","):
ticker_products.discard(p)
if sp.ready() == 0:
sp.subscribe_ticker(p, 0)
if sp.ready() != 0:
return "NOLOGIN"
else:
send_dict("TickerUpdate",
{"data": list(ticker_products)})
return "OK"
@app.route("/ticker/list")
def list_ticker():
return jsonify({"data": list(ticker_products)})
@app.route("/ticker/view/<string:product>")
def view_ticker(product):
return monitor_file(get_ticker(product))
@app.route("/ticker/clear/<string:product>")
def clear_ticker(product):
fo = open(get_ticker(product), "w")
fo.truncate()
fo.close()
return "OK"
@app.route("/account-info-push-send")
def account_info_push_send():
acc_info = sp.get_acc_info()
if acc_info is not None:
info_cache['account_info'] = acc_info
send_dict("AccountInfoPush", {"data": acc_info})
return "OK"
else:
return "NODATA"
# ----------- Strategy ------
class StrategyList(object):
def __init__(self):
self.stratlist = {}
def start(self, kwargs):
if kwargs['id'] not in self.stratlist or \
self.stratlist[kwargs['id']][0] is None:
(p, q) = strategy.run(kwargs)
self.stratlist[kwargs['id']] = (p, q)
kwargs['status'] = 'running'
kwargs['comment'] = ''
my_config.save_stratdata('strategy_data', kwargs)
t = threading.Thread(target=strategy_listener, args=(p, q))
t.daemon = True
t.start()
send_dict("LocalStrategyStatus",
{"strategy": kwargs['strategy'],
"id": kwargs['id'],
"status": "running",
"comment": ""})
return "OK"
def stop(self, info, terminate=False):
sid = info.get('id', None)
if sid in self.stratlist:
(p, q) = self.stratlist[sid]
cache = my_config.config['strategy_data'][sid]
cache['status'] = info['status']
cache['comment'] = info['comment']
my_config.save_stratdata('strategy_data', cache)
if q is not None:
q.close()
if p is not None and terminate:
p.terminate()
p.join()
self.stratlist.pop(sid)
send_dict("LocalStrategyStatus",
{"strategy": info['strategy'],
"id": info['id'],
"status": info['status'],
"comment": info['comment']})
return "OK"
def data(self, stratname):
retval = []
for k, v in self.stratlist.items():
if v[2]['strategy'] == stratname:
retval.append(v[2])
return retval
def terminate_all(self):
for k, v in self.stratlist.items():
if v[0] is not None:
p = v[0]
p.terminate()
p.join()
def pause(self, f):
raise NotImplementedError
stratlist = StrategyList()
def strategy_listener(p, q):
try:
while True:
(s, sid, status, comment) = q.get()
info = {"strategy": s,
"id": sid,
"status": status,
"comment": comment}
if status == "error":
stratlist.stop(info, terminate=True)
return
elif status == "done":
stratlist.stop(info, terminate=False)
return
except GeneratorExit: # Or maybe use flask signals
return
@app.route("/strategy/start", methods=['POST'])
def strategy_start():
if not request.form:
abort(400)
return stratlist.start(request.form.to_dict())
@app.route("/strategy/stop", methods=['POST'])
def strategy_stop():
if not request.form:
abort(400)
f = request.form.to_dict()
f['status'] = "done"
f['comment'] = ""
return stratlist.stop(f, terminate=True)
@app.route("/strategy/remove-row/<string:sid>")
def strategy_remove(sid):
del my_config.config['strategy_data'][sid]
return "OK"
@app.route("/strategy/log/<string:strategy_id>")
def strategy_log(strategy_id):
return monitor_file(os.path.join(data_dir,
"log-%s.txt" % strategy_id))
# -----------------------------
@app.route("/backtest", methods=['POST'])
def backtest():
d = request.form.to_dict()
my_config.config['backtest_data'][d['id']] = d
my_config.save()
return strategy.backtest(d)
@app.route("/backtest/remove-row/<string:sid>")
def backtest_remove(sid):
del my_config.config['backtest_data'][sid]
my_config.save()
return "OK"
# ---------------------------
@app.route("/orders/read")
def orders_read():
pass
# ---------------------------
@app.route("/trade/list")
def list_trade():
return jsonify({"data": sp.get_all_trades()})
# -----------------
@app.route("/order/list")
def order_list():
return jsonify({"data": sp.get_all_orders()})
@app.route("/order/add", methods=['POST'])
def order_add():
if request.form:
f = request.form.to_dict()
elif request.json:
f = request.json
else:
abort(400)
inactive = bool(int(f.pop("Inactive", 0)))
if inactive:
return str(sp.order_add_inactive(f))
else:
return str(sp.order_add(f))
@app.route("/order/delete", methods=['POST'])
def order_delete():
if request.form:
f = request.form.to_dict()
elif request.json:
f = request.json
else:
abort(400)
if 'IntOrderNo' not in f or \
'ProdCode' not in f or \
'ClOrderId' not in f:
orders = sp.get_all_orders()
found = False
for o in orders:
found = True
for k, v in f.items():
if str(o[k]) != str(v):
found = False
break
if found:
fin = {}
fin['IntOrderNo'] = str(o['IntOrderNo'])
fin['ProdCode'] = str(o['ProdCode'])
fin['ClOrderId'] = str(o['ClOrderId'])
return str(sp.order_delete(o))
abort(400)
return str(sp.order_delete(f))
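# Illustrative partial request (hypothetical values): POSTing only
# {"Ref": "strat-1"} falls into the branch above, which scans the open orders
# and deletes the first one whose fields all match the posted values.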
@app.route("/order/delete-all", methods=['POST'])
def order_delete_all():
return str(sp.order_delete_all())
@app.route("/order/activate", methods=['POST'])
def order_activate():
if request.form:
f = request.form.to_dict()
elif request.json:
f = request.json
else:
abort(400)
if 'IntOrderNo' not in f:
abort(400)
return str(sp.order_activate(f))
@app.route("/order/inactivate", methods=['POST'])
def order_inactivate():
if request.form:
f = request.form.to_dict()
elif request.json:
f = request.json
else:
abort(400)
if 'IntOrderNo' not in f:
abort(400)
return str(sp.order_inactivate(f))
@app.route("/price/subscribe/<string:products>")
def subscribe_price(products):
if sp.ready() != 0:
return "NOT READY"
for p in products.split(","):
sp.subscribe_price(p, 1)
return "OK"
@app.route("/price/unsubscribe/<string:products>")
def unsubscribe_price(products):
if sp.ready() != 0:
return "NOT READY"
for p in products.split(","):
sp.subscribe_price(p, 0)
return "OK"
@app.route("/account-info")
def get_account_info():
if sp.ready() != 0:
return "NOT READY"
sp.get_acc_bal_count()
return "OK"
# include tag so that different items get different streams
# flask seems to set things up so that one subscribe is one stream
@app.route("/log/subscribe/<string:data>")
def subscribe(data):
def gen():
q = Queue()
log_subscriptions.append(q)
try:
while True:
(event_id, result) = q.get()
ev = ServerSentEvent(result, event_id)
yield ev.encode()
except GeneratorExit: # Or maybe use flask signals
log_subscriptions.remove(q)
return Response(gen(), mimetype="text/event-stream")
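# Illustrative client usage (host/port are assumed Flask defaults, not stated
# in this file): keeping
#   curl -N http://localhost:5000/log/subscribe/orders
# open streams one server-sent event per queued message, each encoded by the
# ServerSentEvent helper.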
@app.route("/schema/<string:structure>")
def schema(structure):
return jsonify({"retval": sp.fields(structure)})
@app.route("/exit")
def exit_done():
exit()
return "OK"
if __name__ == "__main__":
try:
app.debug = True
app.run(threaded=True, use_reloader=False)
except KeyboardInterrupt:
stratlist.terminate_all()
```
#### File: sptrader/sptrader/spstrategy.py
```python
import sys # To find out the script name (in argv[0])
import logging
import backtrader as bt
import inspect
import spreport
# Create a Strategy
class SharpPointStrategy(bt.Strategy):
params = (
('log', sys.stdout),
('loglevel', logging.INFO),
('report', "debug"),
('id', None),
('tickersource', None),
('dataname', None),
('loglevel_default', logging.INFO),
('order_mode', "active")
)
headers = [
{'headerName': "Loglevel",
'field': "loglevel"},
{'headerName': "Report",
'field': "report",
'select' : ["debug", "trade"]},
{'headerName': "Order Mode",
'field': "order_mode",
'select' : ["active", "inactive"]}
]
def __init__(self):
super().__init__()
self.report = spreport.report_list[self.p.report](self)
self.set_tradehistory(True)
@classmethod
def header_list(cls):
a = []
for base_class in inspect.getmro(cls):
if issubclass(base_class, SharpPointStrategy):
a[:0] = base_class.headers
return a
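# Sketch of a hypothetical subclass (not part of this module):
#     class MyStrategy(SharpPointStrategy):
#         headers = [{'headerName': "Period", 'field': "period"}]
#         def next(self): ...
# MyStrategy.header_list() then returns the SharpPointStrategy headers first,
# followed by its own, since getmro() walks from the subclass upwards and each
# qualifying base's list is prepended.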
def log(self, *args, dt=None, level=None):
self.report.log(*args, dt=dt, level=level)
def buy(self, **kwargs):
kwargs['Ref'] = self.p.id
if self.p.order_mode == "inactive":
kwargs['Inactive'] = 1
else:
kwargs['Inactive'] = 0
self.report.buy(kwargs)
return super().buy(**kwargs)
def sell(self, **kwargs):
kwargs['Ref'] = self.p.id
if self.p.order_mode == "inactive":
kwargs['Inactive'] = 1
else:
kwargs['Inactive'] = 0
self.report.sell(kwargs)
return super().sell(**kwargs)
def notify_order(self, order):
self.report.notify_order(order)
def notify_trade(self, trade):
self.report.notify_trade(trade)
def next(self):
raise NotImplementedError
```
#### File: sptrader/sptrader/sptrader.py
```python
from cffi import FFI
import atexit
import os
import struct
import cffi_to_py
import sys
from enum import Enum
if 8 * struct.calcsize("P") != 64:
print("sptrader only supported for 64 bit")
print("sptrader_api string needs to be checked for 32-bit")
exit()
location = os.path.dirname(os.path.realpath(__file__))
dll_location = os.path.join(location, "..", "dll")
ffi = FFI()
spapi_cdef = """
typedef signed long int __int64_t;
typedef unsigned long int __uint64_t;
typedef char tinyint;
typedef unsigned char u_tinyint;
typedef unsigned char u_char;
typedef unsigned short u_short;
typedef unsigned int u_int;
typedef unsigned long u_long;
typedef long long bigint;
typedef unsigned long long u_bigint;
typedef char STR4[4];
typedef char STR16[16];
typedef char STR40[40];
typedef struct
{
int32_t Qty;
int32_t DepQty;
int32_t LongQty;
int32_t ShortQty;
double TotalAmt;
double DepTotalAmt;
double LongTotalAmt;
double ShortTotalAmt;
double PLBaseCcy;
double PL;
double ExchangeRate;
STR16 AccNo;
STR16 ProdCode;
char LongShort;
tinyint DecInPrice;
} SPApiPos;
typedef struct
{
double Price;
double StopLevel;
double UpLevel;
double UpPrice;
double DownLevel;
double DownPrice;
bigint ExtOrderNo;
int32_t IntOrderNo;
int32_t Qty;
int32_t TradedQty;
int32_t TotalQty;
int32_t ValidTime;
int32_t SchedTime;
int32_t TimeStamp;
uint32_t OrderOptions;
STR16 AccNo;
STR16 ProdCode;
STR16 Initiator;
STR16 Ref;
STR16 Ref2;
STR16 GatewayCode;
STR40 ClOrderId;
char BuySell;
char StopType;
char OpenClose;
tinyint CondType;
tinyint OrderType;
tinyint ValidType;
tinyint Status;
tinyint DecInPrice;
tinyint OrderAction;
int32_t UpdateTime;
int32_t UpdateSeqNo;
} SPApiOrder;
typedef struct
{
bigint BidExtOrderNo;
bigint AskExtOrderNo;
long BidAccOrderNo;
long AskAccOrderNo;
double BidPrice;
double AskPrice;
long BidQty;
long AskQty;
long SpecTime;
u_long OrderOptions;
STR16 ProdCode;
STR16 AccNo;
STR40 ClOrderId;
STR40 OrigClOrdId;
tinyint OrderType;
tinyint ValidType;
tinyint DecInPrice;
} SPApiMMOrder;
typedef struct
{
int32_t RecNo;
double Price;
bigint TradeNo;
bigint ExtOrderNo;
int32_t IntOrderNo;
int32_t Qty;
int32_t TradeDate;
int32_t TradeTime;
STR16 AccNo;
STR16 ProdCode;
STR16 Initiator;
STR16 Ref;
STR16 Ref2;
STR16 GatewayCode;
STR40 ClOrderId;
char BuySell;
char OpenClose;
tinyint Status;
tinyint DecInPrice;
double OrderPrice;
STR40 TradeRef;
int32_t TotalQty;
int32_t RemainingQty;
int32_t TradedQty;
double AvgTradedPrice;
} SPApiTrade;
typedef struct
{
double Margin;
double ContractSize;
STR16 MarketCode;
STR16 InstCode;
STR40 InstName;
STR40 InstName1;
STR40 InstName2;
STR4 Ccy;
char DecInPrice;
char InstType;
} SPApiInstrument;
typedef struct
{
STR16 ProdCode;
char ProdType;
STR40 ProdName;
STR16 Underlying;
STR16 InstCode;
int32_t ExpiryDate;
char CallPut;
int32_t Strike;
int32_t LotSize;
STR40 ProdName1;
STR40 ProdName2;
char OptStyle;
int32_t TickSize;
}SPApiProduct;
typedef struct
{
double Bid[20];
int32_t BidQty[20];
int32_t BidTicket[20];
double Ask[20];
int32_t AskQty[20];
int32_t AskTicket[20];
double Last[20];
int32_t LastQty[20];
int32_t LastTime[20];
double Equil;
double Open;
double High;
double Low;
double Close;
int32_t CloseDate;
double TurnoverVol;
double TurnoverAmt;
int32_t OpenInt;
STR16 ProdCode;
STR40 ProdName;
char DecInPrice;
int32_t ExStateNo;
int32_t TradeStateNo;
bool Suspend;
int32_t ExpiryYMD;
int32_t ContractYMD;
int32_t Timestamp;
} SPApiPrice;
typedef struct
{
double Price;
int32_t Qty;
int32_t TickerTime;
int32_t DealSrc;
STR16 ProdCode;
char DecInPrice;
} SPApiTicker;
typedef struct
{
double NAV;
double BuyingPower;
double CashBal;
double MarginCall;
double CommodityPL;
double LockupAmt;
double CreditLimit;
double MaxMargin;
double MaxLoanLimit;
double TradingLimit;
double RawMargin;
double IMargin;
double MMargin;
double TodayTrans;
double LoanLimit;
double TotalFee;
double LoanToMR;
double LoanToMV;
STR16 AccName;
STR4 BaseCcy;
STR16 MarginClass;
STR16 TradeClass;
STR16 ClientId;
STR16 AEId;
char AccType;
char CtrlLevel;
char Active;
char MarginPeriod;
} SPApiAccInfo;
typedef struct
{
double CashBf;
double TodayCash;
double NotYetValue;
double Unpresented;
double TodayOut;
STR4 Ccy;
} SPApiAccBal;
typedef struct
{
STR4 Ccy;
double Rate;
} SPApiCcyRate;
typedef void (SPDLLCALL *LoginReplyAddr)(long ret_code, char *ret_msg);
typedef void (SPDLLCALL *ConnectedReplyAddr)(long host_type, long con_status);
typedef void (SPDLLCALL *ApiOrderRequestFailedAddr)(tinyint action,
SPApiOrder *order, long err_code, char *err_msg);
typedef void (SPDLLCALL *ApiOrderReportAddr)(long rec_no, SPApiOrder *order);
typedef void (SPDLLCALL *ApiOrderBeforeSendReportAddr)(SPApiOrder *order);
typedef void (SPDLLCALL *AccountLoginReplyAddr)(char *accNo,
long ret_code, char* ret_msg);
typedef void (SPDLLCALL *AccountLogoutReplyAddr)(long ret_code, char* ret_msg);
typedef void (SPDLLCALL *AccountInfoPushAddr)(SPApiAccInfo *acc_info);
typedef void (SPDLLCALL *AccountPositionPushAddr)(SPApiPos *pos);
typedef void (SPDLLCALL *UpdatedAccountPositionPushAddr)(SPApiPos *pos);
typedef void (SPDLLCALL *UpdatedAccountBalancePushAddr)(SPApiAccBal *acc_bal);
typedef void (SPDLLCALL *ApiTradeReportAddr)(long rec_no, SPApiTrade *trade);
typedef void (SPDLLCALL *ApiPriceUpdateAddr)(SPApiPrice *price);
typedef void (SPDLLCALL *ApiTickerUpdateAddr)(SPApiTicker *ticker);
typedef void (SPDLLCALL *PswChangeReplyAddr)(long ret_code, char *ret_msg);
typedef void (SPDLLCALL *ProductListByCodeReplyAddr)(char *inst_code,
bool is_ready, char *ret_msg);
typedef void (SPDLLCALL *InstrumentListReplyAddr)(bool is_ready,
char *ret_msg);
typedef void (SPDLLCALL *BusinessDateReplyAddr)(long business_date);
typedef void (SPDLLCALL *ApiMMOrderBeforeSendReportAddr)
(SPApiMMOrder *mm_order);
typedef void (SPDLLCALL *ApiMMOrderRequestFailedAddr)(SPApiMMOrder *mm_order,
long err_code, char *err_msg);
typedef void (SPDLLCALL *ApiQuoteRequestReceivedAddr)(char *product_code,
char buy_sell, long qty);
void SPAPI_RegisterLoginReply(LoginReplyAddr addr);
void SPAPI_RegisterConnectingReply(ConnectedReplyAddr addr);
void SPAPI_RegisterOrderReport(ApiOrderReportAddr addr);
void SPAPI_RegisterOrderRequestFailed(ApiOrderRequestFailedAddr addr);
void SPAPI_RegisterOrderBeforeSendReport(ApiOrderBeforeSendReportAddr addr);
void SPAPI_RegisterAccountLoginReply(AccountLoginReplyAddr addr);
void SPAPI_RegisterAccountLogoutReply(AccountLogoutReplyAddr addr);
void SPAPI_RegisterAccountInfoPush(AccountInfoPushAddr addr);
void SPAPI_RegisterAccountPositionPush(AccountPositionPushAddr addr);
void
SPAPI_RegisterUpdatedAccountPositionPush(UpdatedAccountPositionPushAddr addr);
void
SPAPI_RegisterUpdatedAccountBalancePush(UpdatedAccountBalancePushAddr addr);
void SPAPI_RegisterTradeReport(ApiTradeReportAddr addr);
void SPAPI_RegisterApiPriceUpdate(ApiPriceUpdateAddr addr);
void SPAPI_RegisterTickerUpdate(ApiTickerUpdateAddr addr);
void SPAPI_RegisterPswChangeReply(PswChangeReplyAddr addr);
void SPAPI_RegisterProductListByCodeReply(ProductListByCodeReplyAddr addr);
void SPAPI_RegisterInstrumentListReply(InstrumentListReplyAddr addr);
void SPAPI_RegisterBusinessDateReply(BusinessDateReplyAddr addr);
void SPAPI_RegisterMMOrderRequestFailed(ApiMMOrderRequestFailedAddr addr);
void SPAPI_RegisterMMOrderBeforeSendReport(
ApiMMOrderBeforeSendReportAddr addr);
void SPAPI_RegisterQuoteRequestReceivedReport(
ApiQuoteRequestReceivedAddr addr);
int SPAPI_Initialize();
void SPAPI_SetLoginInfo(char *host,
int port, char *license, char *app_id, char *user_id, char *password);
int SPAPI_Login();
int SPAPI_GetLoginStatus(char *user_id, short host_id);
int SPAPI_AddOrder(SPApiOrder *order);
int SPAPI_AddInactiveOrder(SPApiOrder* order);
int SPAPI_ChangeOrder(char *user_id,
SPApiOrder* order, double org_price, long org_qty);
int SPAPI_ChangeOrderBy(char *user_id,
char *acc_no, long accOrderNo, double org_price,
long org_qty, double newPrice, long newQty);
int SPAPI_DeleteOrderBy(char *user_id,
char *acc_no, long accOrderNo, char* productCode, char* clOrderId);
int SPAPI_DeleteAllOrders(char *user_id, char *acc_no);
int SPAPI_ActivateAllOrders(char *user_id, char *acc_no);
int SPAPI_InactivateAllOrders(char *user_id, char *acc_no);
int SPAPI_ActivateOrderBy(char *user_id, char *acc_no, long accOrderNo);
int SPAPI_InactivateOrderBy(char *user_id, char *acc_no, long accOrderNo);
int SPAPI_GetOrderCount(char *user_id, char* acc_no);
int SPAPI_GetOrderByOrderNo(char *user_id, char *acc_no,
long int_order_no, SPApiOrder *order);
int SPAPI_GetPosCount(char *user_id);
int SPAPI_GetPosByProduct(char *user_id, char *prod_code, SPApiPos *pos);
void SPAPI_Uninitialize();
int SPAPI_Logout(char *user_id);
int SPAPI_AccountLogin(char *user_id, char *acc_no);
int SPAPI_AccountLogout(char *user_id, char *acc_no);
int SPAPI_GetTradeCount(char *user_id, char *acc_no);
int SPAPI_SubscribePrice(char *user_id, char *prod_code, int mode);
int SPAPI_SubscribeTicker(char *user_id, char *prod_code, int mode);
int SPAPI_ChangePassword(char *user_id, char *old_password,
char *new_password);
int SPAPI_GetDllVersion(char *dll_ver_no, char *dll_rel_no, char *dll_suffix);
int SPAPI_GetAccBalCount(char* user_id);
int SPAPI_GetAccBalByCurrency(char *user_id, char *ccy, SPApiAccBal *acc_bal);
int SPAPI_GetCcyRateByCcy(char *user_id, char *ccy, double *rate);
int SPAPI_GetAccInfo(char *user_id, SPApiAccInfo *acc_info);
int SPAPI_GetPriceByCode(char *user_id, char *prod_code, SPApiPrice *price);
int SPAPI_SetApiLogPath(char *path);
int SPAPI_LoadProductInfoListByCode(char *inst_code);
int SPAPI_GetProductCount();
int SPAPI_GetProductByCode(char *prod_code, SPApiProduct *prod);
int SPAPI_LoadInstrumentList();
int SPAPI_GetInstrumentCount();
int SPAPI_GetInstrumentByCode(char *inst_code, SPApiInstrument *inst);
int SPAPI_SetLanguageId(int langid);
int SPAPI_SendMarketMakingOrder(char *user_id, SPApiMMOrder *mm_order);
int SPAPI_SubscribeQuoteRequest(char *user_id, char *prod_code, int mode);
int SPAPI_SubscribeAllQuoteRequest(char *user_id, int mode);
int SPAPI_GetAllTradesByArray(char *user_id, char *acc_no,
SPApiTrade* apiTradeList);
int SPAPI_GetOrdersByArray(char *user_id, char *acc_no,
SPApiOrder* apiOrderList);
int SPAPI_GetAllAccBalByArray(char *user_id, SPApiAccBal* apiAccBalList);
int SPAPI_GetInstrumentByArray(SPApiInstrument* apiInstList);
int SPAPI_GetProductByArray(SPApiProduct* apiProdList);
"""
spapi = None
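# On Windows the OpenSSL DLLs (libeay32/ssleay32) are loaded before the SP API
# DLL that depends on them; elsewhere the wrapper library is loaded with
# RTLD_GLOBAL, presumably so the shim can resolve its symbols.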
if os.name == "nt":
ffi.cdef(spapi_cdef.replace("SPDLLCALL", "__stdcall"))
ffi.dlopen(os.path.join(dll_location, "libeay32.dll"))
ffi.dlopen(os.path.join(dll_location, "ssleay32.dll"))
spapi = ffi.dlopen(os.path.join(dll_location, "spapidllm64.dll"))
else:
ffi.cdef(spapi_cdef.replace("SPDLLCALL", ""))
ffi.dlopen(os.path.join(dll_location, "libapiwrapper.so"),
ffi.RTLD_GLOBAL | ffi.RTLD_NOW)
spapi = ffi.dlopen(os.path.join(dll_location, "linux-shim.so"))
# Remember to convert unicode strings to byte strings before passing them in:
# cffi expects bytes for char* parameters (a plain str would only match
# wchar_t*), so every user id and product code below is encoded first.
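# Illustrative only (the product code is taken from the tests below); a raw
# call against the loaded library would look like:
#   spapi.SPAPI_SubscribePrice(b"demo_user", b"HSIZ6", 1)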
class SPTrader(object):
ffi = ffi
api = spapi
ffi_conv = cffi_to_py.FfiConverter(ffi)
def __init__(self):
self.api.SPAPI_SetLanguageId(0)
self.api.SPAPI_Initialize()
self.user = None
self.acc_no = None
def ready(self):
if self.user is None:
return -1
else:
return 0
def register_login_reply(self, login_reply_func):
self.api.SPAPI_RegisterLoginReply(login_reply_func)
def register_connecting_reply(self, connected_reply_func):
self.api.SPAPI_RegisterConnectingReply(connected_reply_func)
def register_order_report(self, func):
self.api.SPAPI_RegisterOrderReport(func)
def register_order_request_failed(self, func):
self.api.SPAPI_RegisterOrderRequestFailed(func)
def register_order_before_send_report(self, func):
self.api.SPAPI_RegisterOrderBeforeSendReport(func)
def register_account_login_reply(self, func):
self.api.SPAPI_RegisterAccountLoginReply(func)
def register_account_logout_reply(self, func):
self.api.SPAPI_RegisterAccountLogoutReply(func)
def register_account_info_push(self, account_info_func):
self.api.SPAPI_RegisterAccountInfoPush(account_info_func)
def register_account_position_push(self, func):
self.api.SPAPI_RegisterAccountPositionPush(func)
def register_updated_account_position_push(self, func):
self.api.SPAPI_RegisterUpdatedAccountPositionPush(func)
def register_updated_account_balance_push(self, func):
self.api.SPAPI_RegisterUpdatedAccountBalancePush(func)
def register_trade_report(self, func):
self.api.SPAPI_RegisterTradeReport(func)
def register_price_update(self, func):
self.api.SPAPI_RegisterApiPriceUpdate(func)
def register_ticker_update(self, func):
self.api.SPAPI_RegisterTickerUpdate(func)
def register_psw_change_reply(self, func):
self.api.SPAPI_RegisterPswChangeReply(func)
def register_product_list_by_code_reply(self, func):
self.api.SPAPI_RegisterProductListByCodeReply(func)
def register_instrument_list_reply(self, func):
self.api.SPAPI_RegisterInstrumentListReply(func)
def register_business_date_reply(self, func):
self.api.SPAPI_RegisterBusinessDateReply(func)
def register_mm_order_request_failed(self, func):
self.api.SPAPI_RegisterMMOrderRequestFailed(func)
def register_mm_order_before_send_report(self, func):
self.api.SPAPI_RegisterMMOrderBeforeSendReport(func)
def register_quote_request_received_report(self, func):
self.api.SPAPI_RegisterQuoteRequestReceivedReport(func)
def load_instrument_list(self):
return self.api.SPAPI_LoadInstrumentList()
def set_login_info(self,
host,
port,
license,
app_id,
user_id,
password):
self.user = user_id.encode("utf-8")
self.acc_no = self.user
self.api.SPAPI_SetLoginInfo(host.encode("utf-8"),
port,
license.encode("utf-8"),
app_id.encode("utf-8"),
self.user,
password.encode("utf-8"))
def login(self):
return self.api.SPAPI_Login()
def get_login_status(self, status_id):
if self.user is None:
return -1
return self.api.SPAPI_GetLoginStatus(self.user, status_id)
def get_instrument_count(self):
return self.api.SPAPI_GetInstrumentCount()
def get_instrument(self):
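        # Allocate a cffi array sized to the reported count, let the DLL fill
        # it in place, and convert the C structs to Python values on success.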
count = self.get_instrument_count()
if count <= 0:
return []
buffer = self.ffi.new("SPApiInstrument[%d]" % (count))
if self.api.SPAPI_GetInstrumentByArray(buffer) == 0:
return self.cdata_to_py(buffer)
else:
return []
def get_product_count(self):
        return self.api.SPAPI_GetProductCount()
def get_product(self):
count = self.get_product_count()
if count <= 0:
return []
buffer = self.ffi.new("SPApiProduct[%d]" % (count))
        if self.api.SPAPI_GetProductByArray(buffer) != 0:
            return []
        return self.cdata_to_py(buffer)
def get_acc_info(self):
if self.user is None:
return None
buffer = self.ffi.new("SPApiAccInfo[1]")
self.api.SPAPI_GetAccInfo(self.user, buffer)
return self.cdata_to_py(buffer[0])
def get_acc_bal_count(self):
return self.api.SPAPI_GetAccBalCount(self.user)
def get_order_count(self):
return self.api.SPAPI_GetOrderCount(self.user, self.acc_no)
def get_all_orders(self):
if self.ready() != 0:
return []
count = self.get_order_count()
if count <= 0:
return []
buffer = self.ffi.new("SPApiOrder[%d]" % (count))
if self.api.SPAPI_GetOrdersByArray(self.user,
self.acc_no,
buffer) != 0:
return []
return self.cdata_to_py(buffer)
def get_trade_count(self):
return self.api.SPAPI_GetTradeCount(self.user, self.acc_no)
def get_all_trades(self):
if self.ready() != 0:
return []
count = self.get_trade_count()
if count <= 0:
return []
buffer = self.ffi.new("SPApiTrade[%d]" % (count))
if self.api.SPAPI_GetAllTradesByArray(self.user,
self.acc_no,
buffer) != 0:
return []
return self.cdata_to_py(buffer)
def get_position_count(self):
        return self.api.SPAPI_GetPosCount(self.user)
def get_price_by_code(self, code):
price = self.ffi.new("SPApiPrice[1]")
self.api.SPAPI_GetPriceByCode(self.user, code.encode("utf-8"), price)
return self.cdata_to_py(price)
    def subscribe_price(self, prod, value):
        return self.api.SPAPI_SubscribePrice(self.user,
                                             prod.encode("utf-8"), value)
    def subscribe_ticker(self, prod, value):
        return self.api.SPAPI_SubscribeTicker(self.user,
                                              prod.encode("utf-8"), value)
def logout(self):
user = self.user
if user is not None:
self.user = None
self.acc_no = None
return self.api.SPAPI_Logout(user)
def cdata_to_py(self, s):
return self.ffi_conv.to_py(s)
def fields(self, s):
return self.ffi_conv.fields(s)
def order_add(self, data):
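        # Populate the SPApiOrder struct from the Python dict; the account
        # number and initiator are always overridden with the logged-in user.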
data['AccNo'] = self.acc_no
data['Initiator'] = self.user
buffer = self.ffi.new("SPApiOrder[1]")
self.ffi_conv.from_py(buffer, data)
if buffer is None:
return -2
return self.api.SPAPI_AddOrder(buffer)
def order_add_inactive(self, data):
data['AccNo'] = self.acc_no
data['Initiator'] = self.user
buffer = self.ffi.new("SPApiOrder[1]")
self.ffi_conv.from_py(buffer, data)
if buffer is None:
return -2
return self.api.SPAPI_AddInactiveOrder(buffer)
def order_delete(self, data):
accOrderNo = int(data['IntOrderNo'])
return self.api.SPAPI_DeleteOrderBy(self.user,
self.acc_no,
accOrderNo,
data['ProdCode'].encode("utf-8"),
data['ClOrderId'].encode("utf-8"))
def order_delete_all(self, data):
return self.api.SPAPI_DeleteAllOrders(self.user,
self.acc_no)
def order_activate(self, data):
accOrderNo = int(data['IntOrderNo'])
return self.api.SPAPI_ActivateOrderBy(self.user,
self.acc_no,
accOrderNo)
def order_inactivate(self, data):
accOrderNo = int(data['IntOrderNo'])
return self.api.SPAPI_InactivateOrderBy(self.user,
self.acc_no,
accOrderNo)
def __del__(self):
pass
# self.api.SPAPI_Uninitialize()
```
#### File: sptrader/sptrader/spwriter.py
```python
import backtrader as bt
import itertools
class SharpPointWriter(bt.WriterFile):
params = (
('csvsep', '; '),
)
def __init__(self, *args, **kwargs):
super(SharpPointWriter, self).__init__(*args, **kwargs)
def start(self):
pass
def writedict(self, dct, level=0, recurse=False):
pass
def writeiterable(self, iterable, func=None, counter=''):
if self.p.csv_counter:
iterable = itertools.chain([counter], iterable)
if func is not None:
iterable = map(lambda x: func(x), iterable)
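        # Keep only columns 3-8 (what appears to be the datetime plus OHLCV
        # fields) and normalise the datetime separators to the slash-separated
        # form expected on the SharpPoint side.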
items = list(iterable)[3:9]
items[0] = items[0].replace(" ", "/")
items[0] = items[0].replace(":", "/").replace("-", "/")
line = self.p.csvsep.join(items)
self.writeline(line)
```
#### File: sptrader/strategy/__init__.py
```python
import sys
import os
import io
import base64
import matplotlib
matplotlib.use('Agg', warn=False, force=True)
import backtrader as bt
import matplotlib.pyplot as plt
from backtrader.plot.plot import Plot
from multiprocessing import Process, Queue
import logging
import pprint
import copy
import inspect
# must import first to initialize metaclass
from spfeed import SharpPointCSVData
from spbroker import SharpPointBroker
from spbacktester import SharpPointBackTester
import spstore
import strategy.strategylist
import datetime
from pytz import timezone
import traceback
import jitter
def check_params(kwargs, slist):
for s in slist:
if kwargs.get(s, None) is None or \
kwargs[s] == '':
raise ValueError('missing %s' % str(s))
def run_strategy(kwargs, q):
f = None
try:
check_params(kwargs, ['strategy', 'dataname', 'id'])
modpath = os.path.dirname(os.path.realpath(__file__))
logpath = os.path.join(modpath, '../data/log-%s.txt' %
(str(kwargs['id'])))
f = open(logpath, "a", 1)
stratargs = {}
module = strategy.strategylist.dispatch[kwargs['strategy']]
stratparams = module.params._getpairs()
for k, v in kwargs.items():
if k in stratparams:
s = stratparams[k]
if isinstance(s, int):
stratargs[k] = int(v)
elif isinstance(s, float):
stratargs[k] = float(v)
else:
stratargs[k] = v
stratargs['log'] = f
stratargs['strategy'] = module
cerebro = bt.Cerebro()
cerebro.addstrategy(**stratargs)
store = spstore.SharpPointStore(log=f)
broker = store.getbroker()
cerebro.setbroker(broker)
# Create a Data Feed
data = store.getdata(**kwargs)
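        # Clone the feed and replay it compressed into 5-minute bars so the
        # strategy also sees a coarser view of the same data.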
data2 = bt.DataClone(dataname=data)
data2.addfilter(bt.ReplayerMinutes, compression=5)
cerebro.adddata(data)
cerebro.adddata(data2)
# Print out the starting conditions
print('Starting strategy "{}" at "{}"'.format(kwargs['strategy'],
datetime.datetime.now()),
file=f)
print('Using module file "{}"'.format(inspect.getsourcefile(module)),
file=f)
print('{}'.format(pprint.pformat(kwargs)), file=f)
# Run over everything
cerebro.run()
# Print out the final result
print('Finishing strategy "{}" at "{}"'.format(kwargs['strategy'],
datetime.datetime.now()),
file=f)
f.close()
q.put((kwargs['strategy'], kwargs['id'], "done", ""))
return None
except:
if f is not None:
print(traceback.format_exc(), file=f)
f.close()
print(traceback.format_exc())
q.put((kwargs['strategy'], kwargs['id'], "error",
repr(sys.exc_info())))
raise
def parse_date(s):
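    # e.g. parse_date('2016-08-31 14:30:00') -> datetime.datetime(2016, 8, 31, 14, 30)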
[d, t] = s.split()
l = [int(x) for x in d.split('-')] + [int(x) for x in t.split(':')]
return datetime.datetime(*l)
def run_backtest(kwargs):
check_params(kwargs, ['strategy', 'dataname'])
stratargs = {}
f = io.StringIO()
module = strategy.strategylist.dispatch[kwargs['strategy']]
stratparams = module.params._getpairs()
for k, v in kwargs.items():
if k in stratparams:
s = stratparams[k]
if isinstance(s, int):
stratargs[k] = int(v)
elif isinstance(s, float):
stratargs[k] = float(v)
else:
stratargs[k] = v
stratargs['log'] = f
stratargs['strategy'] = module
cerebro = bt.Cerebro()
cerebro.addstrategy(**stratargs)
store = spstore.SharpPointStore(log=f)
broker = store.getbroker(backtest=kwargs.get('backtest', True))
cerebro.setbroker(broker)
feedargs = copy.copy(kwargs)
if kwargs.get('backtest_start_time', '').strip() != '':
feedargs['fromdate'] = parse_date(kwargs['backtest_start_time'])
if kwargs.get('backtest_end_time', '').strip() != '':
feedargs['todate'] = parse_date(kwargs['backtest_end_time'])
# Create a Data Feed
data = store.getdata(**feedargs)
if float(kwargs['jitter']) >= 0.0:
data.addfilter(jitter.JitterFilter,
jitter=float(kwargs['jitter']))
data2 = bt.DataClone(dataname=data)
data2.addfilter(bt.ReplayerMinutes, compression=5)
cerebro.adddata(data)
cerebro.adddata(data2)
# Set the commission - 0.1% ... divide by 100 to remove the %
initial_cash = kwargs.get("initial_cash", None)
if initial_cash is not None:
cerebro.broker.setcash(float(initial_cash))
# cerebro.broker.setcommission(commission=0.0)
# Print out the starting conditions
print('Starting strategy "{}" at "{}"'.format(kwargs['strategy'],
datetime.datetime.now()),
file=f)
print('Using module file "{}"'.format(inspect.getsourcefile(module)),
file=f)
print('{}'.format(pprint.pformat(kwargs)), file=f)
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue(), file=f)
# Run over everything
cerebro.run()
imgdata = io.BytesIO()
plot_type = kwargs.get("plot", "candle")
if plot_type != "none":
plotter = Plot(style='candle', bardownfill=False)
cerebro.plot(plotter)
plt.savefig(imgdata, format='svg')
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue(),
file=f)
print('Finishing strategy "{}" at "{}"'.format(kwargs['strategy'],
datetime.datetime.now()),
file=f)
retval = """
<html>
<head>
<title>Backtest - {}</title>
</head>
<body>
""".format(kwargs['id'])
if plot_type != "none":
retval += '<img src="data:image/svg+xml;base64,%s" /><br>' % \
base64.b64encode(imgdata.getvalue()).decode('ascii')
retval += '<pre>%s</pre></body></html>' % f.getvalue()
imgdata.close()
plt.close('all')
f.close()
return retval
def run(kwargs):
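    # Launch the live strategy in a daemon subprocess; completion or errors
    # are reported back through the returned queue.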
q = Queue()
kwargs['newdata'] = True
kwargs['keepalive'] = True
if 'loglevel' not in kwargs:
kwargs['loglevel'] = logging.WARNING
kwargs['streaming'] = True
if 'tickersource' not in kwargs:
kwargs['tickersource'] = "ticker-%{instrument}.txt"
p = Process(target=run_strategy,
args=(kwargs, q))
p.daemon = True
p.start()
return (p, q)
def backtest(kwargs):
kwargs['newdata'] = False
kwargs['keepalive'] = False
if 'loglevel' not in kwargs:
kwargs['loglevel'] = logging.WARNING
kwargs['streaming'] = False
try:
return run_backtest(kwargs)
except:
return "<pre>" + traceback.format_exc() + "</pre>"
def strategy_list():
return list(strategy.strategylist.dispatch.keys())
def params(name):
return strategy.strategylist.dispatch[name].params._getpairs()
def headers(name):
my_headers = strategy.strategylist.dispatch[name].header_list()
defaultData = params(name)
for header in my_headers:
if 'defaultData' not in header and \
header['field'] in defaultData:
header['defaultData'] = defaultData[header['field']]
return my_headers
class TimeFilter(object):
def __init__(self, a, b):
self.start_time = self.string_to_seconds(a)
self.end_time = self.string_to_seconds(b)
@staticmethod
def string_to_seconds(s):
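        # e.g. "14:30" -> 52200, "14:30:15" -> 52215 (seconds from midnight)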
v = s.split(":")
r = int(v[0]) * 3600 + int(v[1]) * 60
if len(v) > 2:
r = r + int(v[2])
return r
@staticmethod
def seconds_from_midnight(d):
return (d - d.replace(hour=0, minute=0,
second=0, microsecond=0)).total_seconds()
def intervals(self, a):
ticktime_seconds = self.seconds_from_midnight(a)
return (ticktime_seconds, self.start_time, self.end_time)
def filter(self, a):
ticktime_seconds = self.seconds_from_midnight(a)
return (ticktime_seconds >= self.start_time and
ticktime_seconds <= self.end_time)
if __name__ == '__main__':
print(params('sample'))
print(params('sample').get('exitbars', None))
print(TimeFilter.string_to_seconds("14:30"))
run("sample", 1, {'exitbars': 1})
```
#### File: sptrader/tests/test_loader.py
```python
import cffi
import os
import sys
import time
import threading
from ctypes import *
location = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, location)
sys.path.insert(0, os.path.join(location, "..", "sptrader"))
import sptrader
import config
cv = threading.Condition()
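# The condition variable keeps the main thread alive until the login
# callback below fires and notifies it.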
login = config.logininfo
sp = sptrader.SPTrader()
sp.set_login_info(login['host'],
login['port'],
login['license'],
login['app_id'],
login['user_id'],
"test1")
@sp.ffi.callback("LoginReplyAddr")
def login_actions(ret_code, ret_msg):
cv.acquire()
print("login")
print(sp.get_login_status(81))
print(sp.logout())
input("Press any key to exit")
cv.notify()
cv.release()
sp.register_login_reply(login_actions)
cv.acquire()
print(sp.fields("SPApiOrder"))
print(sp.login())
cv.wait()
cv.release()
```
#### File: sptrader/tests/test_ticker.py
```python
import cffi
import os
import sys
import time
import threading
location = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, location)
sys.path.insert(0, os.path.join(location, "..", "sptrader"))
import sptrader
import config
import cffi_to_py
cv = threading.Condition()
login = config.logininfo
sp = sptrader.SPTrader()
sp.set_login_info(login['host'],
8080,
login['license'],
login['app_id'],
login['user_id'],
"test1")
@sp.ffi.callback("ApiTickerUpdateAddr")
def ticker_action(data):
print("Ticker")
print(sp.cdata_to_py(data[0]))
@sp.ffi.callback("ConnectedReplyAddr")
def connected_reply_func(host_type, con_status):
print("connected", host_type, con_status)
if host_type == 83 and con_status == 2:
sp.register_ticker_update(ticker_action)
print(sp.subscribe_ticker("HSIQ6", 1))
print(sp.subscribe_ticker("HSIU6", 1))
print(sp.subscribe_ticker("HSIZ6", 1))
print(sp.subscribe_ticker("HSIH7", 1))
@sp.ffi.callback("AccountInfoPushAddr")
def account_info_func(data):
print("Account")
print(sp.cdata_to_py(data[0]))
@sp.ffi.callback("InstrumentListReplyAddr")
def instrument_list_reply_func(is_ready, ret_msg):
print("InstrumentListReply")
print(is_ready)
print(sp.cdata_to_py(ret_msg))
print(sp.get_instrument())
@sp.ffi.callback("ApiPriceUpdateAddr")
def api_price_update_func(data):
print("api_price_update")
print(sp.cdata_to_py(data[0]))
@sp.ffi.callback("LoginReplyAddr")
def login_actions(ret_code, ret_msg):
print("login")
print(login['user_id'].encode("utf-8"))
print("instrument_list", sp.load_instrument_list())
print("price", sp.get_price_by_code("HSIQ6"))
print(sp.get_acc_bal_count())
print(sp.get_instrument_count())
print(sp.get_product_count())
sp.register_login_reply(login_actions)
sp.register_account_info_push(account_info_func)
sp.register_connecting_reply(connected_reply_func)
sp.register_instrument_list_reply(instrument_list_reply_func)
sp.register_price_update(api_price_update_func)
print(sp.login())
input("Press any key to exit")
sp.logout()
``` |
{
"source": "joequant/sto",
"score": 3
} |
#### File: bitcoin/blocksci-server/app-server.py
```python
from flask import Flask
app = Flask(__name__)
@app.route("/hello-world")
def hello():
return "Hello world!"
if __name__ == "__main__":
from gevent.pywsgi import WSGIServer
http_server = WSGIServer(('0.0.0.0', 5000), app)
http_server.serve_forever()
``` |
{
"source": "joequant/webdavfs",
"score": 2
} |
#### File: webdavfs/tests/test_opener.py
```python
from fs.opener import open_fs
def test_opener_webdav():
result = open_fs('webdav://foo.bar/webdav')
assert result.url.startswith('http://foo.bar')
def test_opener_webdav_443():
result = open_fs('webdav://foo.bar:443/webdav')
assert result.url.startswith('https://foo.bar')
def test_opener_webdavs():
result = open_fs('webdavs://foo.bar/webdav')
assert result.url.startswith('https://foo.bar')
``` |
{
"source": "JoeQureshi/stocal",
"score": 3
} |
#### File: stocal/tests/test_tutorial.py
```python
import unittest
from stocal import MassAction, Event, ReactionRule, Process
class Dilution(ReactionRule):
"""Dilution rule"""
Transition = MassAction
def novel_reactions(self, species):
yield self.Transition([species], [], 0.001)
class Polymerization(ReactionRule):
"""Polymerization rule"""
Transition = MassAction
def novel_reactions(self, k, l):
yield self.Transition([k, l], [k+l], 10.)
class Hydrolysis(ReactionRule):
"""Hydrolysis rule"""
Transition = MassAction
def novel_reactions(self, k):
for i in range(1, len(k)):
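            # split the polymer at bond i; the rate scales with the product
            # of the two fragment lengths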
constant = 10.*i*(len(k)-i)
yield self.Transition([k], [k[:i], k[i:]], constant)
class TestTutorial(unittest.TestCase):
"""Test every code example of the tutorial"""
def test_simple_example(self):
"""Starting with a simple system"""
r1 = MassAction({'A': 2}, {'A2': 1}, 1.)
r2 = MassAction({'A2': 1}, {'A': 2}, 10.)
process = Process([r1, r2])
trajectory = process.trajectory({'A':100}, steps=1000)
for _ in trajectory:
result = trajectory.time, trajectory.state.get('A', 0), trajectory.state.get('A2', 0)
def test_events(self):
"""Adding events"""
r1 = MassAction({'A': 2}, {'A2': 1}, 1.)
r2 = MassAction({'A2': 1}, {'A': 2}, 10.)
feed = Event([], ['A'], 0.0, 1.0)
process = Process([r1, r2, feed])
trajectory = process.trajectory({}, steps=100)
for _ in trajectory:
pass
def test_rule_based_dilution(self):
"""Adding dilution"""
r1 = MassAction({'A': 2}, {'A2': 1}, 1.)
r2 = MassAction({'A2': 1}, {'A': 2}, 10.)
feed = Event([], ['A'], 0.0, 1.0)
process = Process([r1, r2, feed], [Dilution()])
trajectory = process.trajectory({}, steps=100)
for _ in trajectory:
pass
def test_rule_based_polymers(self):
"""Adding general polymerization and hydrolysis"""
feed = Event([], ['A'], 0.0, 1.0)
process = Process(transitions=[feed], rules=[Dilution(), Polymerization(), Hydrolysis()])
trajectory = process.trajectory({}, steps=100)
for _ in trajectory:
pass
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joer14/hackers-doorbell",
"score": 3
} |
#### File: joer14/hackers-doorbell/doorbell.py
```python
import os
from time import sleep
from flask import Flask
app = Flask(__name__)
invertCmd = """
osascript -e 'tell application "System Events" to key code 28 using {control down, option down, command down}'
"""
muteCmd = """
osascript -e 'tell application "System Events" to set volume with output muted'
"""
unmuteCmd = """
osascript -e 'tell application "System Events" to set volume without output muted'
"""
# may want to switch Spotify to iTunes or music player of choice
pauseCmd = """
osascript -e 'tell application "Spotify" to pause'
"""
def invert():
os.system(invertCmd)
sleep(0.1)
def flash(iters):
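    # each iteration inverts the display colours and inverts them back,
    # producing one visible flash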
for _ in range(iters):
invert()
sleep(0.1)
invert()
sleep(0.1)
def activate():
os.system(pauseCmd)
os.system(muteCmd)
flash(3)
os.system(unmuteCmd)
@app.route('/')
def hello_world():
activate()
return 'Ding dong!'
if __name__ == "__main__":
app.run(host='0.0.0.0')
``` |
{
"source": "joer14/run-fm-party",
"score": 2
} |
#### File: joer14/run-fm-party/manage_frontend.py
```python
import os, shutil
from subprocess import call
# when updating/deploying the app, copy over the frontend dir to a temporary directory
# this is because zappa exclude isn't working for subdirectories, at least for me.
def build_and_copy_frontend(self):
current_path = os.path.dirname(os.path.realpath(__file__))
src_path = os.path.join(current_path,'frontend','build')
dest_path = os.path.join(current_path,'build')
    # delete the build folder and its contents if they already exist
if os.path.islink(dest_path):
os.unlink(dest_path)
elif os.path.isdir(dest_path):
shutil.rmtree(dest_path)
        print('deleted build folder')
# build the frontend too
status = call(["yarn","build"],cwd= os.path.join(current_path,'frontend'))
if status == 0:
        print('built frontend successfully')
# copy build folder over
shutil.copytree(src_path, dest_path)
        print('copied new build folder')
return 0
else:
return 1
# create a symlink between /frontend/build and /build
def symlink_build_dir(self):
current_path = os.path.dirname(os.path.realpath(__file__))
src_path = os.path.join(current_path,'frontend','build')
dest_path = os.path.join(current_path,'build')
    # delete the build folder and its contents if they already exist
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
        print('deleted build folder')
os.symlink(src_path, dest_path)
return 0
# if __name__ == "__main__":
# app.run()
``` |
{
"source": "joer9514/MoneyManagerApp",
"score": 2
} |
#### File: core/budget/models.py
```python
from django.db import models
from datetime import datetime
from core.user.models import User
from core.category.models import Category
class Budget(models.Model):
"""
Model of the budget entity
"""
id_budget = models.AutoField(primary_key=True, auto_created=True, unique=True, null=False, blank=False)
id_user = models.ForeignKey(User, on_delete=models.CASCADE, null=False, blank=False, verbose_name='User')
id_category = models.ForeignKey(Category, on_delete=models.CASCADE, null=False, blank=False,
verbose_name='Category')
name_budget = models.CharField(max_length=50, null=False, blank=False)
month_budget = models.DateField(default=datetime.now, null=False, blank=False)
value_budget = models.DecimalField(default=0.00, null=False, blank=False, max_digits=15, decimal_places=2)
class Meta:
"""
Special budget entity attributes
"""
verbose_name = 'Budget'
verbose_name_plural = 'Budgets'
db_table = 'budget'
ordering = ['id_budget']
def __str__(self):
"""
Representation of our budget object
"""
return '{}'.format(self.name_budget)
```
#### File: core/home_page/views.py
```python
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
# imports of model classes and functions
from core.saving.views import *
from core.budget.views import *
from core.movement.views import *
from django.views.generic import *
# View class that renders the home page template
class HomeListView(TemplateView):
template_name = 'home_page/home.html'
"""
function that obtains the objects from the
models and stores them in a dictionary
"""
def get_context_data(self, *args, **kwargs):
budget = Budget.objects.all()
saving = Saving.objects.all()
movement = Movement.objects.all()
return {'budgets': budget, 'savings': saving, 'movements': movement}
"""
    Decorator method that prevents access to the other application
    windows until the user logs in.
"""
@method_decorator(login_required)
    # Dispatches the request only after the login_required check passes
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
``` |